/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
    struct b43_dmadesc32 *desc;

    *meta = &(ring->meta[slot]);
    desc = ring->descbase;
    desc = &(desc[slot]);

    return (struct b43_dmadesc_generic *)desc;
}
static void op32_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
    struct b43_dmadesc32 *descbase = ring->descbase;
    int slot;
    u32 ctl;
    u32 addr;
    u32 addrext;

    slot = (int)(&(desc->dma32) - descbase);
    B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

    addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
    addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
        >> SSB_DMA_TRANSLATION_SHIFT;
    addr |= ssb_dma_translation(ring->dev->dev);
    ctl = (bufsize - ring->frameoffset)
        & B43_DMA32_DCTL_BYTECNT;
    if (slot == ring->nr_slots - 1)
        ctl |= B43_DMA32_DCTL_DTABLEEND;
    if (start)
        ctl |= B43_DMA32_DCTL_FRAMESTART;
    if (end)
        ctl |= B43_DMA32_DCTL_FRAMEEND;
    if (irq)
        ctl |= B43_DMA32_DCTL_IRQ;
    ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
        & B43_DMA32_DCTL_ADDREXT_MASK;

    desc->dma32.control = cpu_to_le32(ctl);
    desc->dma32.address = cpu_to_le32(addr);
}
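/* Worked example of the address split above (illustrative only; it
 * assumes the SSB routing bits occupy the top two address bits, i.e.
 * SSB_DMA_TRANSLATION_MASK == 0xC0000000 with a shift of 30):
 *
 *	dmaaddr = 0x8F001000
 *	addr    = 0x8F001000 & ~0xC0000000        = 0x0F001000
 *	addrext = (0x8F001000 & 0xC0000000) >> 30 = 0x2
 *
 * addrext goes into the descriptor control word; the remaining bits
 * are ORed with the core's translation value and written out as the
 * descriptor address.
 */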
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
    b43_dma_write(ring, B43_DMA32_TXINDEX,
                  (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static void op32_tx_suspend(struct b43_dmaring *ring)
{
    b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                  | B43_DMA32_TXSUSPEND);
}
static void op32_tx_resume(struct b43_dmaring *ring)
{
    b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                  & ~B43_DMA32_TXSUSPEND);
}
static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
    u32 val;

    val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
    val &= B43_DMA32_RXDPTR;

    return (val / sizeof(struct b43_dmadesc32));
}
static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
    b43_dma_write(ring, B43_DMA32_RXINDEX,
                  (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static const struct b43_dma_ops dma32_ops = {
    .idx2desc = op32_idx2desc,
    .fill_descriptor = op32_fill_descriptor,
    .poke_tx = op32_poke_tx,
    .tx_suspend = op32_tx_suspend,
    .tx_resume = op32_tx_resume,
    .get_current_rxslot = op32_get_current_rxslot,
    .set_current_rxslot = op32_set_current_rxslot,
};
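/* Everything below dispatches through this ops vtable (ring->ops), so
 * the 32-bit and 64-bit descriptor formats share a single code path.
 * Typical call sites, as used by the TX path later in this file:
 *
 *	desc = ring->ops->idx2desc(ring, slot, &meta);
 *	ring->ops->fill_descriptor(ring, desc, dmaaddr, len, 0, 1, 1);
 *	ring->ops->poke_tx(ring, next_slot(ring, slot));
 */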
/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
    struct b43_dmadesc64 *desc;

    *meta = &(ring->meta[slot]);
    desc = ring->descbase;
    desc = &(desc[slot]);

    return (struct b43_dmadesc_generic *)desc;
}
static void op64_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
    struct b43_dmadesc64 *descbase = ring->descbase;
    int slot;
    u32 ctl0 = 0, ctl1 = 0;
    u32 addrlo, addrhi;
    u32 addrext;

    slot = (int)(&(desc->dma64) - descbase);
    B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

    addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
    addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
    addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
        >> SSB_DMA_TRANSLATION_SHIFT;
    addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
    if (slot == ring->nr_slots - 1)
        ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
    if (start)
        ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
    if (end)
        ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
    if (irq)
        ctl0 |= B43_DMA64_DCTL0_IRQ;
    ctl1 |= (bufsize - ring->frameoffset)
        & B43_DMA64_DCTL1_BYTECNT;
    ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
        & B43_DMA64_DCTL1_ADDREXT_MASK;

    desc->dma64.control0 = cpu_to_le32(ctl0);
    desc->dma64.control1 = cpu_to_le32(ctl1);
    desc->dma64.address_low = cpu_to_le32(addrlo);
    desc->dma64.address_high = cpu_to_le32(addrhi);
}
static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
    b43_dma_write(ring, B43_DMA64_TXINDEX,
                  (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static void op64_tx_suspend(struct b43_dmaring *ring)
{
    b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                  | B43_DMA64_TXSUSPEND);
}
static void op64_tx_resume(struct b43_dmaring *ring)
{
    b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                  & ~B43_DMA64_TXSUSPEND);
}
static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
    u32 val;

    val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
    val &= B43_DMA64_RXSTATDPTR;

    return (val / sizeof(struct b43_dmadesc64));
}
static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
    b43_dma_write(ring, B43_DMA64_RXINDEX,
                  (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static const struct b43_dma_ops dma64_ops = {
    .idx2desc = op64_idx2desc,
    .fill_descriptor = op64_fill_descriptor,
    .poke_tx = op64_poke_tx,
    .tx_suspend = op64_tx_suspend,
    .tx_resume = op64_tx_resume,
    .get_current_rxslot = op64_get_current_rxslot,
    .set_current_rxslot = op64_set_current_rxslot,
};
static inline int free_slots(struct b43_dmaring *ring)
{
    return (ring->nr_slots - ring->used_slots);
}
static inline int next_slot(struct b43_dmaring *ring, int slot)
{
    B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
    if (slot == ring->nr_slots - 1)
        return 0;
    return slot + 1;
}
static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
    B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
    if (slot == 0)
        return ring->nr_slots - 1;
    return slot - 1;
}
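/* The slot arithmetic wraps around the ring: with nr_slots == 256,
 * next_slot(ring, 255) == 0 and prev_slot(ring, 0) == 255. */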
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
                                  int current_used_slots)
{
    if (current_used_slots <= ring->max_used_slots)
        return;
    ring->max_used_slots = current_used_slots;
    if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
        b43dbg(ring->dev->wl,
               "max_used_slots increased to %d on %s ring %d\n",
               ring->max_used_slots,
               ring->tx ? "TX" : "RX", ring->index);
    }
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */
/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
    int slot;

    B43_WARN_ON(!ring->tx);
    B43_WARN_ON(ring->stopped);
    B43_WARN_ON(free_slots(ring) == 0);

    slot = next_slot(ring, ring->current_slot);
    ring->current_slot = slot;
    ring->used_slots++;

    update_max_used_slots(ring, ring->used_slots);

    return slot;
}
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
    static const u16 map64[] = {
        B43_MMIO_DMA64_BASE0,
        B43_MMIO_DMA64_BASE1,
        B43_MMIO_DMA64_BASE2,
        B43_MMIO_DMA64_BASE3,
        B43_MMIO_DMA64_BASE4,
        B43_MMIO_DMA64_BASE5,
    };
    static const u16 map32[] = {
        B43_MMIO_DMA32_BASE0,
        B43_MMIO_DMA32_BASE1,
        B43_MMIO_DMA32_BASE2,
        B43_MMIO_DMA32_BASE3,
        B43_MMIO_DMA32_BASE4,
        B43_MMIO_DMA32_BASE5,
    };

    if (type == B43_DMA_64BIT) {
        B43_WARN_ON(!(controller_idx >= 0 &&
                      controller_idx < ARRAY_SIZE(map64)));
        return map64[controller_idx];
    }
    B43_WARN_ON(!(controller_idx >= 0 &&
                  controller_idx < ARRAY_SIZE(map32)));
    return map32[controller_idx];
}
static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
                          unsigned char *buf, size_t len, int tx)
{
    dma_addr_t dmaaddr;

    if (tx)
        dmaaddr = dma_map_single(ring->dev->dev->dev,
                                 buf, len, DMA_TO_DEVICE);
    else
        dmaaddr = dma_map_single(ring->dev->dev->dev,
                                 buf, len, DMA_FROM_DEVICE);

    return dmaaddr;
}
static inline
void unmap_descbuffer(struct b43_dmaring *ring,
                      dma_addr_t addr, size_t len, int tx)
{
    if (tx)
        dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
    else
        dma_unmap_single(ring->dev->dev->dev,
                         addr, len, DMA_FROM_DEVICE);
}
static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
                             dma_addr_t addr, size_t len)
{
    B43_WARN_ON(ring->tx);
    dma_sync_single_for_cpu(ring->dev->dev->dev,
                            addr, len, DMA_FROM_DEVICE);
}
static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
                                dma_addr_t addr, size_t len)
{
    B43_WARN_ON(ring->tx);
    dma_sync_single_for_device(ring->dev->dev->dev,
                               addr, len, DMA_FROM_DEVICE);
}
static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
                            struct b43_dmadesc_meta *meta)
{
    if (meta->skb) {
        dev_kfree_skb_any(meta->skb);
        meta->skb = NULL;
    }
}
static int alloc_ringmemory(struct b43_dmaring *ring)
{
    struct device *dev = ring->dev->dev->dev;
    gfp_t flags = GFP_KERNEL;

    /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
     * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
     * has shown that 4K is sufficient for the latter as long as the buffer
     * does not cross an 8K boundary.
     *
     * For unknown reasons - possibly a hardware error - the BCM4311 rev
     * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
     * which accounts for the GFP_DMA flag below.
     */
    if (ring->type == B43_DMA_64BIT)
        flags |= GFP_DMA;
    ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
                                        &(ring->dmabase), flags);
    if (!ring->descbase) {
        b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
        return -ENOMEM;
    }
    memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

    return 0;
}
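/* Note that dma_alloc_coherent() returns memory aligned to the smallest
 * PAGE_SIZE order greater than or equal to the requested size, so the
 * 4K alignment requirement is satisfied by the allocation itself; only
 * the GFP_DMA zone placement above needs explicit handling. */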
static void free_ringmemory(struct b43_dmaring *ring)
{
    struct device *dev = ring->dev->dev->dev;

    dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
                      ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
                                      enum b43_dmatype type)
{
    int i;
    u32 value;
    u16 offset;

    might_sleep();

    offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
    b43_write32(dev, mmio_base + offset, 0);
    for (i = 0; i < 10; i++) {
        offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
                                           B43_DMA32_RXSTATUS;
        value = b43_read32(dev, mmio_base + offset);
        if (type == B43_DMA_64BIT) {
            value &= B43_DMA64_RXSTAT;
            if (value == B43_DMA64_RXSTAT_DISABLED) {
                i = -1;
                break;
            }
        } else {
            value &= B43_DMA32_RXSTATE;
            if (value == B43_DMA32_RXSTAT_DISABLED) {
                i = -1;
                break;
            }
        }
        msleep(1);
    }
    if (i != -1) {
        b43err(dev->wl, "DMA RX reset timed out\n");
        return -ENODEV;
    }

    return 0;
}
/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
                                      enum b43_dmatype type)
{
    int i;
    u32 value;
    u16 offset;

    might_sleep();

    for (i = 0; i < 10; i++) {
        offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
                                           B43_DMA32_TXSTATUS;
        value = b43_read32(dev, mmio_base + offset);
        if (type == B43_DMA_64BIT) {
            value &= B43_DMA64_TXSTAT;
            if (value == B43_DMA64_TXSTAT_DISABLED ||
                value == B43_DMA64_TXSTAT_IDLEWAIT ||
                value == B43_DMA64_TXSTAT_STOPPED)
                break;
        } else {
            value &= B43_DMA32_TXSTATE;
            if (value == B43_DMA32_TXSTAT_DISABLED ||
                value == B43_DMA32_TXSTAT_IDLEWAIT ||
                value == B43_DMA32_TXSTAT_STOPPED)
                break;
        }
        msleep(1);
    }
    offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
    b43_write32(dev, mmio_base + offset, 0);
    for (i = 0; i < 10; i++) {
        offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
                                           B43_DMA32_TXSTATUS;
        value = b43_read32(dev, mmio_base + offset);
        if (type == B43_DMA_64BIT) {
            value &= B43_DMA64_TXSTAT;
            if (value == B43_DMA64_TXSTAT_DISABLED) {
                i = -1;
                break;
            }
        } else {
            value &= B43_DMA32_TXSTATE;
            if (value == B43_DMA32_TXSTAT_DISABLED) {
                i = -1;
                break;
            }
        }
        msleep(1);
    }
    if (i != -1) {
        b43err(dev->wl, "DMA TX reset timed out\n");
        return -ENODEV;
    }
    /* ensure the reset is completed. */
    msleep(1);

    return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
                                  dma_addr_t addr,
                                  size_t buffersize)
{
    if (unlikely(dma_mapping_error(addr)))
        return 1;

    switch (ring->type) {
    case B43_DMA_30BIT:
        if ((u64)addr + buffersize > (1ULL << 30))
            return 1;
        break;
    case B43_DMA_32BIT:
        if ((u64)addr + buffersize > (1ULL << 32))
            return 1;
        break;
    case B43_DMA_64BIT:
        /* Currently we can't have addresses beyond
         * 64bit in the kernel. */
        break;
    }

    /* The address is OK. */
    return 0;
}
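/* Example of the range check above: on a 30-bit ring, a buffer mapped
 * at 0x3FFFFF00 with a length of 512 bytes ends at 0x40000100, which
 * is beyond 1ULL << 30 (0x40000000), so the mapping is rejected even
 * though its start address is still addressable. */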
static int setup_rx_descbuffer(struct b43_dmaring *ring,
                               struct b43_dmadesc_generic *desc,
                               struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
    struct b43_rxhdr_fw4 *rxhdr;
    struct b43_hwtxstatus *txstat;
    dma_addr_t dmaaddr;
    struct sk_buff *skb;

    B43_WARN_ON(ring->tx);

    skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
    if (unlikely(!skb))
        return -ENOMEM;
    dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
    if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
        /* ugh. try to realloc in zone_dma */
        gfp_flags |= GFP_DMA;

        dev_kfree_skb_any(skb);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
            return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data,
                                 ring->rx_buffersize, 0);
    }

    if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
        dev_kfree_skb_any(skb);
        return -EIO;
    }

    meta->skb = skb;
    meta->dmaaddr = dmaaddr;
    ring->ops->fill_descriptor(ring, desc, dmaaddr,
                               ring->rx_buffersize, 0, 0, 0);

    rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
    rxhdr->frame_len = 0;
    txstat = (struct b43_hwtxstatus *)(skb->data);
    txstat->cookie = 0;

    return 0;
}
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
    int i, err = -ENOMEM;
    struct b43_dmadesc_generic *desc;
    struct b43_dmadesc_meta *meta;

    for (i = 0; i < ring->nr_slots; i++) {
        desc = ring->ops->idx2desc(ring, i, &meta);

        err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
        if (err) {
            b43err(ring->dev->wl,
                   "Failed to allocate initial descbuffers\n");
            goto err_unwind;
        }
    }
    mb();
    ring->used_slots = ring->nr_slots;
    err = 0;
out:
    return err;

err_unwind:
    for (i--; i >= 0; i--) {
        desc = ring->ops->idx2desc(ring, i, &meta);

        unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
        dev_kfree_skb(meta->skb);
    }
    goto out;
}
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
    int err = 0;
    u32 value;
    u32 addrext;
    u32 trans = ssb_dma_translation(ring->dev->dev);

    if (ring->tx) {
        if (ring->type == B43_DMA_64BIT) {
            u64 ringbase = (u64) (ring->dmabase);

            addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                >> SSB_DMA_TRANSLATION_SHIFT;
            value = B43_DMA64_TXENABLE;
            value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
                & B43_DMA64_TXADDREXT_MASK;
            b43_dma_write(ring, B43_DMA64_TXCTL, value);
            b43_dma_write(ring, B43_DMA64_TXRINGLO,
                          (ringbase & 0xFFFFFFFF));
            b43_dma_write(ring, B43_DMA64_TXRINGHI,
                          ((ringbase >> 32) &
                           ~SSB_DMA_TRANSLATION_MASK)
                          | (trans << 1));
        } else {
            u32 ringbase = (u32) (ring->dmabase);

            addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                >> SSB_DMA_TRANSLATION_SHIFT;
            value = B43_DMA32_TXENABLE;
            value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
                & B43_DMA32_TXADDREXT_MASK;
            b43_dma_write(ring, B43_DMA32_TXCTL, value);
            b43_dma_write(ring, B43_DMA32_TXRING,
                          (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                          | trans);
        }
    } else {
        err = alloc_initial_descbuffers(ring);
        if (err)
            goto out;
        if (ring->type == B43_DMA_64BIT) {
            u64 ringbase = (u64) (ring->dmabase);

            addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                >> SSB_DMA_TRANSLATION_SHIFT;
            value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
            value |= B43_DMA64_RXENABLE;
            value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
                & B43_DMA64_RXADDREXT_MASK;
            b43_dma_write(ring, B43_DMA64_RXCTL, value);
            b43_dma_write(ring, B43_DMA64_RXRINGLO,
                          (ringbase & 0xFFFFFFFF));
            b43_dma_write(ring, B43_DMA64_RXRINGHI,
                          ((ringbase >> 32) &
                           ~SSB_DMA_TRANSLATION_MASK)
                          | (trans << 1));
            b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
                          sizeof(struct b43_dmadesc64));
        } else {
            u32 ringbase = (u32) (ring->dmabase);

            addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                >> SSB_DMA_TRANSLATION_SHIFT;
            value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
            value |= B43_DMA32_RXENABLE;
            value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
                & B43_DMA32_RXADDREXT_MASK;
            b43_dma_write(ring, B43_DMA32_RXCTL, value);
            b43_dma_write(ring, B43_DMA32_RXRING,
                          (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                          | trans);
            b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
                          sizeof(struct b43_dmadesc32));
        }
    }

out:
    return err;
}
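/* Summary of the register layout programmed above: 64-bit DMA splits
 * the ring base across the RINGLO/RINGHI pair and folds the SSB
 * translation bits into the high word (shifted left by one), while
 * 32-bit DMA ORs the translation value directly into the single ring
 * register. The RX index register is primed to the byte offset just
 * past the last descriptor, handing all slots to the hardware. */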
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
    if (ring->tx) {
        b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
                                   ring->type);
        if (ring->type == B43_DMA_64BIT) {
            b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
            b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
        } else
            b43_dma_write(ring, B43_DMA32_TXRING, 0);
    } else {
        b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
                                   ring->type);
        if (ring->type == B43_DMA_64BIT) {
            b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
            b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
        } else
            b43_dma_write(ring, B43_DMA32_RXRING, 0);
    }
}
static void free_all_descbuffers(struct b43_dmaring *ring)
{
    struct b43_dmadesc_generic *desc;
    struct b43_dmadesc_meta *meta;
    int i;

    if (!ring->used_slots)
        return;
    for (i = 0; i < ring->nr_slots; i++) {
        desc = ring->ops->idx2desc(ring, i, &meta);

        if (!meta->skb) {
            B43_WARN_ON(!ring->tx);
            continue;
        }
        if (ring->tx) {
            unmap_descbuffer(ring, meta->dmaaddr,
                             meta->skb->len, 1);
        } else {
            unmap_descbuffer(ring, meta->dmaaddr,
                             ring->rx_buffersize, 0);
        }
        free_descriptor_buffer(ring, meta);
    }
}
static u64 supported_dma_mask(struct b43_wldev *dev)
{
    u32 tmp;
    u16 mmio_base;

    tmp = b43_read32(dev, SSB_TMSHIGH);
    if (tmp & SSB_TMSHIGH_DMA64)
        return DMA_64BIT_MASK;
    mmio_base = b43_dmacontroller_base(0, 0);
    b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
    tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
    if (tmp & B43_DMA32_TXADDREXT_MASK)
        return DMA_32BIT_MASK;

    return DMA_30BIT_MASK;
}
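/* The probe above works in two steps: if the core advertises 64-bit
 * DMA in its SSB TMSHIGH flags, take that at face value. Otherwise
 * write the ADDREXT bits of a 32-bit TX control register and read them
 * back; cores that implement address extension let the bits stick
 * (32-bit capable), while older cores keep them at zero and are
 * limited to 30-bit addressing. */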
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                      int controller_index,
                                      int for_tx,
                                      enum b43_dmatype type)
{
    struct b43_dmaring *ring;
    int err;
    int nr_slots;
    dma_addr_t dma_test;

    ring = kzalloc(sizeof(*ring), GFP_KERNEL);
    if (!ring)
        goto out;
    ring->type = type;

    nr_slots = B43_RXRING_SLOTS;
    if (for_tx)
        nr_slots = B43_TXRING_SLOTS;

    ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
                         GFP_KERNEL);
    if (!ring->meta)
        goto err_kfree_ring;
    if (for_tx) {
        ring->txhdr_cache = kcalloc(nr_slots,
                                    b43_txhdr_size(dev),
                                    GFP_KERNEL);
        if (!ring->txhdr_cache)
            goto err_kfree_meta;

        /* test for ability to dma to txhdr_cache */
        dma_test = dma_map_single(dev->dev->dev,
                                  ring->txhdr_cache,
                                  b43_txhdr_size(dev),
                                  DMA_TO_DEVICE);

        if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
            /* ugh. try to realloc in zone_dma */
            kfree(ring->txhdr_cache);
            ring->txhdr_cache = kcalloc(nr_slots,
                                        b43_txhdr_size(dev),
                                        GFP_KERNEL | GFP_DMA);
            if (!ring->txhdr_cache)
                goto err_kfree_meta;

            dma_test = dma_map_single(dev->dev->dev,
                                      ring->txhdr_cache,
                                      b43_txhdr_size(dev),
                                      DMA_TO_DEVICE);

            if (b43_dma_mapping_error(ring, dma_test,
                                      b43_txhdr_size(dev)))
                goto err_kfree_txhdr_cache;
        }

        dma_unmap_single(dev->dev->dev,
                         dma_test, b43_txhdr_size(dev),
                         DMA_TO_DEVICE);
    }

    ring->dev = dev;
    ring->nr_slots = nr_slots;
    ring->mmio_base = b43_dmacontroller_base(type, controller_index);
    ring->index = controller_index;
    if (type == B43_DMA_64BIT)
        ring->ops = &dma64_ops;
    else
        ring->ops = &dma32_ops;
    if (for_tx) {
        ring->tx = 1;
        ring->current_slot = -1;
    } else {
        if (ring->index == 0) {
            ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
            ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
        } else if (ring->index == 3) {
            ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
            ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
        } else
            B43_WARN_ON(1);
    }
    spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
    ring->last_injected_overflow = jiffies;
#endif

    err = alloc_ringmemory(ring);
    if (err)
        goto err_kfree_txhdr_cache;
    err = dmacontroller_setup(ring);
    if (err)
        goto err_free_ringmemory;

out:
    return ring;

err_free_ringmemory:
    free_ringmemory(ring);
err_kfree_txhdr_cache:
    kfree(ring->txhdr_cache);
err_kfree_meta:
    kfree(ring->meta);
err_kfree_ring:
    kfree(ring);
    ring = NULL;
    goto out;
}
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
                                const char *ringname)
{
    if (!ring)
        return;

    b43dbg(ring->dev->wl, "DMA-%u %s max used slots: %d/%d\n",
           (unsigned int)(ring->type), ringname,
           ring->max_used_slots, ring->nr_slots);
    /* Device IRQs are disabled prior to entering this function,
     * so no need to take care of concurrency with rx handler stuff.
     */
    dmacontroller_cleanup(ring);
    free_all_descbuffers(ring);
    free_ringmemory(ring);

    kfree(ring->txhdr_cache);
    kfree(ring->meta);
    kfree(ring);
}
#define destroy_ring(dma, ring) do {                            \
        b43_destroy_dmaring((dma)->ring, __stringify(ring));    \
        (dma)->ring = NULL;                                     \
    } while (0)
void b43_dma_free(struct b43_wldev *dev)
{
    struct b43_dma *dma = &dev->dma;

    destroy_ring(dma, rx_ring);
    destroy_ring(dma, tx_ring_AC_BK);
    destroy_ring(dma, tx_ring_AC_BE);
    destroy_ring(dma, tx_ring_AC_VI);
    destroy_ring(dma, tx_ring_AC_VO);
    destroy_ring(dma, tx_ring_mcast);
}
int b43_dma_init(struct b43_wldev *dev)
{
    struct b43_dma *dma = &dev->dma;
    int err;
    u64 dmamask;
    enum b43_dmatype type;

    dmamask = supported_dma_mask(dev);
    switch (dmamask) {
    default:
        B43_WARN_ON(1);
    case DMA_30BIT_MASK:
        type = B43_DMA_30BIT;
        break;
    case DMA_32BIT_MASK:
        type = B43_DMA_32BIT;
        break;
    case DMA_64BIT_MASK:
        type = B43_DMA_64BIT;
        break;
    }
    err = ssb_dma_set_mask(dev->dev, dmamask);
    if (err) {
        b43err(dev->wl, "The machine/kernel does not support "
               "the required DMA mask (0x%08X%08X)\n",
               (unsigned int)((dmamask & 0xFFFFFFFF00000000ULL) >> 32),
               (unsigned int)(dmamask & 0x00000000FFFFFFFFULL));
        return -EOPNOTSUPP;
    }

    err = -ENOMEM;
    /* setup TX DMA channels. */
    dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
    if (!dma->tx_ring_AC_BK)
        goto out;

    dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
    if (!dma->tx_ring_AC_BE)
        goto err_destroy_bk;

    dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
    if (!dma->tx_ring_AC_VI)
        goto err_destroy_be;

    dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
    if (!dma->tx_ring_AC_VO)
        goto err_destroy_vi;

    dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
    if (!dma->tx_ring_mcast)
        goto err_destroy_vo;

    /* setup RX DMA channel. */
    dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
    if (!dma->rx_ring)
        goto err_destroy_mcast;

    /* No support for the TX status DMA ring. */
    B43_WARN_ON(dev->dev->id.revision < 5);

    b43dbg(dev->wl, "%u-bit DMA initialized\n",
           (unsigned int)type);
    err = 0;
out:
    return err;

err_destroy_mcast:
    destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
    destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
    destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
    destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
    destroy_ring(dma, tx_ring_AC_BK);
    goto out;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
    u16 cookie;

    /* Use the upper 4 bits of the cookie as
     * DMA controller ID and store the slot number
     * in the lower 12 bits.
     * Note that the cookie must never be 0, as this
     * is a special value used in RX path.
     * It can also not be 0xFFFF because that is special
     * for multicast frames.
     */
    cookie = (((u16)ring->index + 1) << 12);
    B43_WARN_ON(slot & ~0x0FFF);
    cookie |= (u16)slot;

    return cookie;
}
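/* Cookie layout example: ring->index == 2, slot == 5 yields
 * cookie == ((2 + 1) << 12) | 5 == 0x3005. parse_cookie() below
 * inverts this mapping on TX status processing. */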
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
    struct b43_dma *dma = &dev->dma;
    struct b43_dmaring *ring = NULL;

    switch (cookie & 0xF000) {
    case 0x1000:
        ring = dma->tx_ring_AC_BK;
        break;
    case 0x2000:
        ring = dma->tx_ring_AC_BE;
        break;
    case 0x3000:
        ring = dma->tx_ring_AC_VI;
        break;
    case 0x4000:
        ring = dma->tx_ring_AC_VO;
        break;
    case 0x5000:
        ring = dma->tx_ring_mcast;
        break;
    default:
        B43_WARN_ON(1);
    }
    *slot = (cookie & 0x0FFF);
    B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

    return ring;
}
static int dma_tx_fragment(struct b43_dmaring *ring,
                           struct sk_buff *skb,
                           struct ieee80211_tx_control *ctl)
{
    const struct b43_dma_ops *ops = ring->ops;
    u8 *header;
    int slot, old_top_slot, old_used_slots;
    int err;
    struct b43_dmadesc_generic *desc;
    struct b43_dmadesc_meta *meta;
    struct b43_dmadesc_meta *meta_hdr;
    struct sk_buff *bounce_skb;
    u16 cookie;
    size_t hdrsize = b43_txhdr_size(ring->dev);

#define SLOTS_PER_PACKET  2
    B43_WARN_ON(skb_shinfo(skb)->nr_frags);

    old_top_slot = ring->current_slot;
    old_used_slots = ring->used_slots;

    /* Get a slot for the header. */
    slot = request_slot(ring);
    desc = ops->idx2desc(ring, slot, &meta_hdr);
    memset(meta_hdr, 0, sizeof(*meta_hdr));

    header = &(ring->txhdr_cache[slot * hdrsize]);
    cookie = generate_cookie(ring, slot);
    err = b43_generate_txhdr(ring->dev, header,
                             skb->data, skb->len, ctl, cookie);
    if (unlikely(err)) {
        ring->current_slot = old_top_slot;
        ring->used_slots = old_used_slots;
        return err;
    }

    meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                       hdrsize, 1);
    if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
        ring->current_slot = old_top_slot;
        ring->used_slots = old_used_slots;
        return -EIO;
    }
    ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
                         hdrsize, 1, 0, 0);

    /* Get a slot for the payload. */
    slot = request_slot(ring);
    desc = ops->idx2desc(ring, slot, &meta);
    memset(meta, 0, sizeof(*meta));

    memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
    meta->skb = skb;
    meta->is_last_fragment = 1;

    meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
    /* create a bounce buffer in zone_dma on mapping failure. */
    if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
        bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
        if (!bounce_skb) {
            ring->current_slot = old_top_slot;
            ring->used_slots = old_used_slots;
            err = -ENOMEM;
            goto out_unmap_hdr;
        }

        memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
        dev_kfree_skb_any(skb);
        skb = bounce_skb;
        meta->skb = skb;
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
            ring->current_slot = old_top_slot;
            ring->used_slots = old_used_slots;
            err = -EIO;
            goto out_free_bounce;
        }
    }

    ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

    if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
        /* Tell the firmware about the cookie of the last
         * mcast frame, so it can clear the more-data bit in it. */
        b43_shm_write16(ring->dev, B43_SHM_SHARED,
                        B43_SHM_SH_MCASTCOOKIE, cookie);
    }
    /* Now transfer the whole frame. */
    wmb();
    ops->poke_tx(ring, next_slot(ring, slot));
    return 0;

out_free_bounce:
    dev_kfree_skb_any(skb);
out_unmap_hdr:
    unmap_descbuffer(ring, meta_hdr->dmaaddr,
                     hdrsize, 1);
    return err;
}
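/* Resulting ring layout: each frame occupies SLOTS_PER_PACKET (2)
 * consecutive slots. The first slot carries the device TX header from
 * txhdr_cache (FRAMESTART set), the second carries the payload skb
 * (FRAMEEND set, plus an IRQ on completion). */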
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
    if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
        /* Check if we should inject another ringbuffer overflow
         * to test handling of this situation in the stack. */
        unsigned long next_overflow;

        next_overflow = ring->last_injected_overflow + HZ;
        if (time_after(jiffies, next_overflow)) {
            ring->last_injected_overflow = jiffies;
            b43dbg(ring->dev->wl,
                   "Injecting TX ring overflow on "
                   "DMA controller %d\n", ring->index);
            return 1;
        }
    }
#endif /* CONFIG_B43_DEBUG */
    return 0;
}
/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
                                                   u8 queue_prio)
{
    struct b43_dmaring *ring;

    if (b43_modparam_qos) {
        /* 0 = highest priority */
        switch (queue_prio) {
        default:
            B43_WARN_ON(1);
            /* fallthrough */
        case 0:
            ring = dev->dma.tx_ring_AC_VO;
            break;
        case 1:
            ring = dev->dma.tx_ring_AC_VI;
            break;
        case 2:
            ring = dev->dma.tx_ring_AC_BE;
            break;
        case 3:
            ring = dev->dma.tx_ring_AC_BK;
            break;
        }
    } else
        ring = dev->dma.tx_ring_AC_BE;

    return ring;
}
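/* With QoS enabled, the static mapping is: mac80211 queue 0 -> AC_VO,
 * 1 -> AC_VI, 2 -> AC_BE, 3 -> AC_BK. With QoS disabled, all frames
 * go through the best-effort ring. */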
int b43_dma_tx(struct b43_wldev *dev,
               struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
    struct b43_dmaring *ring;
    struct ieee80211_hdr *hdr;
    int err = 0;
    unsigned long flags;

    if (unlikely(skb->len < 2 + 2 + 6)) {
        /* Too short, this can't be a valid frame. */
        return -EINVAL;
    }

    hdr = (struct ieee80211_hdr *)skb->data;
    if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
        /* The multicast ring will be sent after the DTIM */
        ring = dev->dma.tx_ring_mcast;
        /* Set the more-data bit. Ucode will clear it on
         * the last frame for us. */
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
    } else {
        /* Decide by priority where to put this frame. */
        ring = select_ring_by_priority(dev, ctl->queue);
    }

    spin_lock_irqsave(&ring->lock, flags);
    B43_WARN_ON(!ring->tx);
    if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
        b43warn(dev->wl, "DMA queue overflow\n");
        err = -ENOSPC;
        goto out_unlock;
    }
    /* Check if the queue was stopped in mac80211,
     * but we got called nevertheless.
     * That would be a mac80211 bug. */
    B43_WARN_ON(ring->stopped);

    /* Assign the queue number to the ring (if not already done before)
     * so TX status handling can use it. The queue to ring mapping is
     * static, so we don't need to store it per frame. */
    ring->queue_prio = ctl->queue;

    err = dma_tx_fragment(ring, skb, ctl);
    if (unlikely(err == -ENOKEY)) {
        /* Drop this packet, as we don't have the encryption key
         * anymore and must not transmit it unencrypted. */
        dev_kfree_skb_any(skb);
        err = 0;
        goto out_unlock;
    }
    if (unlikely(err)) {
        b43err(dev->wl, "DMA tx mapping failure\n");
        goto out_unlock;
    }
    ring->nr_tx_packets++;
    if ((free_slots(ring) < SLOTS_PER_PACKET) ||
        should_inject_overflow(ring)) {
        /* This TX ring is full. */
        ieee80211_stop_queue(dev->wl->hw, ctl->queue);
        ring->stopped = 1;
        if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
            b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
        }
    }
out_unlock:
    spin_unlock_irqrestore(&ring->lock, flags);

    return err;
}
void b43_dma_handle_txstatus(struct b43_wldev *dev,
                             const struct b43_txstatus *status)
{
    const struct b43_dma_ops *ops;
    struct b43_dmaring *ring;
    struct b43_dmadesc_generic *desc;
    struct b43_dmadesc_meta *meta;
    int slot;

    ring = parse_cookie(dev, status->cookie, &slot);
    if (unlikely(!ring))
        return;
    B43_WARN_ON(!irqs_disabled());
    spin_lock(&ring->lock);

    B43_WARN_ON(!ring->tx);
    ops = ring->ops;
    while (1) {
        B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
        desc = ops->idx2desc(ring, slot, &meta);

        if (meta->skb)
            unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
                             1);
        else
            unmap_descbuffer(ring, meta->dmaaddr,
                             b43_txhdr_size(dev), 1);

        if (meta->is_last_fragment) {
            B43_WARN_ON(!meta->skb);
            /* Call back to inform the ieee80211 subsystem about the
             * status of the transmission.
             * Some fields of txstat are already filled in dma_tx().
             */
            if (status->acked) {
                meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
            } else {
                if (!(meta->txstat.control.flags
                      & IEEE80211_TXCTL_NO_ACK))
                    meta->txstat.excessive_retries = 1;
            }
            if (status->frame_count == 0) {
                /* The frame was not transmitted at all. */
                meta->txstat.retry_count = 0;
            } else
                meta->txstat.retry_count = status->frame_count - 1;
            ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
                                        &(meta->txstat));
            /* skb is freed by ieee80211_tx_status_irqsafe() */
            meta->skb = NULL;
        } else {
            /* No need to call free_descriptor_buffer here, as
             * this is only the txhdr, which is not allocated.
             */
            B43_WARN_ON(meta->skb);
        }

        /* Everything unmapped and free'd. So it's not used anymore. */
        ring->used_slots--;

        if (meta->is_last_fragment)
            break;
        slot = next_slot(ring, slot);
    }
    dev->stats.last_tx = jiffies;
    if (ring->stopped) {
        B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
        ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
        ring->stopped = 0;
        if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
            b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
        }
    }

    spin_unlock(&ring->lock);
}
void b43_dma_get_tx_stats(struct b43_wldev *dev,
                          struct ieee80211_tx_queue_stats *stats)
{
    const int nr_queues = dev->wl->hw->queues;
    struct b43_dmaring *ring;
    struct ieee80211_tx_queue_stats_data *data;
    unsigned long flags;
    int i;

    for (i = 0; i < nr_queues; i++) {
        data = &(stats->data[i]);
        ring = select_ring_by_priority(dev, i);

        spin_lock_irqsave(&ring->lock, flags);
        data->len = ring->used_slots / SLOTS_PER_PACKET;
        data->limit = ring->nr_slots / SLOTS_PER_PACKET;
        data->count = ring->nr_tx_packets;
        spin_unlock_irqrestore(&ring->lock, flags);
    }
}
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
    const struct b43_dma_ops *ops = ring->ops;
    struct b43_dmadesc_generic *desc;
    struct b43_dmadesc_meta *meta;
    struct b43_rxhdr_fw4 *rxhdr;
    struct sk_buff *skb;
    u16 len;
    int err;
    dma_addr_t dmaaddr;

    desc = ops->idx2desc(ring, *slot, &meta);

    sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
    skb = meta->skb;

    rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
    len = le16_to_cpu(rxhdr->frame_len);
    if (len == 0) {
        int i = 0;

        do {
            udelay(2);
            barrier();
            len = le16_to_cpu(rxhdr->frame_len);
        } while (len == 0 && i++ < 5);
        if (unlikely(len == 0)) {
            /* recycle the descriptor buffer. */
            sync_descbuffer_for_device(ring, meta->dmaaddr,
                                       ring->rx_buffersize);
            goto drop;
        }
    }
    if (unlikely(len > ring->rx_buffersize)) {
        /* The data did not fit into one descriptor buffer
         * and is split over multiple buffers.
         * This should never happen, as we try to allocate buffers
         * big enough. So simply ignore this packet.
         */
        int cnt = 0;
        s32 tmp = len;

        while (1) {
            desc = ops->idx2desc(ring, *slot, &meta);
            /* recycle the descriptor buffer. */
            sync_descbuffer_for_device(ring, meta->dmaaddr,
                                       ring->rx_buffersize);
            *slot = next_slot(ring, *slot);
            cnt++;
            tmp -= ring->rx_buffersize;
            if (tmp <= 0)
                break;
        }
        b43err(ring->dev->wl, "DMA RX buffer too small "
               "(len: %u, buffer: %u, nr-dropped: %d)\n",
               len, ring->rx_buffersize, cnt);
        goto drop;
    }

    dmaaddr = meta->dmaaddr;
    err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
    if (unlikely(err)) {
        b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
        sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
        goto drop;
    }

    unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
    skb_put(skb, len + ring->frameoffset);
    skb_pull(skb, ring->frameoffset);

    b43_rx(ring->dev, skb, rxhdr);
drop:
    return;
}
void b43_dma_rx(struct b43_dmaring *ring)
{
    const struct b43_dma_ops *ops = ring->ops;
    int slot, current_slot;
    int used_slots = 0;

    B43_WARN_ON(ring->tx);
    current_slot = ops->get_current_rxslot(ring);
    B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

    slot = ring->current_slot;
    for (; slot != current_slot; slot = next_slot(ring, slot)) {
        dma_rx(ring, &slot);
        update_max_used_slots(ring, ++used_slots);
    }
    ops->set_current_rxslot(ring, slot);
    ring->current_slot = slot;
}
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
    unsigned long flags;

    spin_lock_irqsave(&ring->lock, flags);
    B43_WARN_ON(!ring->tx);
    ring->ops->tx_suspend(ring);
    spin_unlock_irqrestore(&ring->lock, flags);
}
static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
    unsigned long flags;

    spin_lock_irqsave(&ring->lock, flags);
    B43_WARN_ON(!ring->tx);
    ring->ops->tx_resume(ring);
    spin_unlock_irqrestore(&ring->lock, flags);
}
void b43_dma_tx_suspend(struct b43_wldev *dev)
{
    b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
    b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}
void b43_dma_tx_resume(struct b43_wldev *dev)
{
    b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
    b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
    b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
    b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
    b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
    b43_power_saving_ctl_bits(dev, 0);
}