/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
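/*
 * Every hardware descriptor in ->descbase has a parallel entry in the
 * ->meta array holding the driver-side state (skb pointer, DMA address).
 * idx2desc() hands back both views for a given slot index.
 */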
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	    & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
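/*
 * The "addrext" bits carry the part of the bus address that the 32-bit
 * descriptor cannot hold directly: the SSB backplane routes the top two
 * address bits through the translation field. Assuming the usual SSB
 * definitions (SSB_DMA_TRANSLATION_MASK == 0xC0000000, shift == 30),
 * a buffer at bus address 0x80001000 is split into
 *
 *	addr    = 0x00001000 | ssb_dma_translation(...)
 *	addrext = 0x80000000 >> 30 = 2
 *
 * which the hardware recombines on the backplane.
 */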
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}
static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}
static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}
static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};
/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
	    & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}
static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}
static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}
static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}
static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}
static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}
static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */
/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}
static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}
static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}
static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dev,
				addr, len, DMA_FROM_DEVICE);
}
static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}
static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}
static void free_ringmemory(struct b43_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}
/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
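/*
 * The range check above guards against addresses the ring cannot express:
 * on a 30-bit ring, for example, every byte of the buffer must live below
 * the 1GiB boundary (addr + buffersize <= 1 << 30). A mapping that fails
 * the check is unmapped again right away, so callers can simply retry
 * with a GFP_DMA allocation.
 */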
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	struct b43_rxhdr_fw4 *rxhdr;
	struct b43_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}
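/*
 * Note the final RXINDEX write: it is set to the byte offset one past the
 * last descriptor (nr_slots * descriptor size). Since all RX buffers were
 * just allocated, this effectively hands every slot in the ring over to
 * the device for incoming frames.
 */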
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}
static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}
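/*
 * The probe above works by feature detection: if the core advertises
 * 64-bit DMA in its TM state high flags, use that. Otherwise write the
 * address-extension bits of a TX control register and read them back;
 * if they stick, the core understands 32-bit addressing, else it is
 * limited to the old 30-bit scheme.
 */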
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;

	nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);
		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;
			dma_test = dma_map_single(dev->dev->dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);
			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1))
				goto err_kfree_txhdr_cache;
		}
		dma_unmap_single(dev->dev->dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
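/*
 * do_div() divides a 64-bit value in place and returns the remainder, so
 * divide() yields the quotient and modulo() the remainder of a 64-by-32
 * bit division; e.g. divide(1005, 10) == 100 and modulo(1005, 10) == 5,
 * which is how the permille values below are split into integer and
 * fractional digits for printing.
 */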
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}
#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)
void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	switch (dmamask) {
	default:
		B43_WARN_ON(1);
	case DMA_30BIT_MASK:
		type = B43_DMA_30BIT;
		break;
	case DMA_32BIT_MASK:
		type = B43_DMA_32BIT;
		break;
	case DMA_64BIT_MASK:
		type = B43_DMA_64BIT;
		break;
	}
	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required DMA mask (0x%08X%08X)\n",
		       (unsigned int)((dmamask & 0xFFFFFFFF00000000ULL) >> 32),
		       (unsigned int)(dmamask & 0x00000000FFFFFFFFULL));
		return -EOPNOTSUPP;
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
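/*
 * Example: a frame queued on the AC_VI ring (controller index 2) with its
 * header in slot 5 gets cookie ((2 + 1) << 12) | 5 == 0x3005.
 * parse_cookie() below reverses this: the 0x3000 nibble selects
 * tx_ring_AC_VI and the low 12 bits recover the slot.
 */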
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct b43_dma_ops *ops = ring->ops;
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

#define SLOTS_PER_PACKET  2
	B43_WARN_ON(skb_shinfo(skb)->nr_frags);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, ctl, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}
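/*
 * Each frame thus occupies exactly SLOTS_PER_PACKET (two) consecutive
 * slots: the first carries the device TX header from txhdr_cache
 * (FRAMESTART set), the second the actual skb payload (FRAMEEND and IRQ
 * set), so the TX status interrupt fires once per frame rather than once
 * per descriptor.
 */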
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}
/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (b43_modparam_qos) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}
int b43_dma_tx(struct b43_wldev *dev,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;

	if (unlikely(skb->len < 2 + 2 + 6)) {
		/* Too short, this can't be a valid frame. */
		return -EINVAL;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(dev, ctl->queue);
	}

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = ctl->queue;

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, ctl->queue);
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}
static void b43_fill_txstatus_report(struct b43_dmaring *ring,
				     struct ieee80211_tx_status *report,
				     const struct b43_txstatus *status)
{
	bool frame_failed = 0;

	if (status->acked) {
		/* The frame was ACKed. */
		report->flags |= IEEE80211_TX_STATUS_ACK;
	} else {
		/* The frame was not ACKed... */
		if (!(report->control.flags & IEEE80211_TXCTL_NO_ACK)) {
			/* ...but we expected an ACK. */
			frame_failed = 1;
			report->excessive_retries = 1;
		}
	}
	if (status->frame_count == 0) {
		/* The frame was not transmitted at all. */
		report->retry_count = 0;
	} else
		report->retry_count = status->frame_count - 1;
#ifdef CONFIG_B43_DEBUG
	if (frame_failed)
		ring->nr_failed_tx_packets++;
	else
		ring->nr_succeed_tx_packets++;
	ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
}
/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	spin_lock(&ring->lock);	/* IRQs are already disabled. */

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			B43_WARN_ON(!meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			b43_fill_txstatus_report(ring, &(meta->txstat), status);
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
						    &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and freed. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}

	spin_unlock(&ring->lock);
}
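/*
 * The loop above walks the same two-slot layout that dma_tx_fragment()
 * created: the header slot (no skb attached) is unmapped with the txhdr
 * size, the payload slot is unmapped with the skb length and handed back
 * to mac80211. The is_last_fragment flag terminates the walk.
 */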
void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = select_ring_by_priority(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;
}
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
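/*
 * The hardware advances its RX slot pointer as frames arrive; the driver
 * consumes everything between its own current_slot and the hardware
 * pointer, then writes the slot index back so the device can reuse the
 * processed buffers.
 */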
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}
void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}