/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>

/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2
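
/* dma_tx_fragment() below always consumes the slots pairwise: the first
 * slot of a frame carries the device TX header, the following slot the
 * ieee80211 frame body. */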

static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
			   enum b43_addrtype addrtype)
{
	u32 uninitialized_var(addr);

	switch (addrtype) {
	case B43_DMA_ADDR_LOW:
		addr = lower_32_bits(dmaaddr);
		if (dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_HIGH:
		addr = upper_32_bits(dmaaddr);
		if (!dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_EXT:
		if (dma->translation_in_low)
			addr = lower_32_bits(dmaaddr);
		else
			addr = upper_32_bits(dmaaddr);
		addr &= SSB_DMA_TRANSLATION_MASK;
		addr >>= SSB_DMA_TRANSLATION_SHIFT;
		break;
	}

	return addr;
}
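
/* Illustrative example (values not taken from real hardware): with
 * SSB_DMA_TRANSLATION_MASK == 0xC0000000 (bits 31:30, shift 30) and
 * dma->translation == 0x40000000, a low address word of 0x12345678 becomes
 * (0x12345678 & ~0xC0000000) | 0x40000000 == 0x52345678, while
 * B43_DMA_ADDR_EXT returns the untranslated bits 31:30 of the word for
 * the descriptor's ADDREXT field. */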

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
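
/* request_slot() itself never checks capacity. Callers must first ensure
 * free_slots(ring) >= TX_SLOTS_PER_FRAME, as b43_dma_tx() does before
 * handing a frame to dma_tx_fragment(). */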

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		if (ring->tx)
			ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
		else
			dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * In practice we could use smaller buffers for the latter, but the
	 * alignment is really important because of the hardware bug. If bit
	 * 0x00001000 is used in the DMA address, some hardware (like BCM4331)
	 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
	 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
	 * more than 256 slots for the ring.
	 */
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;

	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    ring_mem_size, &(ring->dmabase),
					    flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, ring_mem_size);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
	dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}

static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}
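
/* The bitwise AND of the first eight frame bytes equals 0xFF only if every
 * one of them still is 0xFF, i.e. the poison pattern written by
 * b43_poison_rx_buffer() was never overwritten by the device. */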

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset +
		    sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	bool parity = ring->dev->dma.parity;
	u32 addrlo;
	u32 addrhi;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
		if (tmp & BCMA_IOST_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
		if (tmp & SSB_TMSHIGH_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
	}

	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
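
/* Probe logic, for reference: the host bus (BCMA/SSB) directly advertises
 * 64-bit DMA capability. Failing that, writing the ADDREXT bits to the
 * first 32-bit TX control register and reading them back distinguishes a
 * 32-bit engine (the bits stick) from an old 30-bit one (they read back
 * as zero). */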

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots /
						    TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
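
/* do_div(n, base) divides n in place and evaluates to the remainder, so
 * e.g. divide(1234ULL, 10) == 123 while modulo(1234ULL, 10) == 4. */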

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so there is no need to take care of concurrency with the
	 * RX handler. */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

/* Some hardware with 64-bit DMA seems to be bugged and looks for the
 * translation bit in the low address word instead of the high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return true;

#ifdef CONFIG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
		return true;
#endif
	return false;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}
	dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
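
/* Example: for ring->index == 1 (the AC_BE controller) and slot 0x02A the
 * cookie is ((1 + 1) << 12) | 0x02A == 0x202A. parse_cookie() below maps
 * the 0x2000 nibble back to dma->tx_ring_AC_BE and the low bits back to
 * the slot number. */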

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;
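
	/* Per-frame layout: the first requested slot carries the device TX
	 * header (taken from txhdr_cache, FRAMESTART set), the second one
	 * the 802.11 frame body (FRAMEEND and IRQ set). On any error below,
	 * the saved old_top_slot/old_used_slots restore the ring state. */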
	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
	return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}
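
	/* Worked example of the wrap-around above: with nr_slots == 256,
	 * current_slot == 3 and used_slots == 10, firstused computes to
	 * 3 - 10 + 1 == -6 and is corrected to 256 + (-6) == 250. */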

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and freed. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}