2 * Copyright (c) 2010 Broadcom Corporation
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 #include <linux/slab.h>
17 #include <linux/skbuff.h>
18 #include <linux/delay.h>
19 #include <linux/pci.h>
21 #include <brcmu_utils.h>
27 * DMA hardware requires each descriptor ring to be 8 KB aligned and to fit
28 * within a contiguous 8 KB physical address range.
30 #define D64RINGALIGN_BITS 13
31 #define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
32 #define D64RINGALIGN (1 << D64RINGALIGN_BITS)
34 #define D64MAXDD (D64MAXRINGSZ / sizeof(struct dma64desc))
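/*
 * Worked example (editorial, for illustration): struct dma64desc defined below
 * is four __le32 words, i.e. 16 bytes, so D64MAXRINGSZ / sizeof(struct dma64desc)
 * works out to 8192 / 16 = 512 descriptors as the maximum per-channel ring size.
 */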
36 /* transmit channel control */
37 #define D64_XC_XE 0x00000001 /* transmit enable */
38 #define D64_XC_SE 0x00000002 /* transmit suspend request */
39 #define D64_XC_LE 0x00000004 /* loopback enable */
40 #define D64_XC_FL 0x00000010 /* flush request */
41 #define D64_XC_PD 0x00000800 /* parity check disable */
42 #define D64_XC_AE 0x00030000 /* address extension bits */
43 #define D64_XC_AE_SHIFT 16
45 /* transmit descriptor table pointer */
46 #define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */
48 /* transmit channel status */
49 #define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
50 #define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
51 #define D64_XS0_XS_SHIFT 28
52 #define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
53 #define D64_XS0_XS_ACTIVE 0x10000000 /* active */
54 #define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
55 #define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
56 #define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
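/*
 * Illustrative sketch (editorial, not part of the original driver): the
 * transmit state is read from status0 by masking and comparing, e.g.
 *
 *	u32 xs = R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK;
 *	if (xs == D64_XS0_XS_IDLE)
 *		;	// engine has drained its posted descriptors
 *
 * dma_txreset() below spin-waits on exactly this field.
 */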
58 #define D64_XS1_AD_MASK 0x00001fff /* active descriptor */
59 #define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
60 #define D64_XS1_XE_SHIFT 28
61 #define D64_XS1_XE_NOERR 0x00000000 /* no error */
62 #define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
63 #define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
64 #define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
65 #define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
66 #define D64_XS1_XE_COREE 0x50000000 /* core error */
68 /* receive channel control */
70 #define D64_RC_RE 0x00000001
71 /* receive frame offset */
72 #define D64_RC_RO_MASK 0x000000fe
73 #define D64_RC_RO_SHIFT 1
74 /* direct fifo receive (pio) mode */
75 #define D64_RC_FM 0x00000100
76 /* separate rx header descriptor enable */
77 #define D64_RC_SH 0x00000200
78 /* overflow continue */
79 #define D64_RC_OC 0x00000400
80 /* parity check disable */
81 #define D64_RC_PD 0x00000800
82 /* address extension bits */
83 #define D64_RC_AE 0x00030000
84 #define D64_RC_AE_SHIFT 16
86 /* flags for dma controller */
88 #define DMA_CTRL_PEN (1 << 0)
89 /* rx overflow continue */
90 #define DMA_CTRL_ROC (1 << 1)
91 /* allow rx scatter to multiple descriptors */
92 #define DMA_CTRL_RXMULTI (1 << 2)
93 /* Unframed Rx/Tx data */
94 #define DMA_CTRL_UNFRAMED (1 << 3)
96 /* receive descriptor table pointer */
97 #define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
99 /* receive channel status */
100 #define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
101 #define D64_RS0_RS_MASK 0xf0000000 /* receive state */
102 #define D64_RS0_RS_SHIFT 28
103 #define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
104 #define D64_RS0_RS_ACTIVE 0x10000000 /* active */
105 #define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
106 #define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
107 #define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
109 #define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
110 #define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
111 #define D64_RS1_RE_SHIFT 28
112 #define D64_RS1_RE_NOERR 0x00000000 /* no error */
113 #define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
114 #define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
115 #define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
116 #define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
117 #define D64_RS1_RE_COREE 0x50000000 /* core error */
120 #define D64_FA_OFF_MASK 0xffff /* offset */
121 #define D64_FA_SEL_MASK 0xf0000 /* select */
122 #define D64_FA_SEL_SHIFT 16
123 #define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
124 #define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
125 #define D64_FA_SEL_RDD 0x40000 /* receive dma data */
126 #define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
127 #define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
128 #define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
129 #define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
130 #define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
131 #define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
132 #define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
134 /* descriptor control flags 1 */
135 #define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
136 #define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */
137 #define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */
138 #define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */
139 #define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */
141 /* descriptor control flags 2 */
142 /* buffer byte count; the real data length must be <= 16 KB */
143 #define D64_CTRL2_BC_MASK 0x00007fff
144 /* address extension bits */
145 #define D64_CTRL2_AE 0x00030000
146 #define D64_CTRL2_AE_SHIFT 16
148 #define D64_CTRL2_PARITY 0x00040000
150 /* control flags in the range [27:20] are core-specific and not defined here */
151 #define D64_CTRL_CORE_MASK 0x0ff00000
153 #define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
154 #define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
155 #define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */
156 #define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */
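/*
 * Editorial note: dma_rx() below recovers the received frame length by reading
 * a little-endian 16-bit value from the start of the buffer (head->data); this
 * corresponds to the D64_RX_FRM_STS_LEN field of the receive frame status that
 * the engine deposits at the head of each rx buffer (hence the rxoffset
 * handling further down).
 */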
159 * packet headroom necessary to accommodate the largest header
160 * in the system (i.e. TXOFF). By doing so, we avoid the need to
161 * allocate an extra buffer for the header when bridging to WL.
162 * There is a compile-time check in wlc.c which ensures that this
163 * value is at least as big as TXOFF. This value is used in
167 #define BCMEXTRAHDROOM 172
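/*
 * Usage sketch (editorial, for illustration): dma_rxfill() below allocates
 * each rx buffer with the extra headroom included and then pulls it back out,
 * roughly
 *
 *	p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
 *	skb_pull(p, extra_offset);	// reserve headroom for a future tx header
 *
 * so a packet bridged to the tx path can have up to TXOFF bytes of header
 * prepended without a copy.
 */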
171 #define DMA_ERROR(args) \
173 if (!(*di->msg_level & 1)) \
178 #define DMA_TRACE(args) \
180 if (!(*di->msg_level & 2)) \
186 #define DMA_ERROR(args)
187 #define DMA_TRACE(args)
190 #define DMA_NONE(args)
192 #define MAXNAMEL 8 /* 8 char names */
194 /* macros to convert between byte offsets and indexes */
195 #define B2I(bytes, type) ((bytes) / sizeof(type))
196 #define I2B(index, type) ((index) * sizeof(type))
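/*
 * Worked example (editorial): with struct dma64desc being 16 bytes,
 * B2I(0x40, struct dma64desc) == 4 and I2B(4, struct dma64desc) == 0x40.
 * The code below uses these to translate between a descriptor index and the
 * byte offset kept in the hardware ptr/status registers, e.g.
 *
 *	curr = B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
 *		    di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
 */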
198 #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
199 #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
201 #define PCI64ADDR_HIGH 0x80000000 /* address[63] */
202 #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
206 * Descriptors are only read by the hardware, never written back.
209 __le32 ctrl1; /* misc control bits & bufcount */
210 __le32 ctrl2; /* buffer count and address extension */
211 __le32 addrlow; /* memory address of the data buffer, bits 31:0 */
212 __le32 addrhigh; /* memory address of the data buffer, bits 63:32 */
215 /* dma engine software state */
217 struct dma_pub dma; /* exported structure */
218 uint *msg_level; /* message level pointer */
219 char name[MAXNAMEL]; /* caller's name for diag msgs */
221 struct pci_dev *pbus; /* bus handle */
223 bool dma64; /* this dma engine is operating in 64-bit mode */
224 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
226 /* 64-bit dma tx engine registers */
227 struct dma64regs __iomem *d64txregs;
228 /* 64-bit dma rx engine registers */
229 struct dma64regs __iomem *d64rxregs;
230 /* pointer to dma64 tx descriptor ring */
231 struct dma64desc *txd64;
232 /* pointer to dma64 rx descriptor ring */
233 struct dma64desc *rxd64;
235 u16 dmadesc_align; /* alignment requirement for dma descriptors */
237 u16 ntxd; /* # tx descriptors tunable */
238 u16 txin; /* index of next descriptor to reclaim */
239 u16 txout; /* index of next descriptor to post */
240 /* pointer to parallel array of pointers to packets */
241 struct sk_buff **txp;
242 /* Aligned physical address of descriptor ring */
244 /* Original physical address of descriptor ring */
245 dma_addr_t txdpaorig;
246 u16 txdalign; /* #bytes added to alloc'd mem to align txd */
247 u32 txdalloc; /* #bytes allocated for the ring */
248 u32 xmtptrbase; /* When using unaligned descriptors, the ptr register
249 * is not just an index; it needs all 13 bits to be
250 * an offset from the addr register.
253 u16 nrxd; /* # rx descriptors tunable */
254 u16 rxin; /* index of next descriptor to reclaim */
255 u16 rxout; /* index of next descriptor to post */
256 /* pointer to parallel array of pointers to packets */
257 struct sk_buff **rxp;
258 /* Aligned physical address of descriptor ring */
260 /* Original physical address of descriptor ring */
261 dma_addr_t rxdpaorig;
262 u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
263 u32 rxdalloc; /* #bytes allocated for the ring */
264 u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
267 unsigned int rxbufsize; /* rx buffer size in bytes, not including
270 uint rxextrahdrroom; /* extra rx headroom, reserved to assist upper
271 * stack, e.g. some rx pkt buffers will be
272 * bridged to tx side without byte copying.
273 * The extra headroom needs to be large enough
274 * to fit tx header needs. Some dongle drivers may
277 uint nrxpost; /* # rx buffers to keep posted */
278 unsigned int rxoffset; /* rxcontrol offset */
279 /* add to get dma address of descriptor ring, low 32 bits */
283 /* add to get dma address of data buffer, low 32 bits */
287 /* whether the descriptor base needs to be aligned or not */
292 * default dma message level (if input msg_level
293 * pointer is null in dma_attach())
295 static uint dma_msg_level;
297 /* Check for odd number of 1's */
298 static u32 parity32(__le32 data)
300 /* no swap needed for counting 1's */
301 u32 par_data = *(u32 *)&data;
303 par_data ^= par_data >> 16;
304 par_data ^= par_data >> 8;
305 par_data ^= par_data >> 4;
306 par_data ^= par_data >> 2;
307 par_data ^= par_data >> 1;
312 static bool dma64_dd_parity(struct dma64desc *dd)
314 return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
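/*
 * Editorial note: the parity here is the XOR of all bits of the four
 * descriptor words, e.g. parity32(cpu_to_le32(0x00000001)) == 1 (odd).
 * When parity checking is enabled, dma64_dd_upd() below sets
 * D64_CTRL2_PARITY whenever the descriptor would otherwise have odd parity,
 * so the descriptor as written always ends up with even parity.
 */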
317 /* descriptor bumping functions */
319 static uint xxd(uint x, uint n)
321 return x & (n - 1); /* faster than %, but n must be a power of 2 */
324 static uint txd(struct dma_info *di, uint x)
326 return xxd(x, di->ntxd);
329 static uint rxd(struct dma_info *di, uint x)
331 return xxd(x, di->nrxd);
334 static uint nexttxd(struct dma_info *di, uint i)
336 return txd(di, i + 1);
339 static uint prevtxd(struct dma_info *di, uint i)
341 return txd(di, i - 1);
344 static uint nextrxd(struct dma_info *di, uint i)
346 return rxd(di, i + 1);
349 static uint ntxdactive(struct dma_info *di, uint h, uint t)
354 static uint nrxdactive(struct dma_info *di, uint h, uint t)
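/*
 * Worked example (editorial, assuming the elided bodies compute (t - h)
 * modulo the ring size): with ntxd == 64, txin == 62 and txout == 3,
 * ntxdactive(di, 62, 3) == (3 - 62) & 63 == 5 descriptors still owned by
 * the hardware.
 */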
359 static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
361 uint dmactrlflags = di->dma.dmactrlflags;
364 DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name));
368 dmactrlflags &= ~mask;
369 dmactrlflags |= flags;
371 /* If trying to enable parity, check if parity is actually supported */
372 if (dmactrlflags & DMA_CTRL_PEN) {
375 control = R_REG(&di->d64txregs->control);
376 W_REG(&di->d64txregs->control,
377 control | D64_XC_PD);
378 if (R_REG(&di->d64txregs->control) & D64_XC_PD)
379 /* We *can* disable it so it is supported,
380 * restore control register
382 W_REG(&di->d64txregs->control,
385 /* Not supported, don't allow it to be enabled */
386 dmactrlflags &= ~DMA_CTRL_PEN;
389 di->dma.dmactrlflags = dmactrlflags;
394 static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
397 OR_REG(&dma64regs->control, D64_XC_AE);
398 w = R_REG(&dma64regs->control);
399 AND_REG(&dma64regs->control, ~D64_XC_AE);
400 return (w & D64_XC_AE) == D64_XC_AE;
404 * return true if this dma engine supports DmaExtendedAddrChanges,
407 static bool _dma_isaddrext(struct dma_info *di)
409 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
411 /* not all tx or rx channels are available */
412 if (di->d64txregs != NULL) {
413 if (!_dma64_addrext(di->d64txregs))
414 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
415 "AE set\n", di->name));
417 } else if (di->d64rxregs != NULL) {
418 if (!_dma64_addrext(di->d64rxregs))
419 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
420 "AE set\n", di->name));
427 static bool _dma_descriptor_align(struct dma_info *di)
431 /* Check to see if the descriptors need to be aligned on 4K/8K or not */
432 if (di->d64txregs != NULL) {
433 W_REG(&di->d64txregs->addrlow, 0xff0);
434 addrl = R_REG(&di->d64txregs->addrlow);
437 } else if (di->d64rxregs != NULL) {
438 W_REG(&di->d64rxregs->addrlow, 0xff0);
439 addrl = R_REG(&di->d64rxregs->addrlow);
447 * Descriptor table must start at the DMA hardware dictated alignment, so
448 * allocated memory must be large enough to support this requirement.
450 static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
451 u16 align_bits, uint *alloced,
455 u16 align = (1 << align_bits);
456 if (!IS_ALIGNED(PAGE_SIZE, align))
460 return pci_alloc_consistent(pdev, size, pap);
464 u8 dma_align_sizetobits(uint size)
472 /* This function ensures that the DMA descriptor ring does not get allocated
473 * across a page boundary. If the first allocation does cross a page boundary,
474 * it is freed and the allocation is redone at a location aligned to the
475 * descriptor ring size, which guarantees that the ring will
476 * not cross a page boundary
478 static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
479 u16 *alignbits, uint *alloced,
484 u32 alignbytes = 1 << *alignbits;
486 va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
491 desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
492 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
494 *alignbits = dma_align_sizetobits(size);
495 pci_free_consistent(di->pbus, size, va, *descpa);
496 va = dma_alloc_consistent(di->pbus, size, *alignbits,
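/*
 * Editorial sketch of the retry above: if the aligned ring would straddle the
 * 'boundary' window (e.g. the 8 KB D64RINGALIGN window), the buffer is freed
 * and re-allocated with *alignbits raised to dma_align_sizetobits(size)
 * (presumably log2 of the ring size, e.g. 13 for an 8 KB ring), so the ring
 * then necessarily sits inside a single window.
 */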
502 static bool dma64_alloc(struct dma_info *di, uint direction)
511 ddlen = sizeof(struct dma64desc);
513 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
514 align_bits = di->dmadesc_align;
515 align = (1 << align_bits);
517 if (direction == DMA_TX) {
518 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
519 &alloced, &di->txdpaorig);
521 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
522 " failed\n", di->name));
525 align = (1 << align_bits);
526 di->txd64 = (struct dma64desc *)
527 roundup((unsigned long)va, align);
528 di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
529 di->txdpa = di->txdpaorig + di->txdalign;
530 di->txdalloc = alloced;
532 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
533 &alloced, &di->rxdpaorig);
535 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
536 " failed\n", di->name));
539 align = (1 << align_bits);
540 di->rxd64 = (struct dma64desc *)
541 roundup((unsigned long)va, align);
542 di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
543 di->rxdpa = di->rxdpaorig + di->rxdalign;
544 di->rxdalloc = alloced;
550 static bool _dma_alloc(struct dma_info *di, uint direction)
552 return dma64_alloc(di, direction);
555 struct dma_pub *dma_attach(char *name, struct si_pub *sih,
556 void __iomem *dmaregstx, void __iomem *dmaregsrx,
557 uint ntxd, uint nrxd,
558 uint rxbufsize, int rxextheadroom,
559 uint nrxpost, uint rxoffset, uint *msg_level)
564 /* allocate private info structure */
565 di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
569 di->msg_level = msg_level ? msg_level : &dma_msg_level;
572 di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
574 /* init dma reg pointer */
575 di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
576 di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;
579 * Default flags (which can be changed by the driver calling
580 * dma_ctrlflags before enable): For backwards compatibility
581 * both Rx Overflow Continue and Parity are DISABLED.
583 _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
585 DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
586 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
587 "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
588 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
589 rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
591 /* make a private copy of our caller's name */
592 strncpy(di->name, name, MAXNAMEL);
593 di->name[MAXNAMEL - 1] = '\0';
595 di->pbus = ((struct si_info *)sih)->pbus;
598 di->ntxd = (u16) ntxd;
599 di->nrxd = (u16) nrxd;
601 /* the actual dma size doesn't include the extra headroom */
603 (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
604 if (rxbufsize > BCMEXTRAHDROOM)
605 di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
607 di->rxbufsize = (u16) rxbufsize;
609 di->nrxpost = (u16) nrxpost;
610 di->rxoffset = (u8) rxoffset;
613 * figure out the DMA physical address offset for dd and data
614 *     PCI/PCIE: they map the silicon backplane address to zero-
615 *     based memory, so an offset is needed
616 *     Other bus: use zero; SI_BUS BIGENDIAN kludge: use the sdram
617 *     swapped region for data buffers, not descriptors
620 di->dataoffsetlow = 0;
621 /* add offset for pcie with DMA64 bus */
623 di->ddoffsethigh = SI_PCIE_DMA_H32;
624 di->dataoffsetlow = di->ddoffsetlow;
625 di->dataoffsethigh = di->ddoffsethigh;
626 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
627 if ((ai_coreid(sih) == SDIOD_CORE_ID)
628 && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
630 else if ((ai_coreid(sih) == I2S_CORE_ID) &&
631 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
634 di->addrext = _dma_isaddrext(di);
636 /* does the descriptor need to be aligned and if yes, on 4K/8K or not */
637 di->aligndesc_4k = _dma_descriptor_align(di);
638 if (di->aligndesc_4k) {
639 di->dmadesc_align = D64RINGALIGN_BITS;
640 if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
641 /* for a smaller dd table, the HW relaxes the alignment requirement */
642 di->dmadesc_align = D64RINGALIGN_BITS - 1;
644 di->dmadesc_align = 4; /* 16 byte alignment */
647 DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
648 di->aligndesc_4k, di->dmadesc_align));
650 /* allocate tx packet pointer vector */
652 size = ntxd * sizeof(void *);
653 di->txp = kzalloc(size, GFP_ATOMIC);
658 /* allocate rx packet pointer vector */
660 size = nrxd * sizeof(void *);
661 di->rxp = kzalloc(size, GFP_ATOMIC);
667 * allocate transmit descriptor ring, only need ntxd descriptors
668 * but it must be aligned
671 if (!_dma_alloc(di, DMA_TX))
676 * allocate receive descriptor ring, only need nrxd descriptors
677 * but it must be aligned
680 if (!_dma_alloc(di, DMA_RX))
684 if ((di->ddoffsetlow != 0) && !di->addrext) {
685 if (di->txdpa > SI_PCI_DMA_SZ) {
686 DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
687 "supported\n", di->name, (u32)di->txdpa));
690 if (di->rxdpa > SI_PCI_DMA_SZ) {
691 DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
692 "supported\n", di->name, (u32)di->rxdpa));
697 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
698 "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
699 di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
702 return (struct dma_pub *) di;
705 dma_detach((struct dma_pub *)di);
710 dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
711 dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
713 u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
715 /* PCI bus with big (>1 GB) physical addresses: use address extension */
716 if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
717 ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
718 ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
719 ddring[outidx].ctrl1 = cpu_to_le32(*flags);
720 ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
722 /* address extension for 32-bit PCI */
725 ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
726 pa &= ~PCI32ADDR_HIGH;
728 ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
729 ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
730 ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
731 ddring[outidx].ctrl1 = cpu_to_le32(*flags);
732 ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
734 if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
735 if (dma64_dd_parity(&ddring[outidx]))
736 ddring[outidx].ctrl2 =
737 cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
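	/*
	 * Worked example (editorial): posting a 1500-byte buffer as descriptor 0
	 * of a ring, with no address extension in play and caller flags of 0,
	 * ends up roughly as
	 *
	 *	ddring[0].addrlow  = cpu_to_le32(pa + di->dataoffsetlow);
	 *	ddring[0].addrhigh = cpu_to_le32(di->dataoffsethigh);
	 *	ddring[0].ctrl1    = cpu_to_le32(0);    // flags from the caller
	 *	ddring[0].ctrl2    = cpu_to_le32(1500); // D64_CTRL2_BC_MASK part
	 *
	 * plus D64_CTRL2_PARITY in ctrl2 if parity is enabled and needed.
	 */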
741 /* !! may be called with core in reset */
742 void dma_detach(struct dma_pub *pub)
744 struct dma_info *di = (struct dma_info *)pub;
746 DMA_TRACE(("%s: dma_detach\n", di->name));
748 /* free dma descriptor rings */
750 pci_free_consistent(di->pbus, di->txdalloc,
751 ((s8 *)di->txd64 - di->txdalign),
754 pci_free_consistent(di->pbus, di->rxdalloc,
755 ((s8 *)di->rxd64 - di->rxdalign),
758 /* free packet pointer vectors */
762 /* free our private info structure */
767 /* initialize descriptor table base address */
769 _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
771 if (!di->aligndesc_4k) {
772 if (direction == DMA_TX)
778 if ((di->ddoffsetlow == 0)
779 || !(pa & PCI32ADDR_HIGH)) {
780 if (direction == DMA_TX) {
781 W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
782 W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
784 W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
785 W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
788 /* DMA64 32-bit address extension */
791 /* shift the high bit(s) from pa to ae */
792 ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
793 pa &= ~PCI32ADDR_HIGH;
795 if (direction == DMA_TX) {
796 W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
797 W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
798 SET_REG(&di->d64txregs->control,
799 D64_XC_AE, (ae << D64_XC_AE_SHIFT));
801 W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
802 W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
803 SET_REG(&di->d64rxregs->control,
804 D64_RC_AE, (ae << D64_RC_AE_SHIFT));
809 static void _dma_rxenable(struct dma_info *di)
811 uint dmactrlflags = di->dma.dmactrlflags;
814 DMA_TRACE(("%s: dma_rxenable\n", di->name));
817 (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
820 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
821 control |= D64_RC_PD;
823 if (dmactrlflags & DMA_CTRL_ROC)
824 control |= D64_RC_OC;
826 W_REG(&di->d64rxregs->control,
827 ((di->rxoffset << D64_RC_RO_SHIFT) | control));
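	/*
	 * Editorial note: the write above ends up programming, roughly,
	 *	(di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE | <existing AE bits>
	 * with D64_RC_PD added when parity checking is disabled and D64_RC_OC
	 * added when overflow-continue is requested.
	 */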
830 void dma_rxinit(struct dma_pub *pub)
832 struct dma_info *di = (struct dma_info *)pub;
834 DMA_TRACE(("%s: dma_rxinit\n", di->name));
839 di->rxin = di->rxout = 0;
841 /* clear rx descriptor ring */
842 memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));
844 /* A DMA engine without an alignment requirement requires the table to be
845 * initialized before enabling the engine
847 if (!di->aligndesc_4k)
848 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
852 if (di->aligndesc_4k)
853 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
856 static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
864 /* return if no packets posted */
869 B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
870 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
872 /* ignore curr if forceall */
873 if (!forceall && (i == curr))
876 /* get the packet pointer that corresponds to the rx descriptor */
880 pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
882 /* clear this packet from the descriptor ring */
883 pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
885 di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
886 di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
888 di->rxin = nextrxd(di, i);
893 static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
898 return dma64_getnextrxp(di, forceall);
902 * !! rx entry routine
903 * returns a pointer to the next frame received, or NULL if there are no more
904 *   if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
905 *   supported with a packet chain;
906 *   otherwise, the frame is treated as a giant packet and will be tossed.
907 *   The DMA scattering starts with a normal DMA header, followed by the first
908 *   buffer's data. After it reaches the max buffer size, the data continues
909 *   in the next DMA descriptor buffer WITHOUT a DMA header
911 struct sk_buff *dma_rx(struct dma_pub *pub)
913 struct dma_info *di = (struct dma_info *)pub;
914 struct sk_buff *p, *head, *tail;
920 head = _dma_getnextrxp(di, false);
924 len = le16_to_cpu(*(__le16 *) (head->data));
925 DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
926 dma_spin_for_len(len, head);
928 /* set actual length */
929 pkt_len = min((di->rxoffset + len), di->rxbufsize);
930 __skb_trim(head, pkt_len);
931 resid = len - (di->rxbufsize - di->rxoffset);
933 /* check for single or multi-buffer rx */
936 while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
938 pkt_len = min_t(uint, resid, di->rxbufsize);
939 __skb_trim(p, pkt_len);
942 resid -= di->rxbufsize;
949 B2I(((R_REG(&di->d64rxregs->status0) &
951 di->rcvptrbase) & D64_RS0_CD_MASK,
953 DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
954 di->rxin, di->rxout, cur));
958 if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
959 DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
961 brcmu_pkt_buf_free_skb(head);
970 static bool dma64_rxidle(struct dma_info *di)
972 DMA_TRACE(("%s: dma_rxidle\n", di->name));
977 return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
978 (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
982 * post receive buffers
983 *  return false if the refill failed completely and the ring is empty; this
984 *  will stall the rx dma and the caller might want to call rxfill again asap.
985 *  This rarely happens on a memory-rich NIC, but often on a memory-constrained dongle
987 bool dma_rxfill(struct dma_pub *pub)
989 struct dma_info *di = (struct dma_info *)pub;
996 uint extra_offset = 0;
1002 * Determine how many receive buffers we're lacking
1003 * from the full complement, allocate, initialize,
1004 * and post them, then update the chip rx lastdscr.
1010 n = di->nrxpost - nrxdactive(di, rxin, rxout);
1012 DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
1014 if (di->rxbufsize > BCMEXTRAHDROOM)
1015 extra_offset = di->rxextrahdrroom;
1017 for (i = 0; i < n; i++) {
1019 * the di->rxbufsize doesn't include the extra headroom,
1020 * so we need to add it to the size to be allocated
1022 p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
1025 DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
1027 if (i == 0 && dma64_rxidle(di)) {
1028 DMA_ERROR(("%s: rxfill64: ring is empty !\n",
1035 /* reserve an extra headroom, if applicable */
1037 skb_pull(p, extra_offset);
1039 /* Do a cached write instead of uncached write since DMA_MAP
1040 * will flush the cache.
1042 *(u32 *) (p->data) = 0;
1044 pa = pci_map_single(di->pbus, p->data,
1045 di->rxbufsize, PCI_DMA_FROMDEVICE);
1047 /* save the free packet pointer */
1050 /* reset flags for each descriptor */
1052 if (rxout == (di->nrxd - 1))
1053 flags = D64_CTRL1_EOT;
1055 dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
1057 rxout = nextrxd(di, rxout);
1062 /* update the chip lastdscr pointer */
1063 W_REG(&di->d64rxregs->ptr,
1064 di->rcvptrbase + I2B(rxout, struct dma64desc));
1069 void dma_rxreclaim(struct dma_pub *pub)
1071 struct dma_info *di = (struct dma_info *)pub;
1074 DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
1076 while ((p = _dma_getnextrxp(di, true)))
1077 brcmu_pkt_buf_free_skb(p);
1080 void dma_counterreset(struct dma_pub *pub)
1082 /* reset all software counters */
1088 /* get the address of the variable in order to change it later */
1089 unsigned long dma_getvar(struct dma_pub *pub, const char *name)
1091 struct dma_info *di = (struct dma_info *)pub;
1093 if (!strcmp(name, "&txavail"))
1094 return (unsigned long)&(di->dma.txavail);
1098 /* 64-bit DMA functions */
1100 void dma_txinit(struct dma_pub *pub)
1102 struct dma_info *di = (struct dma_info *)pub;
1103 u32 control = D64_XC_XE;
1105 DMA_TRACE(("%s: dma_txinit\n", di->name));
1110 di->txin = di->txout = 0;
1111 di->dma.txavail = di->ntxd - 1;
1113 /* clear tx descriptor ring */
1114 memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));
1116 /* A DMA engine without an alignment requirement requires the table to be
1117 * initialized before enabling the engine
1119 if (!di->aligndesc_4k)
1120 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1122 if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
1123 control |= D64_XC_PD;
1124 OR_REG(&di->d64txregs->control, control);
1126 /* A DMA engine with an alignment requirement requires the table to be
1127 * initialized before enabling the engine
1129 if (di->aligndesc_4k)
1130 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1133 void dma_txsuspend(struct dma_pub *pub)
1135 struct dma_info *di = (struct dma_info *)pub;
1137 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
1142 OR_REG(&di->d64txregs->control, D64_XC_SE);
1145 void dma_txresume(struct dma_pub *pub)
1147 struct dma_info *di = (struct dma_info *)pub;
1149 DMA_TRACE(("%s: dma_txresume\n", di->name));
1154 AND_REG(&di->d64txregs->control, ~D64_XC_SE);
1157 bool dma_txsuspended(struct dma_pub *pub)
1159 struct dma_info *di = (struct dma_info *)pub;
1161 return (di->ntxd == 0) ||
1162 ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
1166 void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
1168 struct dma_info *di = (struct dma_info *)pub;
1171 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
1172 (range == DMA_RANGE_ALL) ? "all" :
1174 DMA_RANGE_TRANSMITTED) ? "transmitted" :
1177 if (di->txin == di->txout)
1180 while ((p = dma_getnexttxp(pub, range))) {
1181 /* For unframed data, we don't have any packets to free */
1182 if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
1183 brcmu_pkt_buf_free_skb(p);
1187 bool dma_txreset(struct dma_pub *pub)
1189 struct dma_info *di = (struct dma_info *)pub;
1195 /* suspend tx DMA first */
1196 W_REG(&di->d64txregs->control, D64_XC_SE);
1198 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
1199 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
1200 && (status != D64_XS0_XS_STOPPED), 10000);
1202 W_REG(&di->d64txregs->control, 0);
1204 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
1205 != D64_XS0_XS_DISABLED), 10000);
1207 /* wait for the last transaction to complete */
1210 return status == D64_XS0_XS_DISABLED;
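/*
 * Editorial note on the sequence above: the engine is first suspended
 * (D64_XC_SE) and spin-waited until the state field reports idle, stopped or
 * disabled; the control register is then cleared and the code waits, within
 * the 10000 SPINWAIT budget, for D64_XS0_XS_DISABLED before declaring the
 * reset successful.
 */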
1213 bool dma_rxreset(struct dma_pub *pub)
1215 struct dma_info *di = (struct dma_info *)pub;
1221 W_REG(&di->d64rxregs->control, 0);
1223 (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
1224 != D64_RS0_RS_DISABLED), 10000);
1226 return status == D64_RS0_RS_DISABLED;
1230 * !! tx entry routine
1231 * WARNING: the caller must check the return value for errors.
1232 *   the error (tossed frames) could be fatal and cause many subsequent hard
1235 int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
1237 struct dma_info *di = (struct dma_info *)pub;
1238 struct sk_buff *p, *next;
1239 unsigned char *data;
1245 DMA_TRACE(("%s: dma_txfast\n", di->name));
1250 * Walk the chain of packet buffers
1251 * allocating and initializing transmit descriptor entries.
1253 for (p = p0; p; p = next) {
1258 /* return nonzero if out of tx descriptors */
1259 if (nexttxd(di, txout) == di->txin)
1265 /* get physical address of buffer start */
1266 pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
1270 flags |= D64_CTRL1_SOF;
1272 /* With a DMA segment list, Descriptor table is filled
1273 * using the segment list instead of looping over
1274 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
1275 * is when end of segment list is reached.
1278 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
1279 if (txout == (di->ntxd - 1))
1280 flags |= D64_CTRL1_EOT;
1282 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
1284 txout = nexttxd(di, txout);
1287 /* if last txd eof not set, fix it */
1288 if (!(flags & D64_CTRL1_EOF))
1289 di->txd64[prevtxd(di, txout)].ctrl1 =
1290 cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
1292 /* save the packet */
1293 di->txp[prevtxd(di, txout)] = p0;
1295 /* bump the tx descriptor index */
1300 W_REG(&di->d64txregs->ptr,
1301 di->xmtptrbase + I2B(txout, struct dma64desc));
1303 /* tx flow control */
1304 di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
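	/*
	 * Editorial note: one descriptor is deliberately left unused (the "- 1"
	 * above), so a full ring never has txout equal to txin; that way
	 * txin == txout can unambiguously mean "ring empty" in the reclaim path,
	 * while dma_txfast() detects a full ring via nexttxd(di, txout) == txin.
	 */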
1309 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
1310 brcmu_pkt_buf_free_skb(p0);
1311 di->dma.txavail = 0;
1317 * Reclaim next completed txd (txds if using chained buffers) in the range
1318 * specified and return associated packet.
1319 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
1320 * transmitted as noted by the hardware "CurrDescr" pointer.
1321 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
1322 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
1323 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
1324 * return associated packet regardless of the value of hardware pointers.
1326 struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
1328 struct dma_info *di = (struct dma_info *)pub;
1331 struct sk_buff *txp;
1333 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
1334 (range == DMA_RANGE_ALL) ? "all" :
1336 DMA_RANGE_TRANSMITTED) ? "transmitted" :
1345 if (range == DMA_RANGE_ALL)
1348 struct dma64regs __iomem *dregs = di->d64txregs;
1350 end = (u16) (B2I(((R_REG(&dregs->status0) &
1352 di->xmtptrbase) & D64_XS0_CD_MASK,
1355 if (range == DMA_RANGE_TRANSFERED) {
1357 (u16) (R_REG(&dregs->status1) &
1360 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
1361 active_desc = B2I(active_desc, struct dma64desc);
1362 if (end != active_desc)
1363 end = prevtxd(di, active_desc);
1367 if ((start == 0) && (end > di->txout))
1370 for (i = start; i != end && !txp; i = nexttxd(di, i)) {
1374 pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;
1377 (le32_to_cpu(di->txd64[i].ctrl2) &
1380 di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef);
1381 di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
1386 pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
1391 /* tx flow control */
1392 di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
1397 DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
1398 "force %d\n", start, end, di->txout, forceall));
1403 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
1404 * modified. The modified portion of the packet is not under control of the DMA
1405 * engine. This function calls a caller-supplied function for each packet in
1406 * the caller-specified dma chain.
1408 void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
1409 (void *pkt, void *arg_a), void *arg_a)
1411 struct dma_info *di = (struct dma_info *) dmah;
1413 uint end = di->txout;
1414 struct sk_buff *skb;
1415 struct ieee80211_tx_info *tx_info;
1418 skb = (struct sk_buff *)di->txp[i];
1420 tx_info = (struct ieee80211_tx_info *)skb->cb;
1421 (callback_fnc)(tx_info, arg_a);