/*
 * BRIEF MODULE DESCRIPTION
 *	The Descriptor Based DMA channel manager that first appeared
 *	on the Au1550. I started with dma.c, but I think all that is
 *	left is this initial comment :-)
 *
 * Copyright 2004 Embedded Edge, LLC
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>	/* udelay() in au1xxx_dbdma_stop() */
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>

#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
/*
 * The Descriptor Based DMA supports up to 16 channels.
 *
 * There are 32 devices defined. We keep an internal structure
 * of devices using these channels, along with additional
 * information.
 *
 * We allocate the descriptors and allow access to them through various
 * functions. The drivers allocate the data buffers and assign them
 * to the descriptors.
 */
static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
/* I couldn't find a macro that did this... */
#define ALIGN_ADDR(x, a)	((((u32)(x)) + (a-1)) & ~(a-1))
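/*
 * For example, ALIGN_ADDR(0x10f4, 32) adds 31 to give 0x1113, then
 * masks off the low five bits, yielding the 32-byte boundary 0x1100.
 * This assumes 'a' is a power of two, which holds for the descriptor
 * alignment used below.
 */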
static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE;
static int dbdma_initialized;
static void au1xxx_dbdma_init(void);
static dbdev_tab_t dbdev_tab[] = {
#ifdef CONFIG_SOC_AU1550
	{ DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
	{ DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
	{ DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
	{ DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x11400000, 0, 0 },

	{ DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN, 4, 8, 0x10200000, 0, 0 },
	{ DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
	{ DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
	{ DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
	{ DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN, 4, 8, 0x10200010, 0, 0 },
	{ DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN, 4, 8, 0x10200014, 0, 0 },

	{ DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
	{ DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 },

	{ DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
	{ DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 },

	{ DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
	{ DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 0, 0x10a0001c, 0, 0 },

	{ DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
	{ DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 0, 0x10b0001c, 0, 0 },

	{ DSCR_CMD0_PCI_WRITE, 0, 0, 0, 0x00000000, 0, 0 },	/* PCI */
	{ DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 },	/* NAND */

	{ DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

#endif /* CONFIG_SOC_AU1550 */
#ifdef CONFIG_SOC_AU1200
	{ DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
	{ DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
	{ DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
	{ DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x11200000, 0, 0 },

	{ DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
	{ DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
	{ DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
	{ DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 },

	{ DSCR_CMD0_AES_RX, DEV_FLAGS_IN, 4, 32, 0x10300008, 0, 0 },
	{ DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },

	{ DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
	{ DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x11a0001c, 0, 0 },
	{ DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
	{ DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x11b0001c, 0, 0 },
	{ DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
	{ DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
	{ DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
	{ DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },

#endif /* CONFIG_SOC_AU1200 */
	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	/* Provide 16 user-definable device types */
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
};

#define DBDEV_TAB_SIZE	ARRAY_SIZE(dbdev_tab)
#ifdef CONFIG_PM
static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][6];
#endif

static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
static dbdev_tab_t *find_dbdev_id(u32 id)
{
	int i;

	for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
		if (dbdev_tab[i].dev_id == id)
			return &dbdev_tab[i];
	}
	return NULL;
}

void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
{
	return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
}
EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);
u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
{
	u32 ret = 0;
	dbdev_tab_t *p;
	static u16 new_id = 0x1000;

	p = find_dbdev_id(~0);
	if (p != NULL) {
		memcpy(p, dev, sizeof(dbdev_tab_t));
		p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
		ret = p->dev_id;
		new_id++;

		printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
		       p->dev_id, p->dev_flags, p->dev_physaddr);
	}

	return ret;
}
EXPORT_SYMBOL(au1xxx_ddma_add_device);
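/*
 * Illustrative registration (hypothetical driver code, not part of this
 * file): clone a base device type, patch in the peripheral's FIFO
 * address, and register it; a return of 0 means the table is full.
 *
 *	dbdev_tab_t my_dev = {
 *		.dev_id = DSCR_CMD0_ALWAYS,	// base type to derive from
 *		.dev_flags = DEV_FLAGS_OUT,
 *		.dev_tsize = 0,
 *		.dev_devwidth = 8,
 *		.dev_physaddr = 0x11100004,	// example FIFO address
 *	};
 *	u32 id = au1xxx_ddma_add_device(&my_dev);
 */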
void au1xxx_ddma_del_device(u32 devid)
{
	dbdev_tab_t *p = find_dbdev_id(devid);

	if (p != NULL) {
		memset(p, 0, sizeof(dbdev_tab_t));
		p->dev_id = ~0;	/* mark the slot free for reuse */
	}
}
EXPORT_SYMBOL(au1xxx_ddma_del_device);
/* Allocate a channel and return a non-zero descriptor if successful. */
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
       void (*callback)(int, void *), void *callparam)
{
	unsigned long flags;
	u32 used, chan, rv;
	u32 dcp;
	int i;
	dbdev_tab_t *stp, *dtp;
	chan_tab_t *ctp;
	au1x_dma_chan_t *cp;

	/*
	 * We do the initialization on the first channel allocation.
	 * We have to wait because of the interrupt handler initialization,
	 * which can't be done successfully during board set up.
	 */
	if (!dbdma_initialized)
		au1xxx_dbdma_init();
	dbdma_initialized = 1;

	stp = find_dbdev_id(srcid);
	if (stp == NULL)
		return 0;
	dtp = find_dbdev_id(destid);
	if (dtp == NULL)
		return 0;

	used = 0;
	rv = 0;

	/* Check to see if we can get both channels. */
	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
	    (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
		/* Got source */
		stp->dev_flags |= DEV_FLAGS_INUSE;
		if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
		    (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
			/* Got destination */
			dtp->dev_flags |= DEV_FLAGS_INUSE;
		} else {
			/* Can't get dest.  Release src. */
			stp->dev_flags &= ~DEV_FLAGS_INUSE;
			used++;
		}
	} else
		used++;
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
	if (!used) {
		/* Let's see if we can allocate a channel for it. */
		ctp = NULL;

		spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
		for (i = 0; i < NUM_DBDMA_CHANS; i++)
			if (chan_tab_ptr[i] == NULL) {
				/*
				 * If kmalloc fails, it is caught below,
				 * same as a channel not being available.
				 */
				ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
				chan_tab_ptr[i] = ctp;
				break;
			}
		spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

		if (ctp != NULL) {
			memset(ctp, 0, sizeof(chan_tab_t));
			ctp->chan_index = chan = i;
			dcp = DDMA_CHANNEL_BASE;
			dcp += (0x0100 * chan);
			ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
			cp = (au1x_dma_chan_t *)dcp;
			ctp->chan_src = stp;
			ctp->chan_dest = dtp;
			ctp->chan_callback = callback;
			ctp->chan_callparam = callparam;

			/* Initialize channel configuration. */
			i = 0;
			if (stp->dev_intlevel)
				i |= DDMA_CFG_SED;
			if (stp->dev_intpolarity)
				i |= DDMA_CFG_SP;
			if (dtp->dev_intlevel)
				i |= DDMA_CFG_DED;
			if (dtp->dev_intpolarity)
				i |= DDMA_CFG_DP;
			if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
			    (dtp->dev_flags & DEV_FLAGS_SYNC))
				i |= DDMA_CFG_SYNC;
			cp->ddma_cfg = i;
			au_sync();

			/*
			 * Return a non-zero value that can be used to
			 * find the channel information in subsequent
			 * operations.
			 */
			rv = (u32)(&chan_tab_ptr[chan]);
		} else {
			/* Release devices */
			stp->dev_flags &= ~DEV_FLAGS_INUSE;
			dtp->dev_flags &= ~DEV_FLAGS_INUSE;
		}
	}
	return rv;
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
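/*
 * A minimal allocation sketch (hypothetical caller; my_done and my_dev
 * are illustrative names). Memory-to-FIFO here: DSCR_CMD0_ALWAYS as the
 * source "device" paired with a PSC transmit FIFO as the destination.
 *
 *	static void my_done(int irq, void *arg) { ... }
 *
 *	u32 chanid = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
 *					     DSCR_CMD0_PSC0_TX,
 *					     my_done, my_dev);
 *	if (chanid == 0)
 *		...	// device busy or no free channel
 */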
/*
 * Set the device width if source or destination is a FIFO.
 * Should be 8, 16, or 32 bits.
 */
u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
{
	u32 rv;
	chan_tab_t *ctp;
	dbdev_tab_t *stp, *dtp;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;
	rv = 0;

	if (stp->dev_flags & DEV_FLAGS_IN) {	/* Source in FIFO */
		rv = stp->dev_devwidth;
		stp->dev_devwidth = bits;
	}
	if (dtp->dev_flags & DEV_FLAGS_OUT) {	/* Destination out FIFO */
		rv = dtp->dev_devwidth;
		dtp->dev_devwidth = bits;
	}

	return rv;
}
EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
/* Allocate a descriptor ring, initializing as much as possible. */
u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
{
	int i;
	u32 desc_base, srcid, destid;
	u32 cmd0, cmd1, src1, dest1;
	u32 src0, dest0;
	chan_tab_t *ctp;
	dbdev_tab_t *stp, *dtp;
	au1x_ddma_desc_t *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;

	/*
	 * The descriptors must be 32-byte aligned. There is a
	 * possibility the allocation will give us such an address,
	 * and if we try that first we are likely to not waste larger
	 * slabs of memory.
	 */
	desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
				 GFP_KERNEL|GFP_DMA);
	if (desc_base == 0)
		return 0;

	if (desc_base & 0x1f) {
		/*
		 * Lost....do it again, allocate extra, and round
		 * the address base.
		 */
		kfree((const void *)desc_base);
		i = entries * sizeof(au1x_ddma_desc_t);
		i += (sizeof(au1x_ddma_desc_t) - 1);
		desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
		if (desc_base == 0)
			return 0;

		ctp->cdb_membase = desc_base;
		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
	} else
		ctp->cdb_membase = desc_base;

	dp = (au1x_ddma_desc_t *)desc_base;

	/* Keep track of the base descriptor. */
	ctp->chan_desc_base = dp;
	/* Initialize the rings with as much information as we know. */
	srcid = stp->dev_id;
	destid = dtp->dev_id;

	cmd0 = cmd1 = src1 = dest1 = 0;
	src0 = dest0 = 0;

	cmd0 |= DSCR_CMD0_SID(srcid);
	cmd0 |= DSCR_CMD0_DID(destid);
	cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
	cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);

	/* Is it a mem-to-mem transfer? */
	if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
	    ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
		cmd0 |= DSCR_CMD0_MEM;
	switch (stp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
		break;
	}

	switch (dtp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
		break;
	}

	/*
	 * If the device is marked as an in/out FIFO, ensure it is
	 * set non-coherent.
	 */
	if (stp->dev_flags & DEV_FLAGS_IN)
		cmd0 |= DSCR_CMD0_SN;	/* Source in FIFO */
	if (dtp->dev_flags & DEV_FLAGS_OUT)
		cmd0 |= DSCR_CMD0_DN;	/* Destination out FIFO */
	/*
	 * Set up source1. For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (stp->dev_tsize) {
	case 1:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
		break;
	case 2:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
		break;
	case 4:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
		break;
	}

	/* If source input is a FIFO, set a static address. */
	if (stp->dev_flags & DEV_FLAGS_IN) {
		if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
		else
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
	}

	if (stp->dev_physaddr)
		src0 = stp->dev_physaddr;
	/*
	 * Set up dest1. For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (dtp->dev_tsize) {
	case 1:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
		break;
	case 2:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
		break;
	case 4:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
		break;
	}

	/* If destination output is a FIFO, set a static address. */
	if (dtp->dev_flags & DEV_FLAGS_OUT) {
		if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
		else
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
	}

	if (dtp->dev_physaddr)
		dest0 = dtp->dev_physaddr;

	printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
			  "source1:%x dest0:%x dest1:%x\n",
			  dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
			  src1, dest0, dest1);
	for (i = 0; i < entries; i++) {
		dp->dscr_cmd0 = cmd0;
		dp->dscr_cmd1 = cmd1;
		dp->dscr_source0 = src0;
		dp->dscr_source1 = src1;
		dp->dscr_dest0 = dest0;
		dp->dscr_dest1 = dest1;
		dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
		dp++;
	}

	/* Make the last descriptor point to the first. */
	dp--;
	dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	return (u32)ctp->chan_desc_base;
}
EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
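/*
 * Continuing the sketch above (NUM_DESC is a hypothetical, driver-chosen
 * ring depth): the ring must be sized before any buffers can be queued.
 *
 *	au1xxx_dbdma_set_devwidth(chanid, 8);
 *	if (!au1xxx_dbdma_ring_alloc(chanid, NUM_DESC))
 *		au1xxx_dbdma_chan_free(chanid);	// out of memory
 */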
/*
 * Put a source buffer into the DMA ring.
 * This updates the source pointer and byte count. Normally used
 * for memory-to-FIFO transfers.
 */
u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *(chan_tab_t **)chanid;

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count. */
	dp->dscr_source0 = virt_to_phys(buf);
	dp->dscr_cmd1 = nbytes;
	/* Check flags */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/*
	 * There is an erratum on the Au1200/Au1550 parts that could result
	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
	 * the cache eviction buffer. DMA_NONCOHERENT is on by default for
	 * these parts. If it is fixed in the future, these dma_cache
	 * invalidates will be nothing more than empty macros. See io.h.
	 */
	dma_cache_wback_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	au_sync();
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
EXPORT_SYMBOL(_au1xxx_dbdma_put_source);
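/*
 * Illustrative producer step (tx_buf and len are hypothetical): queue
 * one source buffer and kick the channel. A zero return means the ring
 * is full because the descriptor at put_ptr is still owned by hardware.
 *
 *	if (_au1xxx_dbdma_put_source(chanid, tx_buf, len, DDMA_FLAGS_IE))
 *		au1xxx_dbdma_start(chanid);
 */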
/*
 * Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count. Normally used
 * to place an empty buffer into the ring for FIFO-to-memory transfers.
 */
u32 _au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Check flags */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/* Load up buffer address and byte count. */
	dp->dscr_dest0 = virt_to_phys(buf);
	dp->dscr_cmd1 = nbytes;

	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
			  dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
			  dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);

	/*
	 * There is an erratum on the Au1200/Au1550 parts that could result in
	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
	 * cache eviction buffer. DMA_NONCOHERENT is on by default for these
	 * parts. If it is fixed in the future, these dma_cache invalidates
	 * will be nothing more than empty macros. See io.h.
	 */
	dma_cache_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	au_sync();
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
EXPORT_SYMBOL(_au1xxx_dbdma_put_dest);
/*
 * Get a destination buffer from the DMA ring.
 * Normally used to get a full buffer from the ring during FIFO-to-memory
 * transfers. This does not set the valid bit; you will have to put
 * another destination buffer in to keep the DMA going.
 */
u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->get_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Return buffer address and byte count. */
	*buf = (void *)(phys_to_virt(dp->dscr_dest0));
	*nbytes = dp->dscr_cmd1;

	/* Get next descriptor pointer. */
	ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return 1;
}
EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);
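/*
 * Illustrative consumer loop (rx_buf, RX_SIZE, and consume() are
 * hypothetical): keep empty buffers posted with _au1xxx_dbdma_put_dest()
 * and harvest full ones here, e.g. from the channel callback.
 *
 *	void *buf;
 *	int count;
 *
 *	while (au1xxx_dbdma_get_dest(chanid, &buf, &count)) {
 *		consume(buf, count);
 *		_au1xxx_dbdma_put_dest(chanid, buf, RX_SIZE, DDMA_FLAGS_IE);
 *	}
 */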
void au1xxx_dbdma_stop(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_dma_chan_t *cp;
	int halt_timeout = 0;

	ctp = *((chan_tab_t **)chanid);

	cp = ctp->chan_ptr;
	cp->ddma_cfg &= ~DDMA_CFG_EN;	/* Disable channel */
	au_sync();
	while (!(cp->ddma_stat & DDMA_STAT_H)) {
		udelay(1);
		halt_timeout++;
		if (halt_timeout > 100) {
			printk(KERN_WARNING "warning: DMA channel won't halt\n");
			break;
		}
	}
	/* Clear current descriptor valid and doorbell. */
	cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
	au_sync();
}
EXPORT_SYMBOL(au1xxx_dbdma_stop);
/*
 * Start using the current descriptor pointer. If the DBDMA encounters
 * a non-valid descriptor, it will stop. In this case, we can just
 * continue by adding a buffer to the list and starting again.
 */
void au1xxx_dbdma_start(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_dma_chan_t *cp;

	ctp = *((chan_tab_t **)chanid);
	cp = ctp->chan_ptr;
	cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
	cp->ddma_cfg |= DDMA_CFG_EN;	/* Enable channel */
	au_sync();
	cp->ddma_dbell = 0;
	au_sync();
}
EXPORT_SYMBOL(au1xxx_dbdma_start);
void au1xxx_dbdma_reset(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	au1xxx_dbdma_stop(chanid);

	ctp = *((chan_tab_t **)chanid);
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	/* Run through the descriptors and reset the valid indicator. */
	dp = ctp->chan_desc_base;
	do {
		dp->dscr_cmd0 &= ~DSCR_CMD0_V;
		/*
		 * Reset our software status -- this is used to determine
		 * if a descriptor is in use by upper-level software, since
		 * posting can reset the 'V' bit.
		 */
		dp->sw_status = 0;
		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	} while (dp != ctp->chan_desc_base);
}
EXPORT_SYMBOL(au1xxx_dbdma_reset);
u32 au1xxx_get_dma_residue(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_dma_chan_t *cp;
	u32 rv;

	ctp = *((chan_tab_t **)chanid);
	cp = ctp->chan_ptr;

	/* This is only valid if the channel is stopped. */
	rv = cp->ddma_bytecnt;
	au_sync();

	return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);
void au1xxx_dbdma_chan_free(u32 chanid)
{
	chan_tab_t *ctp;
	dbdev_tab_t *stp, *dtp;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;

	au1xxx_dbdma_stop(chanid);

	kfree((void *)ctp->cdb_membase);

	stp->dev_flags &= ~DEV_FLAGS_INUSE;
	dtp->dev_flags &= ~DEV_FLAGS_INUSE;
	chan_tab_ptr[ctp->chan_index] = NULL;

	kfree(ctp);
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
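/*
 * Teardown sketch: au1xxx_dbdma_chan_free() stops the channel, frees
 * the descriptor ring and the channel table entry, and releases both
 * device slots, so shutdown is a single call.
 *
 *	au1xxx_dbdma_chan_free(chanid);
 *	chanid = 0;	// the handle is now stale
 */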
static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
{
	u32 intstat;
	u32 chan_index;
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	au1x_dma_chan_t *cp;

	intstat = dbdma_gptr->ddma_intstat;
	au_sync();
	chan_index = __ffs(intstat);

	ctp = chan_tab_ptr[chan_index];
	cp = ctp->chan_ptr;
	dp = ctp->cur_ptr;

	/* Reset interrupt. */
	cp->ddma_irq = 0;
	au_sync();

	if (ctp->chan_callback)
		ctp->chan_callback(irq, ctp->chan_callparam);

	ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	return IRQ_RETVAL(1);
}
static void au1xxx_dbdma_init(void)
{
	int irq_nr;

	dbdma_gptr->ddma_config = 0;
	dbdma_gptr->ddma_throttle = 0;
	dbdma_gptr->ddma_inten = 0xffff;
	au_sync();

#if defined(CONFIG_SOC_AU1550)
	irq_nr = AU1550_DDMA_INT;
#elif defined(CONFIG_SOC_AU1200)
	irq_nr = AU1200_DDMA_INT;
#else
#error Unknown Au1x00 SOC
#endif

	if (request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED,
			"Au1xxx dbdma", (void *)dbdma_gptr))
		printk(KERN_ERR "Can't get 1550 dbdma irq");
}
void au1xxx_dbdma_dump(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	dbdev_tab_t *stp, *dtp;
	au1x_dma_chan_t *cp;
	u32 i = 0;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;
	cp = ctp->chan_ptr;

	printk(KERN_DEBUG "Chan %x, stp %x (dev %d)  dtp %x (dev %d)\n",
			  (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
			  dtp - dbdev_tab);
	printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
			  (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
			  (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));

	printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
	printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
			  cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
	printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
			  cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
			  cp->ddma_bytecnt);

	/* Run through the descriptors */
	dp = ctp->chan_desc_base;

	do {
		printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
				  i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
		printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
				  dp->dscr_source0, dp->dscr_source1,
				  dp->dscr_dest0, dp->dscr_dest1);
		printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
				  dp->dscr_stat, dp->dscr_nxtptr);
		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	} while (dp != ctp->chan_desc_base);
}
/*
 * Put a descriptor into the DMA ring.
 * This updates the source/destination pointers and byte count.
 */
u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	u32 nbytes = 0;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer addresses and byte count. */
	dp->dscr_dest0 = dscr->dscr_dest0;
	dp->dscr_source0 = dscr->dscr_source0;
	dp->dscr_dest1 = dscr->dscr_dest1;
	dp->dscr_source1 = dscr->dscr_source1;
	dp->dscr_cmd1 = dscr->dscr_cmd1;
	nbytes = dscr->dscr_cmd1;
	/* Allow the caller to specify if an interrupt is generated. */
	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
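/*
 * Illustrative use (src, dst, and len are hypothetical): build a
 * scratch descriptor and post it. Only the address/count fields are
 * copied into the ring; DSCR_CMD0_IE may be set in dscr_cmd0 to
 * request a completion interrupt.
 *
 *	au1x_ddma_desc_t d;
 *
 *	memset((void *)&d, 0, sizeof(d));
 *	d.dscr_source0 = virt_to_phys(src);
 *	d.dscr_dest0 = virt_to_phys(dst);
 *	d.dscr_cmd1 = len;
 *	d.dscr_cmd0 = DSCR_CMD0_IE;
 *	au1xxx_dbdma_put_dscr(chanid, &d);
 */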
#ifdef CONFIG_PM
void au1xxx_dbdma_suspend(void)
{
	int i;
	u32 addr;

	addr = DDMA_GLOBAL_BASE;
	au1xxx_dbdma_pm_regs[0][0] = au_readl(addr + 0x00);
	au1xxx_dbdma_pm_regs[0][1] = au_readl(addr + 0x04);
	au1xxx_dbdma_pm_regs[0][2] = au_readl(addr + 0x08);
	au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c);

	/* save channel configurations */
	for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
		au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00);
		au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04);
		au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08);
		au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c);
		au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10);
		au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14);

		/* halt the channel and wait for it to stop */
		au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00);
		au_sync();
		while (!(au_readl(addr + 0x14) & 1))
			au_sync();

		addr += 0x100;	/* next channel base */
	}
	/* disable channel interrupts */
	au_writel(0, DDMA_GLOBAL_BASE + 0x0c);
	au_sync();
}
void au1xxx_dbdma_resume(void)
{
	int i;
	u32 addr;

	addr = DDMA_GLOBAL_BASE;
	au_writel(au1xxx_dbdma_pm_regs[0][0], addr + 0x00);
	au_writel(au1xxx_dbdma_pm_regs[0][1], addr + 0x04);
	au_writel(au1xxx_dbdma_pm_regs[0][2], addr + 0x08);
	au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c);

	/* restore channel configurations */
	for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
		au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00);
		au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04);
		au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08);
		au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c);
		au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10);
		au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14);

		addr += 0x100;	/* next channel base */
	}
}
#endif /* CONFIG_PM */
#endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */