/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "at_hdmac_regs.h"
/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma	: ATmel DMA controller entity related
 * atc_ / atchan	: ATmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(0)	\
				|ATC_DIF(1))
/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}
/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to the client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}
/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&atchan->lock);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_bh(&atchan->lock);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptors available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&atchan->lock);
			atchan->descs_allocated++;
			spin_unlock_bh(&atchan->lock);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}
/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;

		spin_lock_bh(&atchan->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_bh(&atchan->lock);
	}
}
/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}
/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_cookie_t cookie = atchan->chan_common.cookie;

	/* cookies are positive; wrap back to 1 on overflow */
	if (++cookie < 0)
		cookie = 1;

	atchan->chan_common.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	atchan->completed_cookie = txd->cookie;

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}
/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Also submits any queued descriptors.
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}
/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}
/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}
/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take the opportunity to push queued descriptors
	 * into active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}
/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}
/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;

	/* Channel cannot be enabled here */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: channel enabled in tasklet\n");
		return;
	}

	spin_lock(&atchan->lock);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock(&atchan->lock);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR, atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}
	} while (pending);

	return ret;
}
/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @desc: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&atchan->lock);
	cookie = atc_assign_cookie(atchan, desc);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_bh(&atchan->lock);

	return cookie;
}
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation DMA destination address
 * @src: operation DMA source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization: pick the widest transfer
	 * width that source, destination and length are all aligned to.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		if (!first) {
			first = desc;
		} else {
			/* inform the HW lli about chaining */
			prev->lli.dscr = desc->txd.phys;
			/* insert the link descriptor to the LD ring */
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
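/*
 * Illustrative sketch (not part of the driver): a client normally reaches
 * atc_prep_dma_memcpy() through the generic dmaengine API rather than
 * calling it directly.  Assuming "chan" was obtained with
 * dma_request_channel() and dst/src are DMA-mapped addresses:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		cookie = tx->tx_submit(tx);	(ends up in atc_tx_submit())
 *		dma_async_issue_pending(chan);
 *	}
 */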
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	reg_width = atslave->reg_width;

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER;
		reg = atslave->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			if (!first) {
				first = desc;
			} else {
				/* inform the HW lli about chaining */
				prev->lli.dscr = desc->txd.phys;
				/* insert the link descriptor to the LD ring */
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM;

		reg = atslave->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			if (!first) {
				first = desc;
			} else {
				/* inform the HW lli about chaining */
				prev->lli.dscr = desc->txd.phys;
				/* insert the link descriptor to the LD ring */
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}
/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_data_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
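/*
 * Worked example (assuming ATC_BTSIZE_MAX is 0xffff, as defined in
 * at_hdmac_regs.h): with reg_width = 2 (32-bit transfers) the largest
 * legal period is 0xffff << 2 = 262140 bytes, and both buf_addr and
 * period_len must have their two low bits clear (word alignment).
 */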
/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		size_t period_len, enum dma_data_direction direction)
{
	u32		ctrla;
	unsigned int	reg_width = atslave->reg_width;

	/* prepare common CTRLA value */
	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_TO_DEVICE:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = atslave->tx_reg;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DEFAULT_CTRLB
				| ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER;
		break;

	case DMA_FROM_DEVICE:
		desc->lli.saddr = atslave->rx_reg;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DEFAULT_CTRLB
				| ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
			period_len, direction))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
						period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&atchan->lock);

	dma_writel(atdma, CHDR, atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_bh(&atchan->lock);

	return 0;
}
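/*
 * Illustrative only: a client cancels all outstanding transfers on a
 * channel through the generic dmaengine control hook, which lands in
 * atc_control() above:
 *
 *	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 */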
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	enum dma_status		ret;

	spin_lock_bh(&atchan->lock);

	last_complete = atchan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		last_complete = atchan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	spin_unlock_bh(&atchan->lock);

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
		 cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}
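/*
 * Illustrative only: the usual client-side polling sequence built on top
 * of this hook goes through the dmaengine wrapper:
 *
 *	if (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_SUCCESS)
 *		... the transfer is done ...
 */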
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
		return;

	spin_lock_bh(&atchan->lock);
	if (!atc_chan_is_enabled(atchan)) {
		atc_advance_work(atchan);
	}
	spin_unlock_bh(&atchan->lock);
}
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&atchan->lock);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	atchan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&atchan->lock);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
/*--  Module Management  -----------------------------------------------*/

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}
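/*
 * Illustrative only: at_dma_probe() below expects board code to register a
 * platform device carrying a struct at_dma_platform_data (field names
 * assumed from the AT91 platform header) describing channel count and
 * capabilities, along the lines of:
 *
 *	static struct at_dma_platform_data atdma_pdata = {
 *		.nr_channels = 8,
 *	};
 *	dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask);
 */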
static int __init at_dma_probe(struct platform_device *pdev)
{
	struct at_dma_platform_data *pdata;
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	/* get DMA Controller parameters from platform */
	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += pdata->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities from the platform data */
	atdma->dma_common.cap_mask = pdata->cap_mask;
	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;

	size = io->end - io->start + 1;
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channel-related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		atchan->chan_common.cookie = atchan->completed_cookie = 1;
		atchan->chan_common.chan_id = i;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_irq(atchan);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;

	if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
	    dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
		atdma->dma_common.device_control = atc_control;

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  atdma->dma_common.chancnt);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}
static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_irq(atchan);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, io->end - io->start + 1);

	kfree(atdma);

	return 0;
}
static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(atdma);
	clk_disable(atdma->clk);
}
static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};
static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);
static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);
MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");