/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dw_dmac_regs.h"
#include "dmaengine.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		int _dms = __slave ? __slave->dst_master : 0;	\
		int _sms = __slave ? __slave->src_master : 1;	\
		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})
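
/*
 * Convenience note on the defaults above: DWC_DEFAULT_CTLLO() only builds
 * the parts of CTL_LO that do not depend on the transfer itself.  For a
 * channel without dw_dma_slave data (the plain memcpy case) the
 * destination/source AHB masters fall back to 0 and 1 and the burst size
 * to a fixed controller default (DW_DMA_MSIZE_16 here); slave channels
 * take all of these from chan->private and the channel's dma_sconfig.
 */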
/*
 * The maximum number of transfers in one block (the value programmed
 * into lli.ctlhi) is configuration-dependent and usually a funny size
 * like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U
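
/*
 * Worked example (follows directly from the define above): with a 32-bit
 * transfer width one descriptor can move at most 4095 * 4 = 16380 bytes.
 * dwc_prep_dma_memcpy() and dwc_prep_slave_sg() below therefore split
 * longer requests into chains of blocks of at most DWC_MAX_COUNT
 * transfers each.
 */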
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64
/*----------------------------------------------------------------------*/
/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool.  These
 * descriptors -- and associated data -- are cacheable.  We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */
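
/*
 * Concretely, every place below that hands a descriptor to the hardware
 * calls dma_sync_single_for_device() on the lli first, and
 * dwc_sync_desc_for_cpu() is used before the CPU looks at a descriptor
 * that the controller may still have owned.
 */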
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}
/*----------------------------------------------------------------------*/
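
/*
 * The helper below picks the widest usable transfer width for a given
 * address/length combination: it returns 3, 2, 1 or 0 depending on
 * whether the value is a multiple of 8, 4 or 2 bytes.  Callers OR
 * together source address, destination address and length so that a
 * single call yields the common alignment of all three.
 */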
static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}
static void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}
/*----------------------------------------------------------------------*/
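
/*
 * A quick sketch of how a transfer is driven, based on the code below:
 * each hardware block is described by one struct dw_lli embedded in a
 * struct dw_desc.  dwc_dostart() only programs LLP with the physical
 * address of the first lli and enables linked-list fetching in CTL_LO;
 * the controller then walks the lli chain on its own and signals
 * completion once the chain ends (the last lli has llp == 0).
 */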
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
/*----------------------------------------------------------------------*/
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback		callback = NULL;
	void				*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
			(unsigned long long)llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */
inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;
	void (*callback)(void *param);
	void *callback_param;

	dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
			channel_readl(dwc, LLP));

	callback = dwc->cdesc->period_callback;
	callback_param = dwc->cdesc->period_callback_param;

	if (callback)
		callback(callback_param);

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
/* ------------------------------------------------------------------------- */
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
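
/*
 * Interrupt handling is split in two stages: the hard IRQ handler below
 * only masks the per-channel XFER and ERROR interrupts and schedules the
 * tasklet above; the tasklet then reads the raw status, dispatches to the
 * cyclic/error/scan handlers and unmasks the interrupts again.
 */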
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * tasklet.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
			(unsigned long long)dest, (unsigned long long)src,
			len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	src_width = dst_width = dwc_fast_fls(src | dest | len);

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
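
/*
 * Worked example of the splitting above (plain arithmetic, nothing
 * driver-specific assumed): copying 81920 bytes with 4-byte-aligned
 * addresses gives src_width = dst_width = 2, so each descriptor carries
 * at most DWC_MAX_COUNT = 4095 words = 16380 bytes; the loop therefore
 * builds a chain of five full descriptors plus one carrying the
 * remaining 20 bytes.
 */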
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = dwc_fast_fls(mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = dwc_fast_fls(mem | len);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding the most significant bit set: fls(n) - 2,
 * with a burst size of 1 mapped to 0 explicitly.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
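
/*
 * Example of the mapping implemented above: fls(4) = 3 so 4 -> 1,
 * fls(8) = 4 so 8 -> 2, fls(16) = 5 so 16 -> 3; a maxburst of 1 (or 0)
 * takes the else-branch and becomes 0.
 */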
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
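
/*
 * A minimal usage sketch (client side; the peripheral register names are
 * made up for the example): a slave driver reaches this function through
 * the generic dmaengine helper, e.g.
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr	= per_base + TX_FIFO_OFFSET,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * The maxburst values arrive as a number of transfer-width units and are
 * converted to the controller encoding by convert_burst() above.
 */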
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	u32			cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		dwc_chan_disable(dw, dwc);

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	enum dma_status		ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, dwc_first_active(dwc)->len);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
/* --------------------- Cyclic DMA API extensions -------------------- */
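
/*
 * Sketch of the intended call flow for this extension (assembled from the
 * exported entry points below rather than from a real client): a user such
 * as an audio driver configures the channel via DMA_SLAVE_CONFIG, calls
 * dw_dma_cyclic_prep() with its ring buffer, optionally sets
 * period_callback in the returned dw_cyclic_desc (invoked from
 * dwc_handle_cyclic()), starts the transfer with dw_dma_cyclic_start() and
 * finally tears it down with dw_dma_cyclic_stop() and dw_dma_cyclic_free().
 */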
/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
			"period %zu periods %d\n", (unsigned long long)buf_addr,
			buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
	kfree(cdesc->desc);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
/*----------------------------------------------------------------------*/
static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}
static int __devinit dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_prepare_enable(dw->clk);

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* force dma off, just in case */
	dw_dma_off(dw);

	/* disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable_unprepare(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}
static int __devexit dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;
	struct resource		*io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable_unprepare(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	return 0;
}
static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
static struct platform_driver dw_driver = {
	.remove		= __devexit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_id_table),
	},
};
static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");