/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

/* controller register offsets */
#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR		0x0204
#define DTADR		0x0208
#define DCMD		0x020c

#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	(1 << 10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	(1 << 9)	/* The end of Receive */

#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */
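
/*
 * DRCMR(n) is the request-to-channel mapping register for peripheral
 * request line n: enable_chan() writes DRCMR_MAPVLD together with the
 * physical channel index to route that request line onto the channel.
 */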

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
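
/*
 * A single hardware descriptor can transfer at most DCMD_LENGTH
 * (8K - 1) bytes, so the prep_* callbacks below split longer requests
 * into a chain of descriptors linked through ddadr.
 */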

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int				dma_channels;
	void __iomem			*base;
	struct device			*dev;
	struct dma_device		device;
	struct mmp_pdma_phy		*phy;
	spinlock_t phy_lock;		/* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		/* clear irq */
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
		return 0;
	}

	return -EAGAIN;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	/* dispatch every channel whose status bit is set in DINT */
	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	return irq_num ? IRQ_HANDLED : IRQ_NONE;
}

/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->phy->vchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/* desc->tx_list ==> pending list */
static void append_pending_queue(struct mmp_pdma_chan *chan,
				 struct mmp_pdma_desc_sw *desc)
{
	struct mmp_pdma_desc_sw *tail =
				to_mmp_pdma_desc(chan->chain_pending.prev);

	if (list_empty(&chan->chain_pending))
		goto out_splice;

	/* one irq per queue, even appended */
	tail->desc.ddadr = desc->async_tx.phys;
	tail->desc.dcmd &= ~DCMD_ENDIRQEN;

	/* softly link to pending list */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
}
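
/*
 * Note that only the last descriptor of the newly appended chain keeps
 * DCMD_ENDIRQEN set: the old tail's end interrupt is suppressed and its
 * ddadr is pointed at the new chain, so the controller walks the whole
 * pending queue and raises a single completion interrupt at the end.
 */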

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still in running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	append_pending_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each desc has submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool =
		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
				sizeof(struct mmp_pdma_desc_sw),
				__alignof__(struct mmp_pdma_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		mmp_pdma_free_phy(chan);
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->dev_addr = addr;
		/* FIXME: drivers should be ported over to use the filter
		 * function. Once that's done, the following two lines can
		 * be removed.
		 */
		if (cfg->slave_id)
			chan->drcmr = cfg->slave_id;
		break;
	default:
		return -ENOSYS;
	}

	return 0;
}
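
/*
 * For reference, a client driver normally programs the channel through
 * dmaengine_slave_config() before preparing transfers. A minimal sketch,
 * with a made-up FIFO address purely for illustration:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= 0xd4037000,	(hypothetical FIFO address)
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 32,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */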

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->chain_running)) {
		dma_cookie_t cookie;

		desc = to_mmp_pdma_desc(chan->chain_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->chain_running, &chain_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev,
			    sizeof(struct mmp_pdma_chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_chan_handler, IRQF_DISABLED,
				       "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static const struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan, *candidate;

retry:
	candidate = NULL;

	/* walk the list of channels registered with the current instance and
	 * find one that is currently unused */
	list_for_each_entry(chan, &d->device.channels, device_node)
		if (chan->client_count == 0) {
			candidate = chan;
			break;
		}

	if (!candidate)
		return NULL;

	/* dma_get_slave_channel will return NULL if we lost a race between
	 * the lookup and the reservation */
	chan = dma_get_slave_channel(candidate);

	if (chan) {
		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
		c->drcmr = dma_spec->args[0];
		return chan;
	}

	goto retry;
}
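
/*
 * With the device tree binding, the first cell of a client's "dmas"
 * specifier is interpreted as the peripheral's DRCMR request number
 * (dma_spec->args[0] above); the physical channel itself is whichever
 * one happens to be unused at request time.
 */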

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node,
				     "#dma-channels", &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channels */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kzalloc(pdev->dev,
				 dma_channels * sizeof(struct mmp_pdma_phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_int_handler, IRQF_DISABLED,
				       "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner	= THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};

bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
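
/*
 * Legacy (non-DT) clients can still grab a channel through the filter
 * function. A minimal sketch, where drcmr_req stands for the caller's
 * DRCMR request number:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr_req = ...;	(request line of the peripheral)
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr_req);
 */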

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");