/*
 * EDMA3 support for DaVinci
 *
 * Copyright (C) 2006-2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/edma.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
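/*
 * Editorial example (not part of the original source): each PaRAM slot
 * is PARM_SIZE (0x20) bytes, so PARM_OFFSET() reduces to a shift.  For
 * slot 64, PARM_OFFSET(64) == 0x4000 + (64 << 5) == 0x4800, the byte
 * offset of that slot's OPT word from the CC register base.
 */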
#define EDMA_DCHMAP	0x0100	/* 64 registers */
#define CHMAP_EXIST	BIT(24)

#define EDMA_MAX_DMACH		64
#define EDMA_MAX_PARAMENTRY	512

/*****************************************************************************/

static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
static inline unsigned int edma_read(unsigned ctlr, int offset)
{
	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
}

static inline void edma_write(unsigned ctlr, int offset, int val)
{
	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
}

static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
		unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	val |= or;
	edma_write(ctlr, offset, val);
}

static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	edma_write(ctlr, offset, val);
}

static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val |= or;
	edma_write(ctlr, offset, val);
}

static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + (i << 2));
}

static inline void edma_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, offset + (i << 2), val);
}

static inline void edma_modify_array(unsigned ctlr, int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + (i << 2), and, or);
}

static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + (i << 2), or);
}

static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
		unsigned or)
{
	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
}

static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
		unsigned val)
{
	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
		int i)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
		int param_no)
{
	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
		unsigned val)
{
	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
		unsigned and)
{
	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
		unsigned or)
{
	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}
static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}
/*****************************************************************************/

/* actual number of DMA channels and slots on this silicon */
struct edma {
	/* how many dma resources of each type */
	unsigned	num_channels;
	unsigned	num_region;
	unsigned	num_slots;
	unsigned	num_tc;
	unsigned	num_cc;
	enum dma_event_q	default_queue;

	/* list of channels with no event trigger; terminated by "-1" */
	const s8	*noevent;

	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

	/* The edma_unused bit is set for each channel that is not used
	 * on this platform; filling in this bitmap takes a bit of
	 * SoC-specific initialization code.
	 */
	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);

	unsigned	irq_res_start;
	unsigned	irq_res_end;

	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};

static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};
/*****************************************************************************/

static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
		enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = edma_cc[ctlr]->default_queue;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}

static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}

static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
		int priority)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
			((priority & 0x7) << bit));
}
/**
 * map_dmach_param - map channel numbers to param entry numbers
 *
 * This maps each DMA channel number to a param entry number.  In other
 * words, using the DMA channel mapping registers, a param entry can be
 * mapped to any channel.
 *
 * Callers are responsible for ensuring the channel mapping logic is
 * included in that particular EDMA variant (e.g. dm646x).
 */
static void __init map_dmach_param(unsigned ctlr)
{
	int i;
	for (i = 0; i < EDMA_MAX_DMACH; i++)
		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
}
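/*
 * Editorial note (an assumption based on the EDMA3 documentation, not
 * original to this file): DCHMAP's PAENTRY field starts at bit 5, so
 * writing (i << 5) maps DMA channel i to PaRAM slot i -- the identity
 * mapping that the rest of this file relies on.
 */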
static void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback)
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
				BIT(lch & 0x1f));

	edma_cc[ctlr]->intr_data[lch].callback = callback;
	edma_cc[ctlr]->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
				BIT(lch & 0x1f));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
				BIT(lch & 0x1f));
	}
}

static int irq2ctlr(int irq)
{
	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
		return 0;
	else if (irq >= edma_cc[1]->irq_res_start &&
		irq <= edma_cc[1]->irq_res_end)
		return 1;

	return -1;
}
/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ctlr, SH_ICR, bank,
					BIT(slot));
			if (edma_cc[ctlr]->intr_data[channel].callback)
				edma_cc[ctlr]->intr_data[channel].callback(
					channel, DMA_COMPLETE,
					edma_cc[ctlr]->intr_data[channel].data);
		}
	} while (sh_ipr);

	edma_shadow0_write(ctlr, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	int ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_ccerr_handler\n");

	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
	    (edma_read(ctlr, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;

		if (edma_read_array(ctlr, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ctlr, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;

				if (edma_read_array(ctlr, EDMA_EMR, j) &
						BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ctlr, EDMA_EMCR, j,
							BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ctlr, SH_SECR,
							j, BIT(i));
					if (edma_cc[ctlr]->intr_data[k].callback)
						edma_cc[ctlr]->intr_data[k].callback(
							k, DMA_CC_ERROR,
							edma_cc[ctlr]->intr_data[k].data);
				}
			}
		} else if (edma_read(ctlr, EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
				edma_read(ctlr, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding QEMR bits */
					edma_write(ctlr, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ctlr, SH_QSECR,
							BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(ctlr, EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
				edma_read(ctlr, EDMA_CCERR));
			/* FIXME:  CCERR.BIT(16) ignored!  much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding CCERR bits */
					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		}
		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
		    (edma_read(ctlr, EDMA_CCERR) == 0))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ctlr, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
static int reserve_contiguous_slots(int ctlr, unsigned int id,
		unsigned int num_slots,
		unsigned int start_slot)
{
	int i, j;
	unsigned int count = num_slots;
	int stop_slot = start_slot;
	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);

	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
		j = EDMA_CHAN_SLOT(i);
		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
			/* Record our current beginning slot */
			if (count == num_slots)
				stop_slot = i;

			count--;
			set_bit(j, tmp_inuse);

			if (count == 0)
				break;
		} else {
			clear_bit(j, tmp_inuse);

			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
				stop_slot = i;
				break;
			} else {
				count = num_slots;
			}
		}
	}

	/*
	 * We have to clear any bits that we set
	 * if we run out of parameter RAM slots, i.e. we find a set of
	 * contiguous parameter RAM slots but it is smaller than the
	 * number requested because we hit the end of parameter RAM.
	 */
	if (i == edma_cc[ctlr]->num_slots)
		stop_slot = i;

	j = start_slot;
	for_each_set_bit_from(j, tmp_inuse, stop_slot)
		clear_bit(j, edma_cc[ctlr]->edma_inuse);

	if (count)
		return -EBUSY;

	for (j = i - num_slots + 1; j <= i; ++j)
		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}

static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	int i, ctlr;

	for (i = 0; i < pdev->num_resources; i++) {
		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
				(int)pdev->resource[i].start >= 0) {
			ctlr = EDMA_CTLR(pdev->resource[i].start);
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
					edma_cc[ctlr]->edma_unused);
		}
	}

	return 0;
}
/*-----------------------------------------------------------------------*/

static bool unused_chan_list_done;

/* Resource alloc/free:  dma channels, parameter RAM slots */
/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel.  Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	unsigned i, done = 0, ctlr = 0;
	int ret = 0;

	if (!unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
				prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		unused_chan_list_done = true;
	}

	if (channel >= 0) {
		ctlr = EDMA_CTLR(channel);
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		channel = 0;
		for (i = 0; i < arch_num_cc; i++) {
			channel = find_next_bit(edma_cc[i]->edma_unused,
					edma_cc[i]->num_channels,
					channel);
			if (channel == edma_cc[i]->num_channels)
				break;
			if (!test_and_set_bit(channel,
					edma_cc[i]->edma_inuse)) {
				done = 1;
				ctlr = i;
				break;
			}
			channel++;
		}
		if (!done)
			return -ENOMEM;
	} else if (channel >= edma_cc[ctlr]->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);

	if (callback)
		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
					callback, data);

	map_dmach_queue(ctlr, channel, eventq_no);

	return EDMA_CTLR_CHAN(ctlr, channel);
}
EXPORT_SYMBOL(edma_alloc_channel);
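/*
 * Usage sketch (illustrative, not part of the original file; the
 * callback and completion names are hypothetical).  Per the kerneldoc
 * above, a negative channel such as EDMA_CHANNEL_ANY requests any
 * unmapped channel:
 *
 *	static void my_dma_cb(unsigned channel, u16 ch_status, void *data)
 *	{
 *		if (ch_status == DMA_COMPLETE)
 *			complete(data);
 *	}
 *
 *	int ch = edma_alloc_channel(EDMA_CHANNEL_ANY, my_dma_cb,
 *				&my_completion, EVENTQ_DEFAULT);
 *	if (ch < 0)
 *		return ch;
 *	...
 *	edma_free_channel(ch);
 */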
/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	setup_dma_interrupt(channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);
	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ctlr: controller instance the slot is allocated from
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (!edma_cc[ctlr])
		return -EINVAL;

	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		slot = edma_cc[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
					edma_cc[ctlr]->num_slots, slot);
			if (slot == edma_cc[ctlr]->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_cc[ctlr]->num_channels ||
			slot >= edma_cc[ctlr]->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);
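/*
 * Usage sketch (illustrative, not from the original file): grab any
 * free slot on controller 0, typically to serve as a link/reload
 * target for a channel's transfer:
 *
 *	int slot = edma_alloc_slot(0, EDMA_SLOT_ANY);
 *	if (slot < 0)
 *		return slot;
 *	...
 *	edma_free_slot(slot);
 */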
/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots)
		return;

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);
/**
 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
 * @ctlr: controller instance to allocate from
 * @id: one of EDMA_CONT_PARAMS_ANY, EDMA_CONT_PARAMS_FIXED_EXACT or
 *	EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @slot: the start value of the parameter RAM slot range; must be passed
 *	if @id is EDMA_CONT_PARAMS_FIXED_EXACT or
 *	EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @count: number of contiguous parameter RAM slots
 *
 * The API returns the starting slot of the set of contiguous parameter
 * RAM slots that was requested.
 *
 * If @id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
 * contiguous parameter RAM slots from parameter RAM 64 in the case of
 * DaVinci SOCs and 32 in the case of DA8xx SOCs.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
 * set of contiguous parameter RAM slots from the @slot that is passed as an
 * argument.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially tries
 * to find a set of contiguous parameter RAM slots starting from the @slot
 * that is passed as an argument.  On failure it will try to find a set of
 * contiguous parameter RAM slots from the remaining parameter RAM.
 */
int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
{
	/*
	 * The start slot requested should be greater than
	 * the number of channels and less than the total number
	 * of slots.
	 */
	if ((id != EDMA_CONT_PARAMS_ANY) &&
		(slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots))
		return -EINVAL;

	/*
	 * The number of parameter RAM slots requested cannot be less than 1
	 * and cannot be more than the number of slots minus the number of
	 * channels.
	 */
	if (count < 1 || count >
		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
		return -EINVAL;

	switch (id) {
	case EDMA_CONT_PARAMS_ANY:
		return reserve_contiguous_slots(ctlr, id, count,
				edma_cc[ctlr]->num_channels);
	case EDMA_CONT_PARAMS_FIXED_EXACT:
	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
		return reserve_contiguous_slots(ctlr, id, count, slot);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(edma_alloc_cont_slots);
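/*
 * Usage sketch (illustrative, not from the original file): reserve
 * four contiguous slots anywhere in parameter RAM, then release them:
 *
 *	int base = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 4);
 *	if (base < 0)
 *		return base;
 *	...
 *	edma_free_cont_slots(base, 4);
 */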
/**
 * edma_free_cont_slots - deallocate DMA parameter RAM slots
 * @slot: first parameter RAM of a set of parameter RAM slots to be freed
 * @count: the number of contiguous parameter RAM slots to be freed
 *
 * This deallocates the parameter RAM slots allocated by
 * edma_alloc_cont_slots().
 * Callers/applications need to keep track of sets of contiguous
 * parameter RAM slots that have been allocated using the
 * edma_alloc_cont_slots() API.
 * Callers are responsible for ensuring the slots are inactive, and will
 * not be activated.
 */
int edma_free_cont_slots(unsigned slot, int count)
{
	unsigned ctlr, slot_to_free;
	int i;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots ||
		count < 1)
		return -EINVAL;

	for (i = slot; i < slot + count; ++i) {
		ctlr = EDMA_CTLR(i);
		slot_to_free = EDMA_CHAN_SLOT(i);

		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
			&dummy_paramset, PARM_SIZE);
		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
	}

	return 0;
}
EXPORT_SYMBOL(edma_free_cont_slots);
/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */

/**
 * edma_set_src - set initial DMA source address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @src_port: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma_set_src_index().
 */
void edma_set_src(unsigned slot, dma_addr_t src_port,
		enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set SAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
		} else {
			/* clear SAM */
			i &= ~SAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);

		/* set the source port address
		   in source register of param structure */
		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
	}
}
EXPORT_SYMBOL(edma_set_src);
/**
 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma_set_dest_index().
 */
void edma_set_dest(unsigned slot, dma_addr_t dest_port,
		enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set DAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
		} else {
			/* clear DAM */
			i &= ~DAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);
		/* set the destination port address
		   in dest register of param structure */
		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
	}
}
EXPORT_SYMBOL(edma_set_dest);
/**
 * edma_get_position - returns the current transfer points
 * @slot: parameter RAM slot being examined
 * @src: pointer to source port position
 * @dst: pointer to destination port position
 *
 * Returns the current source and destination addresses for a particular
 * parameter RAM slot.  Its channel should not be active when this is called.
 */
void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
{
	struct edmacc_param temp;
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
	if (src != NULL)
		*src = temp.src;
	if (dst != NULL)
		*dst = temp.dst;
}
EXPORT_SYMBOL(edma_get_position);
/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: parameter RAM slot being configured
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0xffff0000, src_bidx);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0xffff0000, src_cidx);
	}
}
EXPORT_SYMBOL(edma_set_src_index);
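/*
 * Worked example (illustrative, not from the original file): to gather
 * every other 16-byte line from the source while packing the
 * destination contiguously, with acnt = 16 and bcnt = number of lines
 * (see edma_set_transfer_params() below), one might use:
 *
 *	edma_set_src_index(slot, 32, 0);
 *	edma_set_dest_index(slot, 16, 0);
 */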
/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: parameter RAM slot being configured
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0x0000ffff, dest_bidx << 16);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0x0000ffff, dest_cidx << 16);
	}
}
EXPORT_SYMBOL(edma_set_dest_index);
/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma_set_transfer_params(unsigned slot,
		u16 acnt, u16 bcnt, u16 ccnt,
		u16 bcnt_rld, enum sync_dimension sync_mode)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
				0x0000ffff, bcnt_rld << 16);
		if (sync_mode == ASYNC)
			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
		else
			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
		/* Set the acount, bcount, ccount registers */
		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
	}
}
EXPORT_SYMBOL(edma_set_transfer_params);
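/*
 * Worked example (illustrative, not from the original file): an
 * AB-synchronized drain of a 32-byte wide, 8-entry FIFO into memory,
 * 64 frames per block:
 *
 *	edma_set_transfer_params(slot, 32, 8, 64, 8, ABSYNC);
 *
 * Each synchronization event then moves one 32 x 8 = 256 byte frame,
 * and the whole block is 256 * 64 = 16 KiB.
 */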
/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
	unsigned ctlr_from, ctlr_to;

	ctlr_from = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);
	ctlr_to = EDMA_CTLR(to);
	to = EDMA_CHAN_SLOT(to);

	if (from >= edma_cc[ctlr_from]->num_slots)
		return;
	if (to >= edma_cc[ctlr_to]->num_slots)
		return;
	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
				PARM_OFFSET(to));
}
EXPORT_SYMBOL(edma_link);
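/*
 * A common pattern (sketch, not from the original file): two slots
 * linked to each other form a "ping-pong" reload, letting a channel
 * alternate between two buffers without CPU reprogramming.  Assuming
 * the channel's own slot was first loaded with the "ping" parameters:
 *
 *	edma_link(ping_slot, pong_slot);
 *	edma_link(pong_slot, ping_slot);
 *	edma_link(channel_slot, pong_slot);
 *
 * where ping_slot/pong_slot come from edma_alloc_slot() and
 * channel_slot is the slot paired with the DMA channel.
 */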
/**
 * edma_unlink - cut link from one parameter RAM slot
 * @from: parameter RAM slot originating the link
 *
 * The originating slot should not be part of any active DMA transfer.
 * Its link is set to 0xffff.
 */
void edma_unlink(unsigned from)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);

	if (from >= edma_cc[ctlr]->num_slots)
		return;
	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}
EXPORT_SYMBOL(edma_unlink);
/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */

/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_write_slot);
/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save it as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_read_slot);
/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */

/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
void edma_pause(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_pause);
/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_resume);
int edma_trigger_channel(unsigned channel)
{
	unsigned ctlr;
	unsigned int mask;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);
	mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ctlr, SH_ESR, (channel >> 5), mask);

	pr_debug("EDMA: ESR%d %08x\n", (channel >> 5),
		 edma_shadow0_read_array(ctlr, SH_ESR, (channel >> 5)));
	return 0;
}
EXPORT_SYMBOL(edma_trigger_channel);
/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_ESR, j));
			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ctlr, EDMA_ECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(edma_start);
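/*
 * Sketch of a software-triggered transfer (illustrative, not part of
 * the original file), tying the calls above together for a channel
 * with no hardware event association:
 *
 *	edma_write_slot(ch, &param);	(describe the transfer)
 *	edma_start(ch);			(sets ESR; transfer begins)
 *	... completion callback fires with DMA_COMPLETE ...
 *	edma_stop(ch);
 */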
/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * Any active transfer on @channel is paused and all pending hardware
 * events are cleared.  The current transfer may not be resumed, and
 * the channel's Parameter RAM should be reinitialized before being
 * reused.
 */
void edma_stop(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_EER, j));

		/* REVISIT:  consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
EXPORT_SYMBOL(edma_stop);
/******************************************************************************
 *
 * edma_clean_channel - clean the parameter RAM entry and bring the EDMA
 * back to its initial state, for use when the media has been removed
 * before the transfer finished.  Useful for removable media.
 *
 * channel - channel number
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/
void edma_clean_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
EXPORT_SYMBOL(edma_clean_channel);
/*
 * edma_clear_event - clear an outstanding event on the DMA channel
 *
 * channel - channel number
 */
void edma_clear_event(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;
	if (channel < 32)
		edma_write(ctlr, EDMA_ECR, BIT(channel));
	else
		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
}
EXPORT_SYMBOL(edma_clear_event);
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)

static int edma_of_read_u32_to_s16_array(const struct device_node *np,
					 const char *propname, s16 *out_values,
					 size_t sz)
{
	int ret;

	ret = of_property_read_u16_array(np, propname, (u16 *)out_values, sz);
	if (ret)
		return ret;

	/* Terminate it */
	*out_values++ = -1;
	*out_values++ = -1;

	return 0;
}
static int edma_xbar_event_map(struct device *dev,
			       struct device_node *node,
			       struct edma_soc_info *pdata, int len)
{
	int ret, i;
	struct resource res;
	void __iomem *xbar;
	const s16 (*xbar_chans)[2];
	u32 shift, offset, mux;

	xbar_chans = devm_kzalloc(dev,
				  len/sizeof(s16) + 2*sizeof(s16),
				  GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(node, 1, &res);
	if (ret)
		return -EIO;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = edma_of_read_u32_to_s16_array(node,
					    "ti,edma-xbar-event-map",
					    (s16 *)xbar_chans,
					    len/sizeof(u32));
	if (ret)
		return -EIO;

	for (i = 0; xbar_chans[i][0] != -1; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = xbar_chans;

	return 0;
}
static int edma_of_parse_dt(struct device *dev,
			    struct device_node *node,
			    struct edma_soc_info *pdata)
{
	int ret = 0, i;
	u32 value;
	struct property *prop;
	size_t sz;
	struct edma_rsv_info *rsv_info;
	s8 (*queue_tc_map)[2], (*queue_priority_map)[2];

	memset(pdata, 0, sizeof(struct edma_soc_info));

	ret = of_property_read_u32(node, "dma-channels", &value);
	if (ret < 0)
		return ret;
	pdata->n_channel = value;

	ret = of_property_read_u32(node, "ti,edma-regions", &value);
	if (ret < 0)
		return ret;
	pdata->n_region = value;

	ret = of_property_read_u32(node, "ti,edma-slots", &value);
	if (ret < 0)
		return ret;
	pdata->n_slot = value;

	pdata->n_cc = 1;

	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
	if (!rsv_info)
		return -ENOMEM;
	pdata->rsv = rsv_info;

	queue_tc_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL);
	if (!queue_tc_map)
		return -ENOMEM;

	for (i = 0; i < 3; i++) {
		queue_tc_map[i][0] = i;
		queue_tc_map[i][1] = i;
	}
	queue_tc_map[i][0] = -1;
	queue_tc_map[i][1] = -1;

	pdata->queue_tc_mapping = queue_tc_map;

	queue_priority_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < 3; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;

	pdata->default_queue = 0;

	prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
	if (prop)
		ret = edma_xbar_event_map(dev, node, pdata, sz);

	return ret;
}
static struct of_dma_filter_info edma_filter_info = {
	.filter_fn = edma_filter_fn,
};

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     struct device_node *node)
{
	struct edma_soc_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = edma_of_parse_dt(dev, node, info);
	if (ret)
		return ERR_PTR(ret);

	dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
	of_dma_controller_register(dev->of_node, of_dma_simple_xlate,
				   &edma_filter_info);

	return info;
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     struct device_node *node)
{
	return ERR_PTR(-ENOSYS);
}
#endif
static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info **info = pdev->dev.platform_data;
	struct edma_soc_info *ninfo[EDMA_MAX_CC] = {NULL};
	s8 (*queue_priority_mapping)[2];
	s8 (*queue_tc_mapping)[2];
	int i, j, off, ln, found = 0;
	int status = -1;
	const s16 (*rsv_chans)[2];
	const s16 (*rsv_slots)[2];
	const s16 (*xbar_chans)[2];
	int irq[EDMA_MAX_CC] = {0, 0};
	int err_irq[EDMA_MAX_CC] = {0, 0};
	struct resource *r[EDMA_MAX_CC] = {NULL};
	struct resource res[EDMA_MAX_CC];
	char res_name[10];
	char irq_name[10];
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	int ret;

	if (node) {
		/* Check if this is a second instance registered */
		if (arch_num_cc) {
			dev_err(dev, "only one EDMA instance is supported via DT\n");
			return -ENODEV;
		}

		ninfo[0] = edma_setup_info_from_dt(dev, node);
		if (IS_ERR(ninfo[0])) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(ninfo[0]);
		}

		info = ninfo;
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	for (j = 0; j < EDMA_MAX_CC; j++) {
		if (!info[j]) {
			if (!found)
				return -ENODEV;
			break;
		}
		if (node) {
			ret = of_address_to_resource(node, j, &res[j]);
			if (!ret)
				r[j] = &res[j];
		} else {
			sprintf(res_name, "edma_cc%d", j);
			r[j] = platform_get_resource_byname(pdev,
						IORESOURCE_MEM,
						res_name);
		}
		if (!r[j]) {
			if (found)
				break;
			else
				return -ENODEV;
		} else {
			found = 1;
		}

		edmacc_regs_base[j] = devm_ioremap_resource(&pdev->dev, r[j]);
		if (IS_ERR(edmacc_regs_base[j]))
			return PTR_ERR(edmacc_regs_base[j]);

		edma_cc[j] = devm_kzalloc(&pdev->dev, sizeof(struct edma),
					GFP_KERNEL);
		if (!edma_cc[j])
			return -ENOMEM;

		edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
							EDMA_MAX_DMACH);
		edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
							EDMA_MAX_PARAMENTRY);
		edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
							EDMA_MAX_CC);

		edma_cc[j]->default_queue = info[j]->default_queue;

		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
			edmacc_regs_base[j]);

		for (i = 0; i < edma_cc[j]->num_slots; i++)
			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
					&dummy_paramset, PARM_SIZE);

		/* Mark all channels as unused */
		memset(edma_cc[j]->edma_unused, 0xff,
			sizeof(edma_cc[j]->edma_unused));

		if (info[j]->rsv) {
			/* Clear the reserved channels in unused list */
			rsv_chans = info[j]->rsv->rsv_chans;
			if (rsv_chans) {
				for (i = 0; rsv_chans[i][0] != -1; i++) {
					off = rsv_chans[i][0];
					ln = rsv_chans[i][1];
					clear_bits(off, ln,
						edma_cc[j]->edma_unused);
				}
			}

			/* Set the reserved slots in inuse list */
			rsv_slots = info[j]->rsv->rsv_slots;
			if (rsv_slots) {
				for (i = 0; rsv_slots[i][0] != -1; i++) {
					off = rsv_slots[i][0];
					ln = rsv_slots[i][1];
					set_bits(off, ln,
						edma_cc[j]->edma_inuse);
				}
			}
		}

		/* Clear the xbar mapped channels in unused list */
		xbar_chans = info[j]->xbar_chans;
		if (xbar_chans) {
			for (i = 0; xbar_chans[i][1] != -1; i++) {
				off = xbar_chans[i][1];
				clear_bits(off, 1,
					edma_cc[j]->edma_unused);
			}
		}

		if (node) {
			irq[j] = irq_of_parse_and_map(node, 0);
		} else {
			sprintf(irq_name, "edma%d", j);
			irq[j] = platform_get_irq_byname(pdev, irq_name);
		}
		edma_cc[j]->irq_res_start = irq[j];
		status = devm_request_irq(&pdev->dev, irq[j],
					dma_irq_handler, 0, "edma",
					&pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev,
				"devm_request_irq %d failed --> %d\n",
				irq[j], status);
			return status;
		}

		if (node) {
			err_irq[j] = irq_of_parse_and_map(node, 2);
		} else {
			sprintf(irq_name, "edma%d_err", j);
			err_irq[j] = platform_get_irq_byname(pdev, irq_name);
		}
		edma_cc[j]->irq_res_end = err_irq[j];
		status = devm_request_irq(&pdev->dev, err_irq[j],
					dma_ccerr_handler, 0,
					"edma_error", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev,
				"devm_request_irq %d failed --> %d\n",
				err_irq[j], status);
			return status;
		}

		for (i = 0; i < edma_cc[j]->num_channels; i++)
			map_dmach_queue(j, i, info[j]->default_queue);

		queue_tc_mapping = info[j]->queue_tc_mapping;
		queue_priority_mapping = info[j]->queue_priority_mapping;

		/* Event queue to TC mapping */
		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
			map_queue_tc(j, queue_tc_mapping[i][0],
					queue_tc_mapping[i][1]);

		/* Event queue priority mapping */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
					queue_priority_mapping[i][0],
					queue_priority_mapping[i][1]);

		/* Map the channel to param entry if channel mapping logic
		 * exists
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		for (i = 0; i < info[j]->n_region; i++) {
			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
			edma_write_array(j, EDMA_QRAE, i, 0x0);
		}
		arch_num_cc++;
	}

	return 0;
}
static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
	{}
};

static struct platform_driver edma_driver = {
	.driver = {
		.name = "edma",
		.of_match_table = edma_of_ids,
	},
	.probe = edma_probe,
};

static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);