/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
/* Offsets matching "struct edmacc_param" */
#define PARM_A_B_CNT		0x08
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 shadow regions */
#define EDMA_PARM	0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */
#define GET_NUM_DMACH(x)	(x & 0x7)		/* bits 0-2 */
#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4)	/* bits 4-6 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12)	/* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16)	/* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20)	/* bits 20-21 */
#define CHMAP_EXIST		BIT(24)
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
/* PaRAM slots are laid out like this */
struct edmacc_param {

/* fields in edmacc_param.opt */
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)
	struct edmacc_param param;

	struct virt_dma_desc vdesc;
	struct list_head node;
	enum dma_transfer_direction direction;
	struct edma_chan *echan;
	/* The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 *   so far to cover accounting. This is updated directly to processed
	 *   during edma_callback and is always <= processed, because processed
	 *   refers to the number of pending transfers (programmed to the EDMA
	 *   controller), whereas processed_stat tracks the number of transfers
	 *   accounted for so far.
	 *
	 * - residue: the number of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: the residue in bytes of data we have covered
	 *   so far for accounting. This is updated directly to residue
	 *   during callbacks to keep it current.
	 *
	 * - sg_len: tracks the length of the current intermediate transfer;
	 *   this is required to update the residue during intermediate transfer
	 *   completion callback.
	 */
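	/*
	 * A worked example of this bookkeeping (illustrative numbers only,
	 * not taken from the code): for a 3-element SG list of 4096 bytes
	 * each, residue starts at 12288. When the intermediate completion
	 * for the first element fires, sg_len is 4096, so residue drops to
	 * 8192, residue_stat follows it, and processed_stat catches up to
	 * processed.
	 */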
	struct edma_pset pset[0];

	struct virt_dma_chan vchan;
	struct list_head node;
	struct edma_desc *edesc;
	int slot[EDMA_MAX_SLOTS];
	struct dma_slave_config cfg;

	struct edma_soc_info *info;
	/* eDMA3 resource information */
	unsigned num_channels;
	unsigned num_qchannels;

	enum dma_event_q default_queue;

	bool unused_chan_list_done;
	/* The slot_inuse bit for each PaRAM slot is clear unless the
	 * slot is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	unsigned long *slot_inuse;

	/* The channel_unused bit for each channel is set unless the
	 * channel is in use on this platform; the bits are maintained
	 * by a bit of SoC-specific initialization code.
	 */
	unsigned long *channel_unused;
	struct dma_device dma_slave;
	struct edma_chan *slave_chans;

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
	return (unsigned int)__raw_readl(ecc->base + offset);

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
	__raw_writel(val, ecc->base + offset);

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
	unsigned val = edma_read(ecc, offset);
	edma_write(ecc, offset, val);

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
	unsigned val = edma_read(ecc, offset);
	edma_write(ecc, offset, val);

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
	unsigned val = edma_read(ecc, offset);
	edma_write(ecc, offset, val);

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
	return edma_read(ecc, offset + (i << 2));

static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
	edma_write(ecc, offset + (i << 2), val);

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
	edma_modify(ecc, offset + (i << 2), and, or);

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
	edma_or(ecc, offset + (i << 2), or);

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);

static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
	return edma_read(ecc, EDMA_SHADOW0 + offset);

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
	edma_write(ecc, EDMA_SHADOW0 + offset, val);

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);

static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));

static inline void edma_param_write(struct edma_cc *ecc, int offset,
				    int param_no, unsigned val)
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);

static inline void edma_param_modify(struct edma_cc *ecc, int offset,
				     int param_no, unsigned and, unsigned or)
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);

static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);

static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
static inline void set_bits(int offset, int len, unsigned long *p)
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);

static inline void clear_bits(int offset, int len, unsigned long *p)
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
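/*
 * For example (illustrative, not from the original source):
 * set_bits(4, 3, bitmap) sets bits 4, 5 and 6 of *bitmap, and
 * clear_bits(4, 3, bitmap) clears the same three bits again.
 */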
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
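/*
 * Worked example (values are illustrative): for queue_no = 1 and
 * priority = 7, bit = 4, so the call clears QUEPRI bits 4-6 and then
 * writes 0x7 there, leaving the priority fields of all other event
 * queues untouched.
 */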
static void edma_set_chmap(struct edma_chan *echan, int slot)
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (ecc->chmap_exist) {
		slot = EDMA_CHAN_SLOT(slot);
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
static int prepare_unused_channel_list(struct device *dev, void *data)
	struct platform_device *pdev = to_platform_device(dev);
	struct edma_cc *ecc = data;
	int dma_req_min = EDMA_CTLR_CHAN(ecc->id, 0);
	int dma_req_max = dma_req_min + ecc->num_channels;
	struct of_phandle_args dma_spec;
	struct platform_device *dma_pdev;

	count = of_property_count_strings(dev->of_node, "dma-names");
	for (i = 0; i < count; i++) {
		if (of_parse_phandle_with_args(dev->of_node, "dmas",

		if (!of_match_node(edma_of_ids, dma_spec.np)) {
			of_node_put(dma_spec.np);

		dma_pdev = of_find_device_by_node(dma_spec.np);
		if (&dma_pdev->dev != ecc->dev)

		clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
			  ecc->channel_unused);
		of_node_put(dma_spec.np);

	/* For non-OF case */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *res = &pdev->resource[i];

		if (!(res->flags & IORESOURCE_DMA))

		dma_req = (int)res->start;
		if (dma_req >= dma_req_min && dma_req < dma_req_max)
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
				  ecc->channel_unused);
static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

		edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
					 BIT(channel & 0x1f));
		edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
					 BIT(channel & 0x1f));
		edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
					 BIT(channel & 0x1f));
/*
 * PaRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);

static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
			   struct edmacc_param *param)
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
		slot = EDMA_CHAN_SLOT(slot);
		/* Requesting entry PaRAM slot for a HW triggered channel. */
		if (ecc->chmap_exist && slot < ecc->num_channels)
			slot = EDMA_SLOT_ANY;

		if (ecc->chmap_exist)
			slot = ecc->num_channels;
			slot = find_next_zero_bit(ecc->slot_inuse,
			if (slot == ecc->num_slots)
			if (!test_and_set_bit(slot, ecc->slot_inuse))
	} else if (slot >= ecc->num_slots) {
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
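/*
 * Typical use, as a sketch (error handling elided; "link_slot" and
 * "my_param" are hypothetical names, not taken from this driver):
 *
 *	int link_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
 *	if (link_slot >= 0) {
 *		edma_write_slot(ecc, link_slot, &my_param);
 *		...
 *		edma_free_slot(ecc, link_slot);
 *	}
 */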
static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)

	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
/*
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 */
static void edma_start(struct edma_chan *echan)
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	if (test_bit(channel, ecc->channel_unused)) {
		/* EDMA channels without event association */
		dev_dbg(ecc->dev, "ESR%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ESR, j));
		edma_shadow0_write_array(ecc, SH_ESR, j, mask);
		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
		dev_dbg(ecc->dev, "EER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_EER, j));
static void edma_stop(struct edma_chan *echan)
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_EECR, j, mask);
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write_array(ecc, EDMA_EMCR, j, mask);

	/* clear possibly pending completion interrupt */
	edma_shadow0_write_array(ecc, SH_ICR, j, mask);

	dev_dbg(ecc->dev, "EER%d %08x\n", j,
		edma_shadow0_read_array(ecc, SH_EER, j));

	/* REVISIT: consider guarding against inappropriate event
	 * chaining by overwriting with dummy_paramset.
	 */
/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_chan *echan)
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);

/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_chan *echan)
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
static void edma_trigger_channel(struct edma_chan *echan)
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);

	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
static void edma_clean_channel(struct edma_chan *echan)
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	/* Clear the corresponding EMR bits */
	edma_write_array(ecc, EDMA_EMCR, j, mask);
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_chan *echan,
				       enum dma_event_q eventq_no)
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int bit = (channel & 0x7) * 4;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)

	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
static int edma_alloc_channel(struct edma_chan *echan,
			      enum dma_event_q eventq_no)
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (!ecc->unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		int ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
					   prepare_unused_channel_list);

		ecc->unused_chan_list_done = true;

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */

	edma_setup_interrupt(echan, true);

	edma_assign_channel_eventq(echan, eventq_no);
static void edma_free_channel(struct edma_chan *echan)
	/* ensure no events are pending */
	/* REVISIT should probably take out of shadow region 0 */
	edma_setup_interrupt(echan, false);

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
	return container_of(d, struct edma_cc, dma_slave);

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
	return container_of(c, struct edma_chan, vchan.chan);

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
	return container_of(tx, struct edma_desc, vdesc.tx);

static void edma_desc_free(struct virt_dma_desc *vdesc)
	kfree(container_of(vdesc, struct edma_desc, vdesc));
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	/* Setup is needed for the first transfer */
	vdesc = vchan_next_desc(&echan->vchan);
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].param.opt,
			edesc->pset[j].param.src,
			edesc->pset[j].param.dst,
			edesc->pset[j].param.a_b_cnt,
			edesc->pset[j].param.ccnt,
			edesc->pset[j].param.src_dst_bidx,
			edesc->pset[j].param.src_dst_cidx,
			edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);

	edesc->processed += nslots;
	/*
	 * If this is the last set in a set of SG-list transactions, set up
	 * a link to the dummy slot; this results in all future events
	 * being absorbed and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);

		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of at most MAX_NR_SG sets.
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan);
		edma_trigger_channel(echan);

	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
static int edma_terminate_all(struct dma_chan *chan)
	struct edma_chan *echan = to_edma_chan(chan);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_terminate_all() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
		/* Move the cyclic channel back to default queue */
		if (echan->edesc->cyclic)
			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);
static int edma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

static int edma_dma_pause(struct dma_chan *chan)
	struct edma_chan *echan = to_edma_chan(chan);

static int edma_dma_resume(struct dma_chan *chan)
	struct edma_chan *echan = to_edma_chan(chan);
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: How much to send per event, in units of the device width
 * @acnt: The device access width (dev_width) in bytes
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    unsigned int acnt, unsigned int dma_length,
			    enum dma_transfer_direction direction)
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */

	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: in the A-sync transfer only, bcntrld is used, and it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the approach adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K - 1. This
		 * is assured because bcntrld is set to 0xffff at the end
		 * of this function.
		 */
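		/*
		 * A worked example of the split (numbers are illustrative
		 * only): dma_length = 1048576 bytes with acnt = 4 gives
		 * 262144 words. ccnt = 262144 / 65535 = 4 and
		 * bcnt = 262144 - 4 * 65535 = 4; since bcnt != 0, ccnt
		 * becomes 5. The first frame moves bcnt = 4 words, then
		 * bcntrld reloads 65535 for the remaining 4 frames:
		 * 4 + 4 * 65535 = 262144 words, i.e. the full length.
		 */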
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
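		/*
		 * For example (illustrative numbers): with acnt = 4 bytes
		 * and a maxburst of 16, bcnt = 16 and one AB-synced frame
		 * moves 64 bytes, so a single PaRAM set tops out at
		 * 4 * 16 * 65535 bytes, slightly under 4 MiB, per SG
		 * segment.
		 */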
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	/*
	 * The only time (bcntrld) auto-reload is required is the A-sync
	 * case, and then only a reload value of SZ_64K - 1 is needed.
	 * The link is initially invalidated here and will be populated
	 * later by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sg;

	if (unlikely(!echan || !sgl || !sg_len))

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);

	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);

	edesc->pset_nr = sg_len;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				dev_err(dev, "%s: Failed to allocate slot\n",

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);
		/*
		 * If this is the last in the current SG set of transactions,
		 * enable interrupts so that the next set is processed.
		 */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned int width, pset_len;
	if (unlikely(!echan || !len))

	/*
	 * Transfer size less than 64K can be handled with one PaRAM
	 * slot and with one burst.
	 */

	/*
	 * Transfer size bigger than 64K will be handled with a maximum
	 * of two PaRAM slots:
	 * slot1: (full_length / 32767) times 32767-byte bursts.
	 *	  ACNT = 32767, length1: (full_length / 32767) * 32767
	 * slot2: the remaining amount of data after slot1.
	 *	  ACNT = full_length - length1, length2 = ACNT
	 *
	 * When the full_length is a multiple of 32767 one slot can be
	 * used to complete the transfer.
	 */
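	/*
	 * A worked example of the split (numbers are illustrative only):
	 * len = 100000 gives pset_len = rounddown(100000, 32767) = 98301,
	 * so slot1 moves three 32767-byte bursts and slot2 mops up the
	 * remaining 100000 - 98301 = 1699 bytes.
	 */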
	pset_len = rounddown(len, width);
	/* One slot is enough for lengths that are a multiple of (SZ_32K - 1) */
	if (unlikely(pset_len == len))
	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
		dev_dbg(dev, "Failed to allocate a descriptor\n");

	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = len;
	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       width, pset_len, DMA_MEM_TO_MEM);

	edesc->absync = ret;

	edesc->pset[0].param.opt |= ITCCHEN;
		/* Enable transfer complete interrupt */
		edesc->pset[0].param.opt |= TCINTEN;
		/* Enable transfer complete chaining for the first slot */
		edesc->pset[0].param.opt |= TCCHEN;

		if (echan->slot[1] < 0) {
			echan->slot[1] = edma_alloc_slot(echan->ecc,
			if (echan->slot[1] < 0) {
				dev_err(dev, "%s: Failed to allocate slot\n",

		pset_len = width = len % (SZ_32K - 1);

		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
				       width, pset_len, DMA_MEM_TO_MEM);

		edesc->pset[1].param.opt |= ITCCHEN;
		edesc->pset[1].param.opt |= TCINTEN;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;

	if (unlikely(!echan || !buf_len || !period_len))

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);

	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				dev_err(dev, "%s: Failed to allocate slot\n",

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].param.opt,
			edesc->pset[i].param.src,
			edesc->pset[i].param.dst,
			edesc->pset[i].param.a_b_cnt,
			edesc->pset[i].param.ccnt,
			edesc->pset[i].param.src_dst_bidx,
			edesc->pset[i].param.src_dst_cidx,
			edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	/* Place the cyclic channel in the highest priority queue */
	edma_assign_channel_eventq(echan, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
static void edma_completion_handler(struct edma_chan *echan)
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc = echan->edesc;

	spin_lock(&echan->vchan.lock);
	if (edesc->cyclic) {
		vchan_cyclic_callback(&edesc->vdesc);
		spin_unlock(&echan->vchan.lock);
	} else if (edesc->processed == edesc->pset_nr) {
		vchan_cookie_complete(&edesc->vdesc);
		echan->edesc = NULL;

		dev_dbg(dev, "Transfer completed on channel %d\n",
		dev_dbg(dev, "Sub transfer completed on channel %d\n",

		/* Update statistics for tx_status */
		edesc->residue -= edesc->sg_len;
		edesc->residue_stat = edesc->residue;
		edesc->processed_stat = edesc->processed;

	edma_execute(echan);

	spin_unlock(&echan->vchan.lock);
/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
	struct edma_cc *ecc = data;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
static void edma_error_handler(struct edma_chan *echan)
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edmacc_param p;

	spin_lock(&echan->vchan.lock);

	edma_read_slot(ecc, echan->slot[0], &p);
	/*
	 * Issue later based on the missed flag, which is sure to happen
	 * because either:
	 * (1) we finished transmitting an intermediate slot and
	 *     edma_execute is coming up, or
	 * (2) we finished the current transfer and issue will
	 *     call edma_execute.
	 *
	 * Important note: issuing can be dangerous here and
	 * lead to some nasty recursion when we are in a NULL
	 * slot. So we avoid doing so and set the missed flag.
	 */
	if (p.a_b_cnt == 0 && p.ccnt == 0) {
		dev_dbg(dev, "Error on null slot, setting miss\n");
		/*
		 * The slot is already programmed but the event got
		 * missed, so it's safe to issue it here.
		 */
		dev_dbg(dev, "Missed event, TRIGGERING\n");
		edma_clean_channel(echan);
		edma_trigger_channel(echan);

	spin_unlock(&echan->vchan.lock);
static inline bool edma_error_pending(struct edma_cc *ecc)
	if (edma_read_array(ecc, EDMA_EMR, 0) ||
	    edma_read_array(ecc, EDMA_EMR, 1) ||
	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))

/* eDMA error interrupt handler */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
	struct edma_cc *ecc = data;
	unsigned int cnt = 0;

	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");

	if (!edma_error_pending(ecc))

	/* Event missed register(s) */
	for (j = 0; j < 2; j++) {
		val = edma_read_array(ecc, EDMA_EMR, j);

		dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
		for (i = find_next_bit(&emr, 32, 0); i < 32;
		     i = find_next_bit(&emr, 32, i + 1)) {
			int k = (j << 5) + i;

			/* Clear the corresponding EMR bits */
			edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
			edma_shadow0_write_array(ecc, SH_SECR, j,
			edma_error_handler(&ecc->slave_chans[k]);

	val = edma_read(ecc, EDMA_QEMR);
		dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
		/* Not reported, just clear the interrupt reason. */
		edma_write(ecc, EDMA_QEMCR, val);
		edma_shadow0_write(ecc, SH_QSECR, val);

	val = edma_read(ecc, EDMA_CCERR);
		dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
		/* Not reported, just clear the interrupt reason. */
		edma_write(ecc, EDMA_CCERRCLR, val);

	if (!edma_error_pending(ecc))

	edma_write(ecc, EDMA_EEVAL, 1);
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;

	ret = edma_alloc_channel(echan, EVENTQ_DEFAULT);

	echan->slot[0] = edma_alloc_slot(echan->ecc, echan->ch_num);
	if (echan->slot[0] < 0) {
		dev_err(dev, "Entry slot allocation failed for channel %u\n",
			EDMA_CHAN_SLOT(echan->ch_num));

	/* Set up channel -> slot mapping for the entry slot */
	edma_set_chmap(echan, echan->slot[0]);
	echan->alloced = true;

	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	edma_free_channel(echan);

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
	struct edma_chan *echan = to_edma_chan(chan);

	/* Terminate transfers */

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;

	/* Set entry slot to the dummy slot */
	edma_set_chmap(echan, echan->ecc->dummy_slot);

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan);
		echan->alloced = false;

	dev_dbg(chan->device->dev, "freeing channel for %u\n", echan->ch_num);
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
static u32 edma_residue(struct edma_desc *edesc)
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	/*
	 * We always read the dst/src position from the first PaRAM
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;

	/*
	 * For SG operation we catch up with the last processed
	 * pset.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;

	return edesc->residue_stat;
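/*
 * Worked example of the SG walk above (illustrative numbers only): with
 * three already-issued psets of 4096 bytes each and pos landing 1000
 * bytes into the second one, the first iteration marks pset 0 done
 * (residue_stat -= 4096) and the second returns residue_stat - 1000.
 */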
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void edma_dma_init(struct edma_cc *ecc)
	struct dma_device *ddev = &ecc->dma_slave;

	dma_cap_zero(ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);

	ddev->device_prep_slave_sg = edma_prep_slave_sg;
	ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
	ddev->device_free_chan_resources = edma_free_chan_resources;
	ddev->device_issue_pending = edma_issue_pending;
	ddev->device_tx_status = edma_tx_status;
	ddev->device_config = edma_slave_config;
	ddev->device_pause = edma_dma_pause;
	ddev->device_resume = edma_dma_resume;
	ddev->device_terminate_all = edma_terminate_all;

	ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	ddev->dev = ecc->dev;

	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &ecc->slave_chans[i];

		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, ddev);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_QDMACH(cccfg);
	ecc->num_qchannels = value * 2;

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
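	/*
	 * Worked example of the decode (a hypothetical register value, not
	 * taken from any datasheet): cccfg = 0x01234045 yields
	 * NUM_DMACH = 5 -> 64 channels, NUM_QDMACH = 4 -> 8 QDMA channels,
	 * NUM_PAENTRY = 4 -> 256 PaRAM slots, NUM_EVQUE = 3 -> 4 TCs/queues,
	 * NUM_REGN = 2 -> 4 regions, and bit 24 set -> channel mapping
	 * exists.
	 */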
	/* Nothing needs to be done if the queue priority mapping is provided */
	if (pdata->queue_priority_mapping)

	/*
	 * Configure TC/queue priority as follows:
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
					  sizeof(*queue_priority_map),
					  GFP_KERNEL);
	if (!queue_priority_map)

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;
#if IS_ENABLED(CONFIG_OF)
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);

	ret = of_address_to_resource(dev->of_node, 1, &res);

	xbar = devm_ioremap(dev, res.start, resource_size(&res));

	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,

	/* Invalidate last entry for the other user of this mess */
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
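	/*
	 * Worked example of one mux update (illustrative values): with
	 * xbar_chans[i] = {12, 0x17}, offset = 0x14 and shift = 24, so
	 * byte 3 of the 32-bit crossbar register at 0x14 is replaced
	 * with 12 while the other three event mappings in that register
	 * are preserved.
	 */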
	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
	struct edma_soc_info *info;
	struct property *prop;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz);
		ret = edma_xbar_event_map(dev, info, sz);
			return ERR_PTR(ret);

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
	return ERR_PTR(-EINVAL);
static int edma_probe(struct platform_device *pdev)
	struct edma_soc_info *info = pdev->dev.platform_data;
	s8 (*queue_priority_mapping)[2];
	const s16 (*rsv_chans)[2];
	const s16 (*rsv_slots)[2];
	const s16 (*xbar_chans)[2];
	struct resource *mem;
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc;

		info = edma_setup_info_from_dt(dev);
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
		dev_err(dev, "pm_runtime_get_sync() failed\n");

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
		dev_err(dev, "Can't allocate controller\n");

	/* When booting with DT the pdev->id is -1 */

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
			dev_err(dev, "no mem resource?\n");

	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);
	if (!ecc->slave_chans)

	ecc->channel_unused = devm_kcalloc(dev,
					   BITS_TO_LONGS(ecc->num_channels),
					   sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->channel_unused)

	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->slot_inuse)

	ecc->default_queue = info->default_queue;

	for (i = 0; i < ecc->num_slots; i++)
		edma_write_slot(ecc, i, &dummy_paramset);
	/* Mark all channels as unused */
	bitmap_fill(ecc->channel_unused, ecc->num_channels);
	/* Clear the reserved channels in unused list */
	rsv_chans = info->rsv->rsv_chans;
		for (i = 0; rsv_chans[i][0] != -1; i++) {
			off = rsv_chans[i][0];
			ln = rsv_chans[i][1];
			clear_bits(off, ln, ecc->channel_unused);

	/* Set the reserved slots in inuse list */
	rsv_slots = info->rsv->rsv_slots;
		for (i = 0; rsv_slots[i][0] != -1; i++) {
			off = rsv_slots[i][0];
			ln = rsv_slots[i][1];
			set_bits(off, ln, ecc->slot_inuse);

	/* Clear the xbar mapped channels in unused list */
	xbar_chans = info->xbar_chans;
		for (i = 0; xbar_chans[i][1] != -1; i++) {
			off = xbar_chans[i][1];
			clear_bits(off, 1, ecc->channel_unused);

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;

	queue_priority_mapping = info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_region; i++) {
		edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
		edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
		edma_write_array(ecc, EDMA_QRAE, i, 0x0);

	/* Init the dma device and channels */

	for (i = 0; i < ecc->num_channels; i++) {
		/* Assign all channels to the default queue */
		edma_assign_channel_eventq(&ecc->slave_chans[i],
					   info->default_queue);
		/* Set entry slot to the dummy slot */
		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);

	ret = dma_async_device_register(&ecc->dma_slave);

	of_dma_controller_register(node, of_dma_xlate_by_chan_id,

	dev_info(dev, "TI EDMA DMA engine driver\n");

	edma_free_slot(ecc, ecc->dummy_slot);
static int edma_remove(struct platform_device *pdev)
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc, ecc->dummy_slot);

#ifdef CONFIG_PM_SLEEP
static int edma_pm_resume(struct device *dev)
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	s8 (*queue_priority_mapping)[2];

	queue_priority_mapping = ecc->info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced) {
			/* ensure access through shadow region 0 */
			edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,

			edma_setup_interrupt(&echan[i], true);

			/* Set up channel -> slot mapping for the entry slot */
			edma_set_chmap(&echan[i], echan[i].slot[0]);

static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
		.of_match_table = edma_of_ids,

bool edma_filter_fn(struct dma_chan *chan, void *param)
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;

		return ch_req == echan->ch_num;
EXPORT_SYMBOL(edma_filter_fn);
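/*
 * A sketch of how a client typically uses the filter (standard
 * dmaengine API; "dma_ch_num" is a hypothetical platform-provided
 * channel number, not something defined in this driver):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	unsigned dma_ch_num = 12;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &dma_ch_num);
 */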
static int edma_init(void)
	return platform_driver_register(&edma_driver);
subsys_initcall(edma_init);

static void __exit edma_exit(void)
	platform_driver_unregister(&edma_driver);
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");