2 * EDMA3 support for DaVinci
4 * Copyright (C) 2006-2009 Texas Instruments.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 #include <linux/kernel.h>
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/interrupt.h>
24 #include <linux/platform_device.h>
27 #include <mach/edma.h>
29 /* Offsets matching "struct edmacc_param" */
32 #define PARM_A_B_CNT 0x08
34 #define PARM_SRC_DST_BIDX 0x10
35 #define PARM_LINK_BCNTRLD 0x14
36 #define PARM_SRC_DST_CIDX 0x18
37 #define PARM_CCNT 0x1c
39 #define PARM_SIZE 0x20
41 /* Offsets for EDMA CC global channel registers and their shadows */
42 #define SH_ER 0x00 /* 64 bits */
43 #define SH_ECR 0x08 /* 64 bits */
44 #define SH_ESR 0x10 /* 64 bits */
45 #define SH_CER 0x18 /* 64 bits */
46 #define SH_EER 0x20 /* 64 bits */
47 #define SH_EECR 0x28 /* 64 bits */
48 #define SH_EESR 0x30 /* 64 bits */
49 #define SH_SER 0x38 /* 64 bits */
50 #define SH_SECR 0x40 /* 64 bits */
51 #define SH_IER 0x50 /* 64 bits */
52 #define SH_IECR 0x58 /* 64 bits */
53 #define SH_IESR 0x60 /* 64 bits */
54 #define SH_IPR 0x68 /* 64 bits */
55 #define SH_ICR 0x70 /* 64 bits */
65 /* Offsets for EDMA CC global registers */
66 #define EDMA_REV 0x0000
67 #define EDMA_CCCFG 0x0004
68 #define EDMA_QCHMAP 0x0200 /* 8 registers */
69 #define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */
70 #define EDMA_QDMAQNUM 0x0260
71 #define EDMA_QUETCMAP 0x0280
72 #define EDMA_QUEPRI 0x0284
73 #define EDMA_EMR 0x0300 /* 64 bits */
74 #define EDMA_EMCR 0x0308 /* 64 bits */
75 #define EDMA_QEMR 0x0310
76 #define EDMA_QEMCR 0x0314
77 #define EDMA_CCERR 0x0318
78 #define EDMA_CCERRCLR 0x031c
79 #define EDMA_EEVAL 0x0320
80 #define EDMA_DRAE 0x0340 /* 4 x 64 bits*/
81 #define EDMA_QRAE 0x0380 /* 4 registers */
82 #define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */
83 #define EDMA_QSTAT 0x0600 /* 2 registers */
84 #define EDMA_QWMTHRA 0x0620
85 #define EDMA_QWMTHRB 0x0624
86 #define EDMA_CCSTAT 0x0640
88 #define EDMA_M 0x1000 /* global channel registers */
89 #define EDMA_ECR 0x1008
90 #define EDMA_ECRH 0x100C
91 #define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */
92 #define EDMA_PARM 0x4000 /* 128 param entries */
94 #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5))
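/*
 * Illustration of the offset arithmetic: each PaRAM slot is PARM_SIZE (0x20)
 * bytes, so slot 64, for example, starts at PARM_OFFSET(64) = 0x4000 +
 * (64 << 5) = 0x4800, and its OPT word sits at PARM_OFFSET(64) + PARM_OPT.
 */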
96 #define EDMA_DCHMAP 0x0100 /* 64 registers */
97 #define CHMAP_EXIST BIT(24)
99 #define EDMA_MAX_DMACH 64
100 #define EDMA_MAX_PARAMENTRY 512
101 #define EDMA_MAX_CC 2
104 /*****************************************************************************/
106 static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
108 static inline unsigned int edma_read(unsigned ctlr, int offset)
110 return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
113 static inline void edma_write(unsigned ctlr, int offset, int val)
115 __raw_writel(val, edmacc_regs_base[ctlr] + offset);
117 static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
120 unsigned val = edma_read(ctlr, offset);
123 edma_write(ctlr, offset, val);
125 static inline void edma_and(unsigned ctlr, int offset, unsigned and)
127 unsigned val = edma_read(ctlr, offset);
129 edma_write(ctlr, offset, val);
131 static inline void edma_or(unsigned ctlr, int offset, unsigned or)
133 unsigned val = edma_read(ctlr, offset);
135 edma_write(ctlr, offset, val);
137 static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
139 return edma_read(ctlr, offset + (i << 2));
141 static inline void edma_write_array(unsigned ctlr, int offset, int i,
144 edma_write(ctlr, offset + (i << 2), val);
146 static inline void edma_modify_array(unsigned ctlr, int offset, int i,
147 unsigned and, unsigned or)
149 edma_modify(ctlr, offset + (i << 2), and, or);
151 static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
153 edma_or(ctlr, offset + (i << 2), or);
155 static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
158 edma_or(ctlr, offset + ((i*2 + j) << 2), or);
160 static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
163 edma_write(ctlr, offset + ((i*2 + j) << 2), val);
165 static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
167 return edma_read(ctlr, EDMA_SHADOW0 + offset);
169 static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
172 return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
174 static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
176 edma_write(ctlr, EDMA_SHADOW0 + offset, val);
178 static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
181 edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
183 static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
186 return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
188 static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
191 edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
193 static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
194 unsigned and, unsigned or)
196 edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
198 static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
201 edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
203 static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
206 edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
209 /*****************************************************************************/
211 /* actual number of DMA channels and slots on this silicon */
213 /* how many dma resources of each type */
214 unsigned num_channels;
219 enum dma_event_q default_queue;
	/* list of channels with no event trigger; terminated by "-1" */
	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
227 DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
	/* The edma_unused bit for each channel is set unless the channel
	 * is used by some driver on this platform; the bitmap is filled in
	 * by SoC-specific initialization code.
	 */
233 DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
235 unsigned irq_res_start;
236 unsigned irq_res_end;
238 struct dma_interrupt_data {
239 void (*callback)(unsigned channel, unsigned short ch_status,
242 } intr_data[EDMA_MAX_DMACH];
245 static struct edma *edma_info[EDMA_MAX_CC];
246 static int arch_num_cc;
248 /* dummy param set used to (re)initialize parameter RAM slots */
249 static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};
254 /*****************************************************************************/
256 static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
257 enum dma_event_q queue_no)
259 int bit = (ch_no & 0x7) * 4;
261 /* default to low priority queue */
262 if (queue_no == EVENTQ_DEFAULT)
263 queue_no = edma_info[ctlr]->default_queue;
266 edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
267 ~(0x7 << bit), queue_no << bit);
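/*
 * For illustration: each DMA channel owns a 4-bit field in one of the
 * EDMA_DMAQNUM registers, eight channels per register.  Channel 11, say,
 * lands in DMAQNUM[1] (11 >> 3), bits 12..15 ((11 & 0x7) * 4), which
 * map_dmach_queue() above rewrites with the chosen event queue number.
 */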
270 static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
272 int bit = queue_no * 4;
273 edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
276 static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
279 int bit = queue_no * 4;
280 edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
281 ((priority & 0x7) << bit));
/**
 * map_dmach_param - maps channel number to param entry number
 *
 * This maps the DMA channel number to the parameter entry number. In
 * other words, using the DMA channel mapping registers a param entry
 * can be mapped to any channel.
 *
 * Callers are responsible for ensuring the channel mapping logic is
 * included in that particular EDMA variant (e.g. dm646x).
 */
295 static void __init map_dmach_param(unsigned ctlr)
298 for (i = 0; i < EDMA_MAX_DMACH; i++)
299 edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5));
303 setup_dma_interrupt(unsigned lch,
304 void (*callback)(unsigned channel, u16 ch_status, void *data),
309 ctlr = EDMA_CTLR(lch);
310 lch = EDMA_CHAN_SLOT(lch);
313 edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
314 (1 << (lch & 0x1f)));
317 edma_info[ctlr]->intr_data[lch].callback = callback;
318 edma_info[ctlr]->intr_data[lch].data = data;
321 edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
322 (1 << (lch & 0x1f)));
323 edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
324 (1 << (lch & 0x1f)));
328 static int irq2ctlr(int irq)
330 if (irq >= edma_info[0]->irq_res_start &&
331 irq <= edma_info[0]->irq_res_end)
333 else if (irq >= edma_info[1]->irq_res_start &&
334 irq <= edma_info[1]->irq_res_end)
340 /******************************************************************************
342 * DMA interrupt handler
344 *****************************************************************************/
345 static irqreturn_t dma_irq_handler(int irq, void *data)
349 unsigned int cnt = 0;
351 ctlr = irq2ctlr(irq);
353 dev_dbg(data, "dma_irq_handler\n");
355 if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0)
356 && (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
361 if (edma_shadow0_read_array(ctlr, SH_IPR, 0))
363 else if (edma_shadow0_read_array(ctlr, SH_IPR, 1))
367 dev_dbg(data, "IPR%d %08x\n", j,
368 edma_shadow0_read_array(ctlr, SH_IPR, j));
369 for (i = 0; i < 32; i++) {
370 int k = (j << 5) + i;
371 if (edma_shadow0_read_array(ctlr, SH_IPR, j) &
373 /* Clear the corresponding IPR bits */
374 edma_shadow0_write_array(ctlr, SH_ICR, j,
376 if (edma_info[ctlr]->intr_data[k].callback) {
377 edma_info[ctlr]->intr_data[k].callback(
379 edma_info[ctlr]->intr_data[k].
388 edma_shadow0_write(ctlr, SH_IEVAL, 1);
392 /******************************************************************************
394 * DMA error interrupt handler
396 *****************************************************************************/
397 static irqreturn_t dma_ccerr_handler(int irq, void *data)
401 unsigned int cnt = 0;
403 ctlr = irq2ctlr(irq);
405 dev_dbg(data, "dma_ccerr_handler\n");
407 if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
408 (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
409 (edma_read(ctlr, EDMA_QEMR) == 0) &&
410 (edma_read(ctlr, EDMA_CCERR) == 0))
415 if (edma_read_array(ctlr, EDMA_EMR, 0))
417 else if (edma_read_array(ctlr, EDMA_EMR, 1))
420 dev_dbg(data, "EMR%d %08x\n", j,
421 edma_read_array(ctlr, EDMA_EMR, j));
422 for (i = 0; i < 32; i++) {
423 int k = (j << 5) + i;
424 if (edma_read_array(ctlr, EDMA_EMR, j) &
426 /* Clear the corresponding EMR bits */
427 edma_write_array(ctlr, EDMA_EMCR, j,
430 edma_shadow0_write_array(ctlr, SH_SECR,
432 if (edma_info[ctlr]->intr_data[k].
434 edma_info[ctlr]->intr_data[k].
437 edma_info[ctlr]->intr_data
442 } else if (edma_read(ctlr, EDMA_QEMR)) {
443 dev_dbg(data, "QEMR %02x\n",
444 edma_read(ctlr, EDMA_QEMR));
445 for (i = 0; i < 8; i++) {
446 if (edma_read(ctlr, EDMA_QEMR) & (1 << i)) {
447 /* Clear the corresponding IPR bits */
448 edma_write(ctlr, EDMA_QEMCR, 1 << i);
449 edma_shadow0_write(ctlr, SH_QSECR,
452 /* NOTE: not reported!! */
455 } else if (edma_read(ctlr, EDMA_CCERR)) {
456 dev_dbg(data, "CCERR %08x\n",
457 edma_read(ctlr, EDMA_CCERR));
458 /* FIXME: CCERR.BIT(16) ignored! much better
459 * to just write CCERRCLR with CCERR value...
461 for (i = 0; i < 8; i++) {
462 if (edma_read(ctlr, EDMA_CCERR) & (1 << i)) {
463 /* Clear the corresponding IPR bits */
464 edma_write(ctlr, EDMA_CCERRCLR, 1 << i);
466 /* NOTE: not reported!! */
470 if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0)
471 && (edma_read_array(ctlr, EDMA_EMR, 1) == 0)
472 && (edma_read(ctlr, EDMA_QEMR) == 0)
473 && (edma_read(ctlr, EDMA_CCERR) == 0)) {
480 edma_write(ctlr, EDMA_EEVAL, 1);
484 /******************************************************************************
486 * Transfer controller error interrupt handlers
488 *****************************************************************************/
490 #define tc_errs_handled false /* disabled as long as they're NOPs */
492 static irqreturn_t dma_tc0err_handler(int irq, void *data)
494 dev_dbg(data, "dma_tc0err_handler\n");
498 static irqreturn_t dma_tc1err_handler(int irq, void *data)
500 dev_dbg(data, "dma_tc1err_handler\n");
504 static int reserve_contiguous_slots(int ctlr, unsigned int id,
505 unsigned int num_slots,
506 unsigned int start_slot)
509 unsigned int count = num_slots;
510 int stop_slot = start_slot;
511 DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
513 for (i = start_slot; i < edma_info[ctlr]->num_slots; ++i) {
514 j = EDMA_CHAN_SLOT(i);
515 if (!test_and_set_bit(j, edma_info[ctlr]->edma_inuse)) {
516 /* Record our current beginning slot */
517 if (count == num_slots)
521 set_bit(j, tmp_inuse);
526 clear_bit(j, tmp_inuse);
528 if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
	/*
	 * We have to clear any bits that we set
	 * if we run out of parameter RAM slots, i.e. we do find a set
	 * of contiguous parameter RAM slots but do not find the exact number
	 * requested, because we may reach the total number of parameter RAM
	 * slots first.
	 */
542 if (i == edma_info[ctlr]->num_slots)
545 for (j = start_slot; j < stop_slot; j++)
546 if (test_bit(j, tmp_inuse))
547 clear_bit(j, edma_info[ctlr]->edma_inuse);
552 for (j = i - num_slots + 1; j <= i; ++j)
553 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
554 &dummy_paramset, PARM_SIZE);
556 return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
559 static int prepare_unused_channel_list(struct device *dev, void *data)
561 struct platform_device *pdev = to_platform_device(dev);
564 for (i = 0; i < pdev->num_resources; i++) {
565 if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
566 (int)pdev->resource[i].start >= 0) {
567 ctlr = EDMA_CTLR(pdev->resource[i].start);
568 clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
569 edma_info[ctlr]->edma_unused);
576 /*-----------------------------------------------------------------------*/
578 static bool unused_chan_list_done;
580 /* Resource alloc/free: dma channels, parameter RAM slots */
583 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
584 * @channel: specific channel to allocate; negative for "any unmapped channel"
585 * @callback: optional; to be issued on DMA completion or errors
586 * @data: passed to callback
587 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
588 * Controller (TC) executes requests using this channel. Use
589 * EVENTQ_DEFAULT unless you really need a high priority queue.
591 * This allocates a DMA channel and its associated parameter RAM slot.
592 * The parameter RAM is initialized to hold a dummy transfer.
594 * Normal use is to pass a specific channel number as @channel, to make
595 * use of hardware events mapped to that channel. When the channel will
596 * be used only for software triggering or event chaining, channels not
597 * mapped to hardware events (or mapped to unused events) are preferable.
599 * DMA transfers start from a channel using edma_start(), or by
600 * chaining. When the transfer described in that channel's parameter RAM
601 * slot completes, that slot's data may be reloaded through a link.
603 * DMA errors are only reported to the @callback associated with the
604 * channel driving that transfer, but transfer completion callbacks can
605 * be sent to another channel under control of the TCC field in
606 * the option word of the transfer's parameter RAM set. Drivers must not
607 * use DMA transfer completion callbacks for channels they did not allocate.
608 * (The same applies to TCC codes used in transfer chaining.)
610 * Returns the number of the channel, else negative errno.
612 int edma_alloc_channel(int channel,
613 void (*callback)(unsigned channel, u16 ch_status, void *data),
615 enum dma_event_q eventq_no)
617 unsigned i, done = 0, ctlr = 0;
620 if (!unused_chan_list_done) {
622 * Scan all the platform devices to find out the EDMA channels
623 * used and clear them in the unused list, making the rest
624 * available for ARM usage.
626 ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
627 prepare_unused_channel_list);
631 unused_chan_list_done = true;
635 ctlr = EDMA_CTLR(channel);
636 channel = EDMA_CHAN_SLOT(channel);
640 for (i = 0; i < arch_num_cc; i++) {
643 channel = find_next_bit(edma_info[i]->
645 edma_info[i]->num_channels,
647 if (channel == edma_info[i]->num_channels)
649 if (!test_and_set_bit(channel,
650 edma_info[i]->edma_inuse)) {
662 } else if (channel >= edma_info[ctlr]->num_channels) {
664 } else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) {
668 /* ensure access through shadow region 0 */
669 edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f));
671 /* ensure no events are pending */
672 edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
673 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
674 &dummy_paramset, PARM_SIZE);
677 setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
680 map_dmach_queue(ctlr, channel, eventq_no);
682 return EDMA_CTLR_CHAN(ctlr, channel);
684 EXPORT_SYMBOL(edma_alloc_channel);
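/*
 * A minimal usage sketch; the callback body, "pdata->dma_ch" and "dev" are
 * hypothetical and stand in for whatever the calling driver provides:
 *
 *	static void xfer_callback(unsigned channel, u16 ch_status, void *data)
 *	{
 *		... notify whoever is waiting on the transfer ...
 *	}
 *
 *	channel = edma_alloc_channel(pdata->dma_ch, xfer_callback, dev,
 *				     EVENTQ_DEFAULT);
 *	if (channel < 0)
 *		return channel;
 *	... program the channel's PaRAM slot, then call edma_start(channel);
 *	... when done: edma_stop(channel); edma_free_channel(channel);
 */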
688 * edma_free_channel - deallocate DMA channel
689 * @channel: dma channel returned from edma_alloc_channel()
691 * This deallocates the DMA channel and associated parameter RAM slot
692 * allocated by edma_alloc_channel().
694 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
698 void edma_free_channel(unsigned channel)
702 ctlr = EDMA_CTLR(channel);
703 channel = EDMA_CHAN_SLOT(channel);
705 if (channel >= edma_info[ctlr]->num_channels)
708 setup_dma_interrupt(channel, NULL, NULL);
709 /* REVISIT should probably take out of shadow region 0 */
711 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
712 &dummy_paramset, PARM_SIZE);
713 clear_bit(channel, edma_info[ctlr]->edma_inuse);
715 EXPORT_SYMBOL(edma_free_channel);
718 * edma_alloc_slot - allocate DMA parameter RAM
719 * @slot: specific slot to allocate; negative for "any unused slot"
721 * This allocates a parameter RAM slot, initializing it to hold a
722 * dummy transfer. Slots allocated using this routine have not been
723 * mapped to a hardware DMA channel, and will normally be used by
724 * linking to them from a slot associated with a DMA channel.
726 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
727 * slots may be allocated on behalf of DSP firmware.
729 * Returns the number of the slot, else negative errno.
731 int edma_alloc_slot(unsigned ctlr, int slot)
734 slot = EDMA_CHAN_SLOT(slot);
737 slot = edma_info[ctlr]->num_channels;
739 slot = find_next_zero_bit(edma_info[ctlr]->edma_inuse,
740 edma_info[ctlr]->num_slots, slot);
741 if (slot == edma_info[ctlr]->num_slots)
743 if (!test_and_set_bit(slot,
744 edma_info[ctlr]->edma_inuse))
747 } else if (slot < edma_info[ctlr]->num_channels ||
748 slot >= edma_info[ctlr]->num_slots) {
750 } else if (test_and_set_bit(slot, edma_info[ctlr]->edma_inuse)) {
754 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
755 &dummy_paramset, PARM_SIZE);
757 return EDMA_CTLR_CHAN(ctlr, slot);
759 EXPORT_SYMBOL(edma_alloc_slot);
762 * edma_free_slot - deallocate DMA parameter RAM
763 * @slot: parameter RAM slot returned from edma_alloc_slot()
765 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
769 void edma_free_slot(unsigned slot)
773 ctlr = EDMA_CTLR(slot);
774 slot = EDMA_CHAN_SLOT(slot);
776 if (slot < edma_info[ctlr]->num_channels ||
777 slot >= edma_info[ctlr]->num_slots)
780 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
781 &dummy_paramset, PARM_SIZE);
782 clear_bit(slot, edma_info[ctlr]->edma_inuse);
784 EXPORT_SYMBOL(edma_free_slot);
/**
 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
 * The API will return the starting point of a set of
 * contiguous parameter RAM slots that have been requested
 *
 * @id: can only be EDMA_CONT_PARAMS_ANY, EDMA_CONT_PARAMS_FIXED_EXACT
 * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @count: number of contiguous parameter RAM slots
 * @slot: the start value of the parameter RAM slot that should be passed if id
 * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 *
 * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
 * contiguous parameter RAM slots from parameter RAM slot 64 in the case of
 * DaVinci SOCs and 32 in the case of DA8xx SOCs.
 *
 * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
 * set of contiguous parameter RAM slots from the "slot" that is passed as an
 * argument to the API.
 *
 * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API starts looking for
 * a set of contiguous parameter RAM slots from the "slot" that is passed as
 * an argument to the API. On failure, the API will try to find a set of
 * contiguous parameter RAM slots from the remaining parameter RAM slots.
 */
812 int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
	/*
	 * The start slot requested should be greater than
	 * the number of channels and less than the total number
	 * of slots.
	 */
819 if ((id != EDMA_CONT_PARAMS_ANY) &&
820 (slot < edma_info[ctlr]->num_channels ||
821 slot >= edma_info[ctlr]->num_slots))
	/*
	 * The number of parameter RAM slots requested cannot be less than 1
	 * and cannot be more than the number of slots minus the number of
	 * channels.
	 */
829 if (count < 1 || count >
830 (edma_info[ctlr]->num_slots - edma_info[ctlr]->num_channels))
834 case EDMA_CONT_PARAMS_ANY:
835 return reserve_contiguous_slots(ctlr, id, count,
836 edma_info[ctlr]->num_channels);
837 case EDMA_CONT_PARAMS_FIXED_EXACT:
838 case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
839 return reserve_contiguous_slots(ctlr, id, count, slot);
845 EXPORT_SYMBOL(edma_alloc_cont_slots);
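/*
 * Rough example: a driver needing sixteen contiguous slots anywhere above
 * the channel-mapped region could ask for
 *
 *	slot = edma_alloc_cont_slots(ctlr, EDMA_CONT_PARAMS_ANY, 0, 16);
 *
 * (the start value is ignored for EDMA_CONT_PARAMS_ANY; the FIXED variants
 * use it as the first slot to try) and later release the set with
 * edma_free_cont_slots(slot, 16).
 */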
848 * edma_free_cont_slots - deallocate DMA parameter RAM slots
 * @slot: first parameter RAM slot of the set of contiguous slots to be freed
 * @count: the number of contiguous parameter RAM slots to be freed
 *
 * This deallocates the parameter RAM slots allocated by
 * edma_alloc_cont_slots().
 * Callers/applications need to keep track of the sets of contiguous
 * parameter RAM slots that have been allocated using the
 * edma_alloc_cont_slots() API.
 * Callers are responsible for ensuring the slots are inactive, and will
 * not be activated.
 */
860 int edma_free_cont_slots(unsigned slot, int count)
862 unsigned ctlr, slot_to_free;
865 ctlr = EDMA_CTLR(slot);
866 slot = EDMA_CHAN_SLOT(slot);
868 if (slot < edma_info[ctlr]->num_channels ||
869 slot >= edma_info[ctlr]->num_slots ||
873 for (i = slot; i < slot + count; ++i) {
875 slot_to_free = EDMA_CHAN_SLOT(i);
877 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
878 &dummy_paramset, PARM_SIZE);
879 clear_bit(slot_to_free, edma_info[ctlr]->edma_inuse);
884 EXPORT_SYMBOL(edma_free_cont_slots);
886 /*-----------------------------------------------------------------------*/
888 /* Parameter RAM operations (i) -- read/write partial slots */
891 * edma_set_src - set initial DMA source address in parameter RAM slot
892 * @slot: parameter RAM slot being configured
893 * @src_port: physical address of source (memory, controller FIFO, etc)
894 * @addressMode: INCR, except in very rare cases
895 * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
896 * width to use when addressing the fifo (e.g. W8BIT, W32BIT)
898 * Note that the source address is modified during the DMA transfer
899 * according to edma_set_src_index().
901 void edma_set_src(unsigned slot, dma_addr_t src_port,
902 enum address_mode mode, enum fifo_width width)
906 ctlr = EDMA_CTLR(slot);
907 slot = EDMA_CHAN_SLOT(slot);
909 if (slot < edma_info[ctlr]->num_slots) {
910 unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
913 /* set SAM and program FWID */
914 i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
919 edma_parm_write(ctlr, PARM_OPT, slot, i);
921 /* set the source port address
922 in source register of param structure */
923 edma_parm_write(ctlr, PARM_SRC, slot, src_port);
926 EXPORT_SYMBOL(edma_set_src);
929 * edma_set_dest - set initial DMA destination address in parameter RAM slot
930 * @slot: parameter RAM slot being configured
931 * @dest_port: physical address of destination (memory, controller FIFO, etc)
932 * @addressMode: INCR, except in very rare cases
933 * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
934 * width to use when addressing the fifo (e.g. W8BIT, W32BIT)
936 * Note that the destination address is modified during the DMA transfer
937 * according to edma_set_dest_index().
939 void edma_set_dest(unsigned slot, dma_addr_t dest_port,
940 enum address_mode mode, enum fifo_width width)
944 ctlr = EDMA_CTLR(slot);
945 slot = EDMA_CHAN_SLOT(slot);
947 if (slot < edma_info[ctlr]->num_slots) {
948 unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
951 /* set DAM and program FWID */
952 i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
957 edma_parm_write(ctlr, PARM_OPT, slot, i);
958 /* set the destination port address
959 in dest register of param structure */
960 edma_parm_write(ctlr, PARM_DST, slot, dest_port);
963 EXPORT_SYMBOL(edma_set_dest);
966 * edma_get_position - returns the current transfer points
967 * @slot: parameter RAM slot being examined
968 * @src: pointer to source port position
969 * @dst: pointer to destination port position
971 * Returns current source and destination addresses for a particular
972 * parameter RAM slot. Its channel should not be active when this is called.
974 void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
976 struct edmacc_param temp;
979 ctlr = EDMA_CTLR(slot);
980 slot = EDMA_CHAN_SLOT(slot);
982 edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
988 EXPORT_SYMBOL(edma_get_position);
991 * edma_set_src_index - configure DMA source address indexing
992 * @slot: parameter RAM slot being configured
993 * @src_bidx: byte offset between source arrays in a frame
994 * @src_cidx: byte offset between source frames in a block
996 * Offsets are specified to support either contiguous or discontiguous
997 * memory transfers, or repeated access to a hardware register, as needed.
998 * When accessing hardware registers, both offsets are normally zero.
1000 void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
1004 ctlr = EDMA_CTLR(slot);
1005 slot = EDMA_CHAN_SLOT(slot);
1007 if (slot < edma_info[ctlr]->num_slots) {
1008 edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
1009 0xffff0000, src_bidx);
1010 edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
1011 0xffff0000, src_cidx);
1014 EXPORT_SYMBOL(edma_set_src_index);
1017 * edma_set_dest_index - configure DMA destination address indexing
1018 * @slot: parameter RAM slot being configured
1019 * @dest_bidx: byte offset between destination arrays in a frame
1020 * @dest_cidx: byte offset between destination frames in a block
1022 * Offsets are specified to support either contiguous or discontiguous
1023 * memory transfers, or repeated access to a hardware register, as needed.
1024 * When accessing hardware registers, both offsets are normally zero.
1026 void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
1030 ctlr = EDMA_CTLR(slot);
1031 slot = EDMA_CHAN_SLOT(slot);
1033 if (slot < edma_info[ctlr]->num_slots) {
1034 edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
1035 0x0000ffff, dest_bidx << 16);
1036 edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
1037 0x0000ffff, dest_cidx << 16);
1040 EXPORT_SYMBOL(edma_set_dest_index);
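/*
 * Indexing, by way of a made-up example: to gather an 8-line, 240-byte wide
 * window out of a 720-byte wide source frame into a packed buffer, a driver
 * could program acnt = 240, bcnt = 8, ccnt = 1 (see
 * edma_set_transfer_params() below) together with
 *
 *	edma_set_src_index(slot, 720, 0);	... source lines 720 bytes apart
 *	edma_set_dest_index(slot, 240, 0);	... destination lines packed
 *
 * The cidx values only come into play when ccnt is greater than one.
 */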
1043 * edma_set_transfer_params - configure DMA transfer parameters
1044 * @slot: parameter RAM slot being configured
1045 * @acnt: how many bytes per array (at least one)
1046 * @bcnt: how many arrays per frame (at least one)
1047 * @ccnt: how many frames per block (at least one)
1048 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
1049 * the value to reload into bcnt when it decrements to zero
1050 * @sync_mode: ASYNC or ABSYNC
1052 * See the EDMA3 documentation to understand how to configure and link
1053 * transfers using the fields in PaRAM slots. If you are not doing it
1054 * all at once with edma_write_slot(), you will use this routine
1055 * plus two calls each for source and destination, setting the initial
1056 * address and saying how to index that address.
1058 * An example of an A-Synchronized transfer is a serial link using a
1059 * single word shift register. In that case, @acnt would be equal to
1060 * that word size; the serial controller issues a DMA synchronization
1061 * event to transfer each word, and memory access by the DMA transfer
1062 * controller will be word-at-a-time.
1064 * An example of an AB-Synchronized transfer is a device using a FIFO.
1065 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
1066 * The controller with the FIFO issues DMA synchronization events when
1067 * the FIFO threshold is reached, and the DMA transfer controller will
1068 * transfer one frame to (or from) the FIFO. It will probably use
1069 * efficient burst modes to access memory.
1071 void edma_set_transfer_params(unsigned slot,
1072 u16 acnt, u16 bcnt, u16 ccnt,
1073 u16 bcnt_rld, enum sync_dimension sync_mode)
1077 ctlr = EDMA_CTLR(slot);
1078 slot = EDMA_CHAN_SLOT(slot);
1080 if (slot < edma_info[ctlr]->num_slots) {
1081 edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
1082 0x0000ffff, bcnt_rld << 16);
1083 if (sync_mode == ASYNC)
1084 edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
1086 edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
1087 /* Set the acount, bcount, ccount registers */
1088 edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
1089 edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
1092 EXPORT_SYMBOL(edma_set_transfer_params);
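/*
 * Sketch of the A-synchronized case described above, with invented numbers:
 * for a serial port with a 4-byte shift register and a 512-word buffer,
 *
 *	edma_set_transfer_params(slot, 4, 512, 1, 512, ASYNC);
 *
 * moves one 4-byte array per synchronization event, 512 events per frame
 * (bcnt_rld simply mirrors bcnt here).  The AB-synchronized FIFO case would
 * instead use ABSYNC with acnt = FIFO width and bcnt = FIFO depth, so each
 * event moves a whole frame.
 */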
1095 * edma_link - link one parameter RAM slot to another
1096 * @from: parameter RAM slot originating the link
1097 * @to: parameter RAM slot which is the link target
1099 * The originating slot should not be part of any active DMA transfer.
1101 void edma_link(unsigned from, unsigned to)
1103 unsigned ctlr_from, ctlr_to;
1105 ctlr_from = EDMA_CTLR(from);
1106 from = EDMA_CHAN_SLOT(from);
1107 ctlr_to = EDMA_CTLR(to);
1108 to = EDMA_CHAN_SLOT(to);
1110 if (from >= edma_info[ctlr_from]->num_slots)
1112 if (to >= edma_info[ctlr_to]->num_slots)
1114 edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
1117 EXPORT_SYMBOL(edma_link);
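/*
 * One common arrangement (a sketch, not lifted from any particular driver)
 * is ping-pong buffering: allocate two extra slots with
 * edma_alloc_slot(ctlr, EDMA_SLOT_ANY), program each for one half of a
 * circular buffer, then
 *
 *	edma_link(channel, ping);
 *	edma_link(ping, pong);
 *	edma_link(pong, ping);
 *
 * so that whenever the transfer described by the channel's current PaRAM
 * completes, the controller reloads it from the linked slot and continues
 * with the other half of the buffer.
 */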
1120 * edma_unlink - cut link from one parameter RAM slot
1121 * @from: parameter RAM slot originating the link
1123 * The originating slot should not be part of any active DMA transfer.
1124 * Its link is set to 0xffff.
1126 void edma_unlink(unsigned from)
1130 ctlr = EDMA_CTLR(from);
1131 from = EDMA_CHAN_SLOT(from);
1133 if (from >= edma_info[ctlr]->num_slots)
1135 edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
1137 EXPORT_SYMBOL(edma_unlink);
1139 /*-----------------------------------------------------------------------*/
1141 /* Parameter RAM operations (ii) -- read/write whole parameter sets */
1144 * edma_write_slot - write parameter RAM data for slot
1145 * @slot: number of parameter RAM slot being modified
1146 * @param: data to be written into parameter RAM slot
1148 * Use this to assign all parameters of a transfer at once. This
1149 * allows more efficient setup of transfers than issuing multiple
1150 * calls to set up those parameters in small pieces, and provides
1151 * complete control over all transfer options.
1153 void edma_write_slot(unsigned slot, const struct edmacc_param *param)
1157 ctlr = EDMA_CTLR(slot);
1158 slot = EDMA_CHAN_SLOT(slot);
1160 if (slot >= edma_info[ctlr]->num_slots)
1162 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
1165 EXPORT_SYMBOL(edma_write_slot);
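/*
 * For example, a driver keeping a template transfer around might adjust a
 * few fields and push the whole set at once; the field names follow the
 * PARM_* offsets above, and the values here are placeholders:
 *
 *	struct edmacc_param p;
 *
 *	edma_read_slot(slot, &p);
 *	p.src = buf_dma_addr;
 *	p.a_b_cnt = (bcnt << 16) | acnt;
 *	p.link_bcntrld = 0xffff;	... no link
 *	edma_write_slot(slot, &p);
 */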
1168 * edma_read_slot - read parameter RAM data from slot
1169 * @slot: number of parameter RAM slot being copied
1170 * @param: where to store copy of parameter RAM data
1172 * Use this to read data from a parameter RAM slot, perhaps to
1173 * save them as a template for later reuse.
1175 void edma_read_slot(unsigned slot, struct edmacc_param *param)
1179 ctlr = EDMA_CTLR(slot);
1180 slot = EDMA_CHAN_SLOT(slot);
1182 if (slot >= edma_info[ctlr]->num_slots)
1184 memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
1187 EXPORT_SYMBOL(edma_read_slot);
1189 /*-----------------------------------------------------------------------*/
1191 /* Various EDMA channel control operations */
1194 * edma_pause - pause dma on a channel
1195 * @channel: on which edma_start() has been called
1197 * This temporarily disables EDMA hardware events on the specified channel,
1198 * preventing them from triggering new transfers on its behalf
1200 void edma_pause(unsigned channel)
1204 ctlr = EDMA_CTLR(channel);
1205 channel = EDMA_CHAN_SLOT(channel);
1207 if (channel < edma_info[ctlr]->num_channels) {
1208 unsigned int mask = (1 << (channel & 0x1f));
1210 edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
1213 EXPORT_SYMBOL(edma_pause);
1216 * edma_resume - resumes dma on a paused channel
1217 * @channel: on which edma_pause() has been called
1219 * This re-enables EDMA hardware events on the specified channel.
1221 void edma_resume(unsigned channel)
1225 ctlr = EDMA_CTLR(channel);
1226 channel = EDMA_CHAN_SLOT(channel);
1228 if (channel < edma_info[ctlr]->num_channels) {
1229 unsigned int mask = (1 << (channel & 0x1f));
1231 edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
1234 EXPORT_SYMBOL(edma_resume);
1237 * edma_start - start dma on a channel
1238 * @channel: channel being activated
1240 * Channels with event associations will be triggered by their hardware
1241 * events, and channels without such associations will be triggered by
1242 * software. (At this writing there is no interface for using software
1243 * triggers except with channels that don't support hardware triggers.)
1245 * Returns zero on success, else negative errno.
1247 int edma_start(unsigned channel)
1251 ctlr = EDMA_CTLR(channel);
1252 channel = EDMA_CHAN_SLOT(channel);
1254 if (channel < edma_info[ctlr]->num_channels) {
1255 int j = channel >> 5;
1256 unsigned int mask = (1 << (channel & 0x1f));
1258 /* EDMA channels without event association */
1259 if (test_bit(channel, edma_info[ctlr]->edma_unused)) {
1260 pr_debug("EDMA: ESR%d %08x\n", j,
1261 edma_shadow0_read_array(ctlr, SH_ESR, j));
1262 edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
1266 /* EDMA channel with event association */
1267 pr_debug("EDMA: ER%d %08x\n", j,
1268 edma_shadow0_read_array(ctlr, SH_ER, j));
1269 /* Clear any pending error */
1270 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1272 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1273 edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
1274 pr_debug("EDMA: EER%d %08x\n", j,
1275 edma_shadow0_read_array(ctlr, SH_EER, j));
1281 EXPORT_SYMBOL(edma_start);
1284 * edma_stop - stops dma on the channel passed
1285 * @channel: channel being deactivated
 * Any active transfer on @channel is paused and
1288 * all pending hardware events are cleared. The current transfer
1289 * may not be resumed, and the channel's Parameter RAM should be
1290 * reinitialized before being reused.
1292 void edma_stop(unsigned channel)
1296 ctlr = EDMA_CTLR(channel);
1297 channel = EDMA_CHAN_SLOT(channel);
1299 if (channel < edma_info[ctlr]->num_channels) {
1300 int j = channel >> 5;
1301 unsigned int mask = (1 << (channel & 0x1f));
1303 edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
1304 edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1305 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1306 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1308 pr_debug("EDMA: EER%d %08x\n", j,
1309 edma_shadow0_read_array(ctlr, SH_EER, j));
1311 /* REVISIT: consider guarding against inappropriate event
1312 * chaining by overwriting with dummy_paramset.
1316 EXPORT_SYMBOL(edma_stop);
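/*
 * A typical teardown order, sketched: stop the channel first so nothing
 * reloads its PaRAM, drop any links, then release the slots and finally
 * the channel itself ("extra_slot" stands in for whatever slots the driver
 * had linked in):
 *
 *	edma_stop(channel);
 *	edma_unlink(channel);
 *	edma_free_slot(extra_slot);
 *	edma_free_channel(channel);
 */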
1318 /******************************************************************************
 * It cleans the ParamEntry and brings EDMA back to its initial state if media
 * has been removed before EDMA has finished. It is useful for removable media.
 *
 * ch_no - channel no
 *
 * Return: zero on success, or corresponding error no on failure
1327 * FIXME this should not be needed ... edma_stop() should suffice.
1329 *****************************************************************************/
1331 void edma_clean_channel(unsigned channel)
1335 ctlr = EDMA_CTLR(channel);
1336 channel = EDMA_CHAN_SLOT(channel);
1338 if (channel < edma_info[ctlr]->num_channels) {
1339 int j = (channel >> 5);
1340 unsigned int mask = 1 << (channel & 0x1f);
1342 pr_debug("EDMA: EMR%d %08x\n", j,
1343 edma_read_array(ctlr, EDMA_EMR, j));
1344 edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1345 /* Clear the corresponding EMR bits */
1346 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1348 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1349 edma_write(ctlr, EDMA_CCERRCLR, (1 << 16) | 0x3);
1352 EXPORT_SYMBOL(edma_clean_channel);
1355 * edma_clear_event - clear an outstanding event on the DMA channel
1357 * channel - channel number
1359 void edma_clear_event(unsigned channel)
1363 ctlr = EDMA_CTLR(channel);
1364 channel = EDMA_CHAN_SLOT(channel);
1366 if (channel >= edma_info[ctlr]->num_channels)
1369 edma_write(ctlr, EDMA_ECR, 1 << channel);
1371 edma_write(ctlr, EDMA_ECRH, 1 << (channel - 32));
1373 EXPORT_SYMBOL(edma_clear_event);
1375 /*-----------------------------------------------------------------------*/
1377 static int __init edma_probe(struct platform_device *pdev)
1379 struct edma_soc_info *info = pdev->dev.platform_data;
1380 const s8 (*queue_priority_mapping)[2];
1381 const s8 (*queue_tc_mapping)[2];
1382 int i, j, found = 0;
1384 int irq[EDMA_MAX_CC] = {0, 0};
1385 int err_irq[EDMA_MAX_CC] = {0, 0};
1386 struct resource *r[EDMA_MAX_CC] = {NULL};
1387 resource_size_t len[EDMA_MAX_CC];
1394 for (j = 0; j < EDMA_MAX_CC; j++) {
1395 sprintf(res_name, "edma_cc%d", j);
1396 r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1406 len[j] = resource_size(r[j]);
1408 r[j] = request_mem_region(r[j]->start, len[j],
1409 dev_name(&pdev->dev));
1415 edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
1416 if (!edmacc_regs_base[j]) {
1421 edma_info[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
1422 if (!edma_info[j]) {
1426 memset(edma_info[j], 0, sizeof(struct edma));
1428 edma_info[j]->num_channels = min_t(unsigned, info[j].n_channel,
1430 edma_info[j]->num_slots = min_t(unsigned, info[j].n_slot,
1431 EDMA_MAX_PARAMENTRY);
1432 edma_info[j]->num_cc = min_t(unsigned, info[j].n_cc,
1435 edma_info[j]->default_queue = info[j].default_queue;
1436 if (!edma_info[j]->default_queue)
1437 edma_info[j]->default_queue = EVENTQ_1;
1439 dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
1440 edmacc_regs_base[j]);
1442 for (i = 0; i < edma_info[j]->num_slots; i++)
1443 memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
1444 &dummy_paramset, PARM_SIZE);
1446 /* Mark all channels as unused */
1447 memset(edma_info[j]->edma_unused, 0xff,
1448 sizeof(edma_info[j]->edma_unused));
1450 sprintf(irq_name, "edma%d", j);
1451 irq[j] = platform_get_irq_byname(pdev, irq_name);
1452 edma_info[j]->irq_res_start = irq[j];
1453 status = request_irq(irq[j], dma_irq_handler, 0, "edma",
1456 dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
1461 sprintf(irq_name, "edma%d_err", j);
1462 err_irq[j] = platform_get_irq_byname(pdev, irq_name);
1463 edma_info[j]->irq_res_end = err_irq[j];
1464 status = request_irq(err_irq[j], dma_ccerr_handler, 0,
1465 "edma_error", &pdev->dev);
1467 dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
1468 err_irq[j], status);
1472 /* Everything lives on transfer controller 1 until otherwise
1473 * specified. This way, long transfers on the low priority queue
1474 * started by the codec engine will not cause audio defects.
1476 for (i = 0; i < edma_info[j]->num_channels; i++)
1477 map_dmach_queue(j, i, EVENTQ_1);
1479 queue_tc_mapping = info[j].queue_tc_mapping;
1480 queue_priority_mapping = info[j].queue_priority_mapping;
1482 /* Event queue to TC mapping */
1483 for (i = 0; queue_tc_mapping[i][0] != -1; i++)
1484 map_queue_tc(j, queue_tc_mapping[i][0],
1485 queue_tc_mapping[i][1]);
1487 /* Event queue priority mapping */
1488 for (i = 0; queue_priority_mapping[i][0] != -1; i++)
1489 assign_priority_to_queue(j,
1490 queue_priority_mapping[i][0],
1491 queue_priority_mapping[i][1]);
1493 /* Map the channel to param entry if channel mapping logic
1496 if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
1499 for (i = 0; i < info[j].n_region; i++) {
1500 edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
1501 edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
1502 edma_write_array(j, EDMA_QRAE, i, 0x0);
1507 if (tc_errs_handled) {
1508 status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
1509 "edma_tc0", &pdev->dev);
1511 dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
1512 IRQ_TCERRINT0, status);
1515 status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
1516 "edma_tc1", &pdev->dev);
1518 dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
1519 IRQ_TCERRINT, status);
1527 for (i = 0; i < EDMA_MAX_CC; i++) {
1529 free_irq(err_irq[i], &pdev->dev);
1531 free_irq(irq[i], &pdev->dev);
1534 for (i = 0; i < EDMA_MAX_CC; i++) {
1536 release_mem_region(r[i]->start, len[i]);
1537 if (edmacc_regs_base[i])
1538 iounmap(edmacc_regs_base[i]);
1539 kfree(edma_info[i]);
1545 static struct platform_driver edma_driver = {
1546 .driver.name = "edma",
1549 static int __init edma_init(void)
1551 return platform_driver_probe(&edma_driver, edma_probe);
1553 arch_initcall(edma_init);