2 * drivers/dma/imx-sdma.c
4 * This file contains a driver for the Freescale Smart DMA engine
6 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
8 * Based on code from Freescale:
10 * Copyright 2004-2013 Freescale Semiconductor, Inc.
12 * The code contained herein is licensed under the GNU General Public
13 * License. You may obtain a copy of the GNU General Public License
14 * Version 2 or later at the following locations:
16 * http://www.opensource.org/licenses/gpl-license.html
17 * http://www.gnu.org/copyleft/gpl.html
20 #include <linux/init.h>
21 #include <linux/types.h>
23 #include <linux/interrupt.h>
24 #include <linux/clk.h>
25 #include <linux/wait.h>
26 #include <linux/sched.h>
27 #include <linux/semaphore.h>
28 #include <linux/spinlock.h>
29 #include <linux/device.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/firmware.h>
32 #include <linux/slab.h>
33 #include <linux/platform_device.h>
34 #include <linux/dmaengine.h>
35 #include <linux/delay.h>
36 #include <linux/genalloc.h>
39 #include <mach/sdma.h>
41 #include <mach/hardware.h>
42 #include <mach/iram.h>
46 #define SDMA_H_C0PTR 0x000
47 #define SDMA_H_INTR 0x004
48 #define SDMA_H_STATSTOP 0x008
49 #define SDMA_H_START 0x00c
50 #define SDMA_H_EVTOVR 0x010
51 #define SDMA_H_DSPOVR 0x014
52 #define SDMA_H_HOSTOVR 0x018
53 #define SDMA_H_EVTPEND 0x01c
54 #define SDMA_H_DSPENBL 0x020
55 #define SDMA_H_RESET 0x024
56 #define SDMA_H_EVTERR 0x028
57 #define SDMA_H_INTRMSK 0x02c
58 #define SDMA_H_PSW 0x030
59 #define SDMA_H_EVTERRDBG 0x034
60 #define SDMA_H_CONFIG 0x038
61 #define SDMA_ONCE_ENB 0x040
62 #define SDMA_ONCE_DATA 0x044
63 #define SDMA_ONCE_INSTR 0x048
64 #define SDMA_ONCE_STAT 0x04c
65 #define SDMA_ONCE_CMD 0x050
66 #define SDMA_EVT_MIRROR 0x054
67 #define SDMA_ILLINSTADDR 0x058
68 #define SDMA_CHN0ADDR 0x05c
69 #define SDMA_ONCE_RTB 0x060
70 #define SDMA_XTRIG_CONF1 0x070
71 #define SDMA_XTRIG_CONF2 0x074
72 #define SDMA_CHNENBL0_V2 0x200
73 #define SDMA_CHNENBL0_V1 0x080
74 #define SDMA_CHNPRI_0 0x100
77 * Buffer descriptor status values.
88 * Data Node descriptor status values.
90 #define DND_END_OF_FRAME 0x80
91 #define DND_END_OF_XFER 0x40
93 #define DND_UNUSED 0x01
96 * IPCV2 descriptor status values.
98 #define BD_IPCV2_END_OF_FRAME 0x40
100 #define IPCV2_MAX_NODES 50
102 * Error bit set in the CCB status field by the SDMA,
103 * in setbd routine, in case of a transfer error
105 #define DATA_ERROR 0x10000000
108 * Buffer descriptor commands.
113 #define C0_SETCTX 0x07
114 #define C0_GETCTX 0x03
115 #define C0_SETDM 0x01
116 #define C0_SETPM 0x04
117 #define C0_GETDM 0x02
118 #define C0_GETPM 0x08
120 * Change endianness indicator in the BD command field
122 #define CHANGE_ENDIANNESS 0x80
125 * Mode/Count of data node descriptors - IPCv2
127 struct sdma_mode_count {
128 u32 count : 16; /* size of the buffer pointed by this BD */
129 u32 status : 8; /* E,R,I,C,W,D status bits stored here */
130 u32 command : 8; /* command mostly used for channel 0 */
136 struct sdma_buffer_descriptor {
137 struct sdma_mode_count mode;
138 u32 buffer_addr; /* address of the buffer described */
139 u32 ext_buffer_addr; /* extended buffer address */
140 } __attribute__ ((packed));
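/*
 * A minimal sketch (not part of the original code) of how a channel 0
 * command is expressed with this descriptor layout; the values mirror
 * what sdma_load_script() below programs for C0_SETPM:
 *
 *	bd->mode.command = C0_SETPM;	(copy into SDMA program memory)
 *	bd->mode.count = size / 2;	(count is in 16-bit SDMA words)
 *	bd->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 *	bd->buffer_addr = buf_phys;	(DMA address of the source buffer)
 *	bd->ext_buffer_addr = address;	(destination in SDMA address space)
 */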
143 * struct sdma_channel_control - Channel control Block
145 * @current_bd_ptr current buffer descriptor processed
146 * @base_bd_ptr first element of buffer descriptor array
147 * @unused padding. The SDMA engine expects an array of 128 byte sized buffer descriptors
150 struct sdma_channel_control {
154 } __attribute__ ((packed));
157 * struct sdma_state_registers - SDMA context for a channel
159 * @pc: program counter
160 * @t: test bit: status of arithmetic & test instruction
161 * @rpc: return program counter
162 * @sf: source fault while loading data
163 * @spc: loop start program counter
164 * @df: destination fault while storing data
165 * @epc: loop end program counter
168 struct sdma_state_registers {
180 } __attribute__ ((packed));
183 * struct sdma_context_data - sdma context specific to a channel
185 * @channel_state: channel state bits
186 * @gReg: general registers
187 * @mda: burst dma destination address register
188 * @msa: burst dma source address register
189 * @ms: burst dma status register
190 * @md: burst dma data register
191 * @pda: peripheral dma destination address register
192 * @psa: peripheral dma source address register
193 * @ps: peripheral dma status register
194 * @pd: peripheral dma data register
195 * @ca: CRC polynomial register
196 * @cs: CRC accumulator register
197 * @dda: dedicated core destination address register
198 * @dsa: dedicated core source address register
199 * @ds: dedicated core status register
200 * @dd: dedicated core data register
202 struct sdma_context_data {
203 struct sdma_state_registers channel_state;
227 } __attribute__ ((packed));
229 #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
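/*
 * With 4 KiB pages, for example, and the packed 12-byte buffer descriptor
 * defined above, NUM_BD evaluates to 4096 / 12 = 341 descriptors per channel.
 */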
234 SDMA_MODE_INVALID = 0,
242 * struct sdma_channel - housekeeping for a SDMA channel
244 * @sdma pointer to the SDMA engine for this channel
245 * @channel the channel number, matches dmaengine chan_id + 1
246 * @direction transfer type. Needed for setting SDMA script
247 * @peripheral_type Peripheral type. Needed for setting SDMA script
248 * @event_id0 aka dma request line
249 * @event_id1 for channels that use 2 events
250 * @word_size peripheral access size
251 * @buf_tail ID of the buffer that was processed
252 * @done channel completion
253 * @num_bd max NUM_BD. Number of descriptors currently being handled
255 struct sdma_channel {
256 struct sdma_engine *sdma;
257 unsigned int channel;
258 enum dma_transfer_direction direction;
259 enum sdma_peripheral_type peripheral_type;
260 unsigned int event_id0;
261 unsigned int event_id1;
262 enum dma_slave_buswidth word_size;
263 unsigned int buf_tail;
264 struct completion done;
266 struct sdma_buffer_descriptor *bd;
268 unsigned int pc_from_device;
269 unsigned int pc_to_device;
270 unsigned int device_to_device;
271 unsigned int other_script;
272 unsigned int pc_to_pc;
274 dma_addr_t per_address, per_address2;
275 u32 event_mask0, event_mask1;
277 u32 shp_addr, per_addr;
278 u32 data_addr1, data_addr2;
279 struct dma_chan chan;
281 struct dma_async_tx_descriptor desc;
282 dma_cookie_t last_completed;
283 enum dma_status status;
284 unsigned int chn_count;
285 unsigned int chn_real_count;
286 unsigned int irq_handling;
289 #define MAX_DMA_CHANNELS 32
290 #define MXC_SDMA_DEFAULT_PRIORITY 1
291 #define MXC_SDMA_MIN_PRIORITY 1
292 #define MXC_SDMA_MAX_PRIORITY 7
294 #define SDMA_FIRMWARE_MAGIC 0x414d4453
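/* The magic value is the ASCII string "SDMA" stored little-endian */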
297 * struct sdma_firmware_header - Layout of the firmware image
300 * @version_major increased whenever the layout of struct sdma_script_start_addrs changes
302 * @version_minor firmware minor version (for binary compatible changes)
303 * @script_addrs_start offset of struct sdma_script_start_addrs in this image
304 * @num_script_addrs Number of script addresses in this image
305 * @ram_code_start offset of SDMA ram image in this firmware image
306 * @ram_code_size size of SDMA ram image
307 * @script_addrs Stores the start address of the SDMA scripts
308 * (in SDMA memory space)
310 struct sdma_firmware_header {
314 u32 script_addrs_start;
315 u32 num_script_addrs;
322 struct device_dma_parameters dma_parms;
323 struct sdma_channel channel[MAX_DMA_CHANNELS];
324 struct sdma_channel_control *channel_control;
326 unsigned int version;
327 unsigned int num_events;
328 struct sdma_context_data *context;
329 dma_addr_t context_phys;
330 struct dma_device dma_device;
332 struct sdma_script_start_addrs *script_addrs;
333 spinlock_t irq_reg_lock;
334 spinlock_t channel_0_lock;
337 #define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */
338 #define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
339 #define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */
340 #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected */
342 #ifdef CONFIG_SDMA_IRAM
343 static unsigned long sdma_iram_paddr;
344 static void *sdma_iram_vaddr;
345 #define sdma_iram_phys_to_virt(p) (sdma_iram_vaddr + ((p) - sdma_iram_paddr))
346 #define sdma_iram_virt_to_phys(v) (sdma_iram_paddr + ((v) - sdma_iram_vaddr))
347 static struct gen_pool *sdma_iram_pool;
350 * Allocates an uncacheable buffer from IRAM
352 void __iomem *sdma_iram_malloc(size_t size, unsigned long *buf)
354 *buf = gen_pool_alloc(sdma_iram_pool, size);
358 return sdma_iram_phys_to_virt(*buf);
361 void sdma_iram_free(unsigned long buf, size_t size)
366 gen_pool_free(sdma_iram_pool, buf, size);
368 #endif /* CONFIG_SDMA_IRAM */
371 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
373 u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1);
375 return chnenbl0 + event * 4;
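/*
 * Example: on a version 2 engine the enable bits for event 5 live in the
 * register at offset 0x200 + 5 * 4 = 0x214.
 */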
378 static int sdma_config_ownership(struct sdma_channel *sdmac,
379 bool event_override, bool mcu_override, bool dsp_override)
381 struct sdma_engine *sdma = sdmac->sdma;
382 int channel = sdmac->channel;
385 if (event_override && mcu_override && dsp_override)
388 evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
389 mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
390 dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
393 dsp &= ~(1 << channel);
395 dsp |= (1 << channel);
398 evt &= ~(1 << channel);
400 evt |= (1 << channel);
403 mcu &= ~(1 << channel);
405 mcu |= (1 << channel);
407 writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
408 writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
409 writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
415 * sdma_run_channel - run a channel and wait till it's done
417 static int sdma_run_channel(struct sdma_channel *sdmac)
419 struct sdma_engine *sdma = sdmac->sdma;
420 int channel = sdmac->channel;
421 unsigned long timeout = 1000;
424 writel(1 << channel, sdma->regs + SDMA_H_START);
426 while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
433 /* Clear the interrupt status */
434 writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
436 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
439 return ret ? 0 : -ETIMEDOUT;
442 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
445 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
451 #ifdef CONFIG_SDMA_IRAM
452 buf_virt = sdma_iram_malloc(size, (unsigned long *)&buf_phys);
454 buf_virt = dma_alloc_coherent(NULL,
456 &buf_phys, GFP_KERNEL);
461 spin_lock_irqsave(&sdma->channel_0_lock, flags);
463 bd0->mode.command = C0_SETPM;
464 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
465 bd0->mode.count = size / 2;
466 bd0->buffer_addr = buf_phys;
467 bd0->ext_buffer_addr = address;
469 memcpy(buf_virt, buf, size);
471 ret = sdma_run_channel(&sdma->channel[0]);
473 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
474 #ifdef CONFIG_SDMA_IRAM
475 sdma_iram_free(buf_phys, size);
477 dma_free_coherent(NULL, size, buf_virt, buf_phys);
483 static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
485 struct sdma_engine *sdma = sdmac->sdma;
486 int channel = sdmac->channel;
488 u32 chnenbl = chnenbl_ofs(sdma, event);
490 val = readl_relaxed(sdma->regs + chnenbl);
491 val |= (1 << channel);
492 writel_relaxed(val, sdma->regs + chnenbl);
495 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
497 struct sdma_engine *sdma = sdmac->sdma;
498 int channel = sdmac->channel;
499 u32 chnenbl = chnenbl_ofs(sdma, event);
502 val = readl_relaxed(sdma->regs + chnenbl);
503 val &= ~(1 << channel);
504 writel_relaxed(val, sdma->regs + chnenbl);
507 static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
509 struct sdma_buffer_descriptor *bd;
511 * loop mode. Iterate over the descriptors, re-arm them and
512 * call the callback function.
515 bd = &sdmac->bd[sdmac->buf_tail];
517 if (bd->mode.status & BD_DONE)
520 if (bd->mode.status & BD_RROR)
521 sdmac->status = DMA_ERROR;
523 sdmac->status = DMA_IN_PROGRESS;
525 bd->mode.status |= BD_DONE;
527 sdmac->buf_tail %= sdmac->num_bd;
529 if (sdmac->desc.callback)
530 sdmac->desc.callback(sdmac->desc.callback_param);
534 static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
536 struct sdma_buffer_descriptor *bd;
539 sdmac->chn_real_count = 0;
541 * non-loop mode. Iterate over all descriptors, collect
542 * errors and call the callback function
544 for (i = 0; i < sdmac->num_bd; i++) {
547 if (bd->mode.status & (BD_DONE | BD_RROR))
549 sdmac->chn_real_count += bd->mode.count;
553 sdmac->status = DMA_ERROR;
555 sdmac->status = DMA_SUCCESS;
557 sdmac->last_completed = sdmac->desc.cookie;
558 if (sdmac->desc.callback)
559 sdmac->desc.callback(sdmac->desc.callback_param);
563 static void sdma_handle_other_intr(struct sdma_channel *sdmac)
565 sdmac->last_completed = sdmac->desc.cookie;
567 if (sdmac->desc.callback)
568 sdmac->desc.callback(sdmac->desc.callback_param);
571 static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
573 complete(&sdmac->done);
575 switch (sdmac->mode) {
577 sdma_handle_channel_loop(sdmac);
579 case SDMA_MODE_NORMAL:
580 mxc_sdma_handle_channel_normal(sdmac);
582 case SDMA_MODE_NO_BD:
583 sdma_handle_other_intr(sdmac);
586 pr_err("Unvalid SDMA MODE!\n");
591 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
593 struct sdma_engine *sdma = dev_id;
594 struct sdma_channel *sdmac;
599 spin_lock_irqsave(&sdma->irq_reg_lock, flag);
600 stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
601 /* not interested in channel 0 interrupts */
603 writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
604 spin_unlock_irqrestore(&sdma->irq_reg_lock, flag);
608 channel = fls(stat_bak) - 1;
609 sdmac = &sdma->channel[channel];
610 sdmac->irq_handling = 1;
611 stat_bak &= ~(1 << channel);
615 channel = fls(stat) - 1;
616 sdmac = &sdma->channel[channel];
618 if (sdmac->irq_handling)
619 mxc_sdma_handle_channel(sdmac);
621 stat &= ~(1 << channel);
622 sdmac->irq_handling = 0;
629 * sets the pc of SDMA script according to the peripheral type
631 static void sdma_get_pc(struct sdma_channel *sdmac,
632 enum sdma_peripheral_type peripheral_type)
634 struct sdma_engine *sdma = sdmac->sdma;
635 int per_2_emi = 0, emi_2_per = 0;
637 * These are needed once we start to support transfers between
638 * two peripherals or memory-to-memory transfers
640 int per_2_per = 0, emi_2_emi = 0;
643 sdmac->pc_from_device = 0;
644 sdmac->pc_to_device = 0;
645 sdmac->device_to_device = 0;
646 sdmac->other_script = 0;
649 switch (peripheral_type) {
650 case IMX_DMATYPE_MEMORY:
651 emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
653 case IMX_DMATYPE_DSP:
654 emi_2_per = sdma->script_addrs->bp_2_ap_addr;
655 per_2_emi = sdma->script_addrs->ap_2_bp_addr;
657 case IMX_DMATYPE_FIRI:
658 per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
659 emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
661 case IMX_DMATYPE_UART:
662 per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
663 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
665 case IMX_DMATYPE_UART_SP:
666 per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
667 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
669 case IMX_DMATYPE_ATA:
670 per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
671 emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
673 case IMX_DMATYPE_CSPI:
674 case IMX_DMATYPE_EXT:
675 case IMX_DMATYPE_SSI:
676 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
677 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
679 case IMX_DMATYPE_SSI_SP:
680 per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
681 emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
683 case IMX_DMATYPE_MMC:
684 case IMX_DMATYPE_SDHC:
685 case IMX_DMATYPE_CSPI_SP:
686 case IMX_DMATYPE_ESAI:
687 case IMX_DMATYPE_MSHC_SP:
688 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
689 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
691 case IMX_DMATYPE_ASRC:
692 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
693 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
694 per_2_per = sdma->script_addrs->per_2_per_addr;
696 case IMX_DMATYPE_MSHC:
697 per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
698 emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
700 case IMX_DMATYPE_CCM:
701 per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
703 case IMX_DMATYPE_SPDIF:
704 per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
705 emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
707 case IMX_DMATYPE_IPU_MEMORY:
708 emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
710 case IMX_DMATYPE_HDMI:
711 other = sdma->script_addrs->hdmi_dma_addr;
716 sdmac->pc_from_device = per_2_emi;
717 sdmac->pc_to_device = emi_2_per;
718 sdmac->device_to_device = per_2_per;
719 sdmac->other_script = other;
720 sdmac->pc_to_pc = emi_2_emi;
723 static int sdma_set_context_reg(struct sdma_channel *sdmac,
724 struct sdma_context_data *context)
726 switch (sdmac->peripheral_type) {
727 case IMX_DMATYPE_HDMI:
728 context->gReg[4] = sdmac->data_addr1;
729 context->gReg[6] = sdmac->data_addr2;
732 context->gReg[0] = sdmac->event_mask1;
733 context->gReg[1] = sdmac->event_mask0;
734 context->gReg[2] = sdmac->per_addr;
735 context->gReg[6] = sdmac->shp_addr;
736 context->gReg[7] = sdmac->watermark_level;
743 static int sdma_load_context(struct sdma_channel *sdmac)
745 struct sdma_engine *sdma = sdmac->sdma;
746 int channel = sdmac->channel;
748 struct sdma_context_data *context = sdma->context;
749 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
754 if (sdmac->direction == DMA_DEV_TO_MEM)
755 load_address = sdmac->pc_from_device;
756 else if (sdmac->direction == DMA_DEV_TO_DEV)
757 load_address = sdmac->device_to_device;
758 else if (sdmac->direction == DMA_MEM_TO_DEV)
759 load_address = sdmac->pc_to_device;
760 else if (sdmac->direction == DMA_MEM_TO_MEM)
761 load_address = sdmac->pc_to_pc;
763 load_address = sdmac->other_script;
766 if (load_address < 0)
769 dev_dbg(sdma->dev, "load_address = %d\n", load_address);
770 dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
771 dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
772 dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
773 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
774 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
776 spin_lock_irqsave(&sdma->channel_0_lock, flags);
777 memset(context, 0, sizeof(*context));
778 context->channel_state.pc = load_address;
780 /* Send the event mask, the peripheral base address and the
781 * watermark level via the channel context
783 sdma_set_context_reg(sdmac, context);
785 bd0->mode.command = C0_SETDM;
786 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
787 bd0->mode.count = sizeof(*context) / 4;
788 bd0->buffer_addr = sdma->context_phys;
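	/*
	 * The channel contexts live in SDMA internal RAM above word address
	 * 2048; each context occupies sizeof(*context) / 4 32-bit words, so
	 * channel n's context is written to 2048 + n * sizeof(*context) / 4.
	 */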
789 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
791 ret = sdma_run_channel(&sdma->channel[0]);
793 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
797 static void sdma_disable_channel(struct sdma_channel *sdmac)
799 struct sdma_engine *sdma = sdmac->sdma;
800 int channel = sdmac->channel;
802 writel_relaxed(1 << channel, sdma->regs + SDMA_H_STATSTOP);
803 sdmac->status = DMA_ERROR;
806 static int sdma_set_chan_private_data(struct sdma_channel *sdmac)
808 struct sdma_engine *sdma = sdmac->sdma;
809 struct imx_dma_data *data = sdmac->chan.private;
813 sdmac->data_addr1 = 0;
814 sdmac->data_addr2 = 0;
817 if (sdmac->direction == DMA_DEV_TO_DEV) {
818 sdmac->per_addr = sdmac->per_address;
819 sdmac->shp_addr = sdmac->per_address2;
820 } else if (sdmac->direction == DMA_TRANS_NONE) {
821 switch (sdmac->peripheral_type) {
822 case IMX_DMATYPE_HDMI:
823 sdmac->data_addr1 = *(u32 *)data->private;
824 sdmac->data_addr2 = *((u32 *)data->private + 1);
828 "periphal type not support for DMA_TRANS_NONE!\n");
832 sdmac->shp_addr = sdmac->per_address;
838 static int sdma_config_channel(struct sdma_channel *sdmac)
842 sdma_disable_channel(sdmac);
844 sdmac->event_mask0 = 0;
845 sdmac->event_mask1 = 0;
847 if (sdmac->event_id0)
848 sdma_event_enable(sdmac, sdmac->event_id0);
850 if (sdmac->event_id1)
851 sdma_event_enable(sdmac, sdmac->event_id1);
853 switch (sdmac->peripheral_type) {
854 case IMX_DMATYPE_DSP:
855 sdma_config_ownership(sdmac, false, true, true);
857 case IMX_DMATYPE_MEMORY:
858 sdma_config_ownership(sdmac, false, true, false);
861 sdma_config_ownership(sdmac, true, true, false);
865 sdma_get_pc(sdmac, sdmac->peripheral_type);
867 if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
868 (sdmac->peripheral_type != IMX_DMATYPE_DSP) &&
869 (sdmac->peripheral_type != IMX_DMATYPE_HDMI)) {
870 /* Handle multiple event channels differently */
871 if (sdmac->event_id1) {
872 if (sdmac->event_id0 > 31) {
873 sdmac->watermark_level |= 1 << 28;
874 sdmac->event_mask0 |= 0;
875 sdmac->event_mask1 |=
876 1 << ((sdmac->event_id0)%32);
878 sdmac->event_mask0 |=
879 1 << ((sdmac->event_id0)%32);
880 sdmac->event_mask1 |= 0;
882 if (sdmac->event_id1 > 31) {
883 sdmac->watermark_level |= 1 << 29;
884 sdmac->event_mask0 |= 0;
885 sdmac->event_mask1 |=
886 1 << ((sdmac->event_id1)%32);
888 sdmac->event_mask0 |=
889 1 << ((sdmac->event_id1)%32);
890 sdmac->event_mask1 |= 0;
892 sdmac->watermark_level |= (unsigned int)(3<<11);
893 sdmac->watermark_level |= (unsigned int)(1<<31);
894 sdmac->watermark_level |= (unsigned int)(2<<24);
896 if (sdmac->event_id0 > 31) {
897 sdmac->event_mask0 = 0;
899 1 << ((sdmac->event_id0)%32);
902 1 << ((sdmac->event_id0)%32);
903 sdmac->event_mask1 = 0;
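		/*
		 * Worked example: a single DMA request on line 34 selects
		 * bit 34 % 32 = 2 in event_mask1, while request lines below
		 * 32 set their bit in event_mask0 instead.
		 */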
906 /* Watermark Level */
907 sdmac->watermark_level |= sdmac->watermark_level;
909 sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
911 sdma_set_chan_private_data(sdmac);
913 ret = sdma_load_context(sdmac);
918 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
919 unsigned int priority)
921 struct sdma_engine *sdma = sdmac->sdma;
922 int channel = sdmac->channel;
924 if (priority < MXC_SDMA_MIN_PRIORITY
925 || priority > MXC_SDMA_MAX_PRIORITY) {
929 writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
934 static int sdma_request_channel(struct sdma_channel *sdmac)
936 struct sdma_engine *sdma = sdmac->sdma;
937 int channel = sdmac->channel;
940 #ifdef CONFIG_SDMA_IRAM
941 sdmac->bd = sdma_iram_malloc(sizeof(sdmac->bd),
942 (unsigned long *)&sdmac->bd_phys);
944 sdmac->bd = dma_alloc_noncached(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
951 memset(sdmac->bd, 0, PAGE_SIZE);
953 sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
954 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
956 clk_enable(sdma->clk);
958 sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
960 init_completion(&sdmac->done);
964 sdmac->irq_handling = 0;
972 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
975 writel(1 << channel, sdma->regs + SDMA_H_START);
978 static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
980 dma_cookie_t cookie = sdmac->chan.cookie;
985 sdmac->chan.cookie = cookie;
986 sdmac->desc.cookie = cookie;
991 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
993 return container_of(chan, struct sdma_channel, chan);
996 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
998 struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
999 struct sdma_engine *sdma = sdmac->sdma;
1000 dma_cookie_t cookie;
1003 spin_lock_irqsave(&sdmac->lock, flag);
1005 cookie = sdma_assign_cookie(sdmac);
1007 sdma_enable_channel(sdma, sdmac->channel);
1009 spin_unlock_irqrestore(&sdmac->lock, flag);
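/*
 * A hypothetical client allocates one of these channels through the
 * dmaengine core roughly as follows (filter callback and error handling
 * omitted; the request line and priority values are made up):
 *
 *	struct imx_dma_data data = {
 *		.dma_request = <SDMA event line of the peripheral>,
 *		.peripheral_type = IMX_DMATYPE_SSI,
 *		.priority = DMA_PRIO_HIGH,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, filter_fn, &data);
 */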
1014 static int sdma_alloc_chan_resources(struct dma_chan *chan)
1016 struct sdma_channel *sdmac = to_sdma_chan(chan);
1017 struct imx_dma_data *data = chan->private;
1023 switch (data->priority) {
1027 case DMA_PRIO_MEDIUM:
1036 sdmac->peripheral_type = data->peripheral_type;
1037 sdmac->event_id0 = data->dma_request;
1038 if (data->dma_request_p2p > 0)
1039 sdmac->event_id1 = data->dma_request_p2p;
1041 sdmac->event_id1 = 0;
1042 ret = sdma_request_channel(sdmac);
1046 ret = sdma_set_channel_priority(sdmac, prio);
1050 dma_async_tx_descriptor_init(&sdmac->desc, chan);
1051 sdmac->desc.tx_submit = sdma_tx_submit;
1052 /* txd.flags will be overwritten in prep funcs */
1053 sdmac->desc.flags = DMA_CTRL_ACK;
1055 /* Set the SDMA channel mode to invalid to avoid misconfiguration */
1056 sdmac->mode = SDMA_MODE_INVALID;
1061 static void sdma_irq_pending_check(struct sdma_channel *sdmac)
1063 struct sdma_engine *sdma = sdmac->sdma;
1067 spin_lock_irqsave(&sdma->irq_reg_lock, flag);
1068 stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
1070 /* Check whether the current channel's IRQ is still unhandled */
1071 if (stat & (1 << sdmac->channel)) {
1072 /* Handle the irq manually */
1073 writel_relaxed(1 << sdmac->channel, sdma->regs + SDMA_H_INTR);
1074 spin_unlock_irqrestore(&sdma->irq_reg_lock, flag);
1076 /* Prevent the irq handler from calling mxc_sdma_handle_channel() again */
1077 sdmac->irq_handling = 0;
1078 mxc_sdma_handle_channel(sdmac);
1080 spin_unlock_irqrestore(&sdma->irq_reg_lock, flag);
1083 /* Wait here until the irq handler has finished */
1084 while (sdmac->irq_handling)
1088 static void sdma_free_chan_resources(struct dma_chan *chan)
1090 struct sdma_channel *sdmac = to_sdma_chan(chan);
1091 struct sdma_engine *sdma = sdmac->sdma;
1093 /* Check whether an irq for this channel is still pending */
1094 sdma_irq_pending_check(sdmac);
1096 sdma_disable_channel(sdmac);
1098 if (sdmac->event_id0)
1099 sdma_event_disable(sdmac, sdmac->event_id0);
1100 if (sdmac->event_id1)
1101 sdma_event_disable(sdmac, sdmac->event_id1);
1103 sdmac->event_id0 = 0;
1104 sdmac->event_id1 = 0;
1106 sdma_set_channel_priority(sdmac, 0);
1108 #ifdef CONFIG_SDMA_IRAM
1109 sdma_iram_free(sdmac->bd_phys, sizeof(sdmac->bd));
1111 dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
1113 clk_disable(sdma->clk);
1116 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1117 struct dma_chan *chan, struct scatterlist *sgl,
1118 unsigned int sg_len, enum dma_transfer_direction direction,
1119 unsigned long flags)
1121 struct sdma_channel *sdmac = to_sdma_chan(chan);
1122 struct sdma_engine *sdma = sdmac->sdma;
1124 int channel = sdmac->channel;
1125 struct scatterlist *sg;
1128 * For SDMA M2M transfers we need two scatterlists: the source
1129 * addresses are stored in the first sg and the destination addresses
1130 * in the second. Previously, the first call to sdma_prep_slave_sg()
1131 * set 'sdmac->status' to 'DMA_IN_PROGRESS', so the call for the
1132 * second sg bailed out and returned 'NULL'. To avoid this, the status
1133 * check below is skipped for the M2M case, so that the call for the
1134 * second sg succeeds.
1137 if (!((direction == DMA_MEM_TO_MEM) && (flags == 0))) {
1138 if (sdmac->status == DMA_IN_PROGRESS)
1141 sdmac->status = DMA_IN_PROGRESS;
1143 sdmac->mode = SDMA_MODE_NORMAL;
1145 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1148 sdmac->direction = direction;
1149 ret = sdma_load_context(sdmac);
1153 if (sg_len > NUM_BD) {
1154 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1155 channel, sg_len, NUM_BD);
1160 sdmac->chn_count = 0;
1161 for_each_sg(sgl, sg, sg_len, i) {
1162 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1165 if (sdmac->direction == DMA_MEM_TO_MEM) {
1167 bd->buffer_addr = sg->dma_address;
1169 bd->ext_buffer_addr = sg->dma_address;
1171 bd->buffer_addr = sg->dma_address;
1175 if (count > 0xffff) {
1176 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1177 channel, count, 0xffff);
1182 bd->mode.count = count;
1183 sdmac->chn_count += count;
1185 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
1190 switch (sdmac->word_size) {
1191 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1192 bd->mode.command = 0;
1193 if (count & 3 || sg->dma_address & 3)
1196 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1197 bd->mode.command = 2;
1198 if (count & 1 || sg->dma_address & 1)
1201 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1202 bd->mode.command = 1;
1208 param = BD_DONE | BD_EXTD | BD_CONT;
1210 if (i + 1 == sg_len) {
1216 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1217 i, count, sg->dma_address,
1218 param & BD_WRAP ? "wrap" : "",
1219 param & BD_INTR ? " intr" : "");
1221 bd->mode.status = param;
1224 sdmac->num_bd = sg_len;
1225 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1227 return &sdmac->desc;
1229 sdmac->status = DMA_ERROR;
1233 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1234 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1235 size_t period_len, enum dma_transfer_direction direction)
1237 struct sdma_channel *sdmac = to_sdma_chan(chan);
1238 struct sdma_engine *sdma = sdmac->sdma;
1240 int channel = sdmac->channel;
1241 int ret, i = 0, buf = 0;
1243 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1245 if (sdmac->status == DMA_IN_PROGRESS)
1248 sdmac->status = DMA_IN_PROGRESS;
1249 sdmac->direction = direction;
1251 switch (sdmac->direction) {
1252 case DMA_DEV_TO_DEV:
1253 sdmac->mode = SDMA_MODE_P2P;
1255 case DMA_TRANS_NONE:
1256 sdmac->mode = SDMA_MODE_NO_BD;
1258 case DMA_MEM_TO_DEV:
1259 case DMA_DEV_TO_MEM:
1260 sdmac->mode = SDMA_MODE_LOOP;
1263 pr_err("SDMA direction is not support!");
1267 ret = sdma_load_context(sdmac);
1272 num_periods = buf_len / period_len;
1274 return &sdmac->desc;
1276 if (num_periods > NUM_BD) {
1277 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1278 channel, num_periods, NUM_BD);
1282 if (period_len > 0xffff) {
1283 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
1284 channel, period_len, 0xffff);
1288 while (buf < buf_len) {
1289 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1292 bd->buffer_addr = dma_addr;
1294 bd->mode.count = period_len;
1296 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1298 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1299 bd->mode.command = 0;
1301 bd->mode.command = sdmac->word_size;
1303 param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1304 if (i + 1 == num_periods)
1307 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1308 i, period_len, dma_addr,
1309 param & BD_WRAP ? "wrap" : "",
1310 param & BD_INTR ? " intr" : "");
1312 bd->mode.status = param;
1314 dma_addr += period_len;
1320 sdmac->num_bd = num_periods;
1321 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1323 return &sdmac->desc;
1325 sdmac->status = DMA_ERROR;
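/*
 * Sketch of the client-side call that ends up in the DMA_SLAVE_CONFIG
 * branch below (the FIFO address, width and burst size are made-up
 * values):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 6,	(becomes the channel watermark level)
 *	};
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long)&cfg);
 */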
1329 static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1332 struct sdma_channel *sdmac = to_sdma_chan(chan);
1333 struct dma_slave_config *dmaengine_cfg = (void *)arg;
1336 case DMA_TERMINATE_ALL:
1337 sdma_disable_channel(sdmac);
1339 case DMA_SLAVE_CONFIG:
1340 if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
1341 sdmac->per_address = dmaengine_cfg->src_addr;
1342 sdmac->per_address2 = dmaengine_cfg->dst_addr;
1343 sdmac->watermark_level = 0;
1344 sdmac->watermark_level |=
1345 dmaengine_cfg->src_maxburst;
1346 sdmac->watermark_level |=
1347 dmaengine_cfg->dst_maxburst << 16;
1348 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1349 } else if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
1350 sdmac->per_address = dmaengine_cfg->src_addr;
1351 sdmac->watermark_level = dmaengine_cfg->src_maxburst;
1352 sdmac->word_size = dmaengine_cfg->src_addr_width;
1353 } else if (dmaengine_cfg->direction == DMA_MEM_TO_DEV) {
1354 sdmac->per_address = dmaengine_cfg->dst_addr;
1355 sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
1356 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1357 } else if (dmaengine_cfg->direction == DMA_MEM_TO_MEM) {
1358 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1360 sdmac->direction = dmaengine_cfg->direction;
1361 return sdma_config_channel(sdmac);
1369 static enum dma_status sdma_tx_status(struct dma_chan *chan,
1370 dma_cookie_t cookie,
1371 struct dma_tx_state *txstate)
1373 struct sdma_channel *sdmac = to_sdma_chan(chan);
1374 dma_cookie_t last_used;
1376 last_used = chan->cookie;
1378 dma_set_tx_state(txstate, sdmac->last_completed, last_used,
1379 sdmac->chn_count - sdmac->chn_real_count);
1381 return sdmac->status;
1384 static void sdma_issue_pending(struct dma_chan *chan)
1387 * Nothing to do. We only have a single descriptor
1391 void sdma_set_event_pending(struct dma_chan *chan)
1393 struct sdma_channel *sdmac = to_sdma_chan(chan);
1394 struct sdma_engine *sdma = sdmac->sdma;
1398 channel = sdmac->channel;
1399 reg = readl_relaxed(sdma->regs + SDMA_H_EVTPEND);
1400 reg |= 1 << channel;
1401 writel_relaxed(reg, sdma->regs + SDMA_H_EVTPEND);
1405 EXPORT_SYMBOL(sdma_set_event_pending);
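/*
 * Presumably used by client drivers that have no hardware DMA request
 * wired up: setting the channel's bit in SDMA_H_EVTPEND makes the engine
 * treat the event as pending, so the channel is started from software.
 */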
1407 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 38
1409 static void sdma_add_scripts(struct sdma_engine *sdma,
1410 const struct sdma_script_start_addrs *addr)
1412 s32 *addr_arr = (s32 *)addr;
1413 s32 *saddr_arr = (s32 *)sdma->script_addrs;
1416 for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1417 if (addr_arr[i] > 0)
1418 saddr_arr[i] = addr_arr[i];
1421 static int __init sdma_get_firmware(struct sdma_engine *sdma,
1422 const char *cpu_name, int to_version)
1424 const struct firmware *fw;
1426 const struct sdma_firmware_header *header;
1428 const struct sdma_script_start_addrs *addr;
1429 unsigned short *ram_code;
1431 fwname = kasprintf(GFP_KERNEL, "imx/sdma/sdma-%s-to%d.bin",
1432 cpu_name, to_version);
1436 ret = request_firmware(&fw, fwname, sdma->dev);
1443 if (fw->size < sizeof(*header))
1446 header = (struct sdma_firmware_header *)fw->data;
1448 if (header->magic != SDMA_FIRMWARE_MAGIC)
1450 if (header->ram_code_start + header->ram_code_size > fw->size)
1453 addr = (void *)header + header->script_addrs_start;
1454 ram_code = (void *)header + header->ram_code_start;
1456 /* download the RAM image for SDMA */
1457 sdma_load_script(sdma, ram_code,
1458 header->ram_code_size,
1459 addr->ram_code_start_addr);
1460 clk_disable(sdma->clk);
1462 sdma_add_scripts(sdma, addr);
1464 dev_info(sdma->dev, "loaded firmware %d.%d\n",
1465 header->version_major,
1466 header->version_minor);
1469 release_firmware(fw);
1474 static int __init sdma_init(struct sdma_engine *sdma)
1477 dma_addr_t ccb_phys;
1479 switch (sdma->version) {
1481 sdma->num_events = 32;
1484 sdma->num_events = 48;
1487 dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version);
1491 clk_enable(sdma->clk);
1493 /* Be sure SDMA has not started yet */
1494 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1496 #ifdef CONFIG_SDMA_IRAM
1497 /* Allocate memory for SDMA channel and buffer descriptors */
1498 sdma_iram_vaddr = iram_alloc(SZ_4K, &sdma_iram_paddr);
1499 sdma_iram_pool = gen_pool_create(PAGE_SHIFT/2, -1);
1500 gen_pool_add(sdma_iram_pool, sdma_iram_paddr, SZ_4K, -1);
1502 sdma->channel_control = sdma_iram_malloc(MAX_DMA_CHANNELS *
1503 sizeof(struct sdma_channel_control)
1504 + sizeof(struct sdma_context_data),
1505 (unsigned long *)&ccb_phys);
1507 sdma->channel_control = dma_alloc_coherent(NULL,
1508 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1509 sizeof(struct sdma_context_data),
1510 &ccb_phys, GFP_KERNEL);
1513 if (!sdma->channel_control) {
1518 sdma->context = (void *)sdma->channel_control +
1519 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1520 sdma->context_phys = ccb_phys +
1521 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1523 /* Zero-out the CCB structures array just allocated */
1524 memset(sdma->channel_control, 0,
1525 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1527 /* disable all channels */
1528 for (i = 0; i < sdma->num_events; i++)
1529 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1531 /* All channels have priority 0 */
1532 for (i = 0; i < MAX_DMA_CHANNELS; i++)
1533 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1535 ret = sdma_request_channel(&sdma->channel[0]);
1539 sdma_config_ownership(&sdma->channel[0], false, true, false);
1541 /* Set Command Channel (Channel Zero) */
1542 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1544 /* Set bits of CONFIG register but with static context switching */
1545 /* FIXME: Check whether to set ACR bit depending on clock ratios */
1546 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1548 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1550 /* Set bits of CONFIG register with given context switching mode */
1551 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1553 /* Initialize channel 0's priority */
1554 sdma_set_channel_priority(&sdma->channel[0], 7);
1556 clk_disable(sdma->clk);
1561 clk_disable(sdma->clk);
1562 dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1566 static int __init sdma_probe(struct platform_device *pdev)
1570 struct resource *iores;
1571 struct sdma_platform_data *pdata = pdev->dev.platform_data;
1573 struct sdma_engine *sdma;
1575 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1579 spin_lock_init(&sdma->channel_0_lock);
1581 sdma->dev = &pdev->dev;
1583 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1584 irq = platform_get_irq(pdev, 0);
1585 if (!iores || irq < 0 || !pdata) {
1590 if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
1592 goto err_request_region;
1595 sdma->clk = clk_get(&pdev->dev, NULL);
1596 if (IS_ERR(sdma->clk)) {
1597 ret = PTR_ERR(sdma->clk);
1601 sdma->regs = ioremap(iores->start, resource_size(iores));
1607 ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
1609 goto err_request_irq;
1611 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
1612 if (!sdma->script_addrs)
1615 sdma->version = pdata->sdma_version;
1617 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1618 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1620 spin_lock_init(&sdma->irq_reg_lock);
1622 INIT_LIST_HEAD(&sdma->dma_device.channels);
1623 /* Initialize channel parameters */
1624 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1625 struct sdma_channel *sdmac = &sdma->channel[i];
1628 spin_lock_init(&sdmac->lock);
1630 sdmac->chan.device = &sdma->dma_device;
1634 * Add the channel to the DMAC list. Do not add channel 0 though
1635 * because we need it internally in the SDMA driver. This also means
1636 * that channel 0 in dmaengine counting matches sdma channel 1.
1639 list_add_tail(&sdmac->chan.device_node,
1640 &sdma->dma_device.channels);
1643 ret = sdma_init(sdma);
1647 if (pdata->script_addrs)
1648 sdma_add_scripts(sdma, pdata->script_addrs);
1650 sdma_get_firmware(sdma, pdata->cpu_name, pdata->to_version);
1652 sdma->dma_device.dev = &pdev->dev;
1654 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
1655 sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
1656 sdma->dma_device.device_tx_status = sdma_tx_status;
1657 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1658 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1659 sdma->dma_device.device_control = sdma_control;
1660 sdma->dma_device.device_issue_pending = sdma_issue_pending;
1661 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
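	/* mode.count in a buffer descriptor is only 16 bits wide */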
1662 dma_set_max_seg_size(sdma->dma_device.dev, 65535);
1664 ret = dma_async_device_register(&sdma->dma_device);
1666 dev_err(&pdev->dev, "unable to register\n");
1670 dev_info(sdma->dev, "initialized\n");
1675 kfree(sdma->script_addrs);
1677 free_irq(irq, sdma);
1679 iounmap(sdma->regs);
1683 release_mem_region(iores->start, resource_size(iores));
1690 static int __exit sdma_remove(struct platform_device *pdev)
1695 static struct platform_driver sdma_driver = {
1699 .remove = __exit_p(sdma_remove),
1702 static int __init sdma_module_init(void)
1704 return platform_driver_probe(&sdma_driver, sdma_probe);
1706 module_init(sdma_module_init);
1708 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1709 MODULE_DESCRIPTION("i.MX SDMA driver");
1710 MODULE_LICENSE("GPL");