/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>

#include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32

enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};
/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)
#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DSTCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2
#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))
#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)
#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6
#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
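
/*
 * Illustrative example (added): a CCR encoding a burst size of 4 bytes
 * (brst_size field = 2) and a burst length of 8 beats gives
 * BRST_SIZE(ccr) = 4 and BRST_LEN(ccr) = 8, so
 * BYTE_TO_BURST(4096, ccr) = 4096 / 4 / 8 = 128 bursts and
 * BURST_TO_BYTE(128, ccr) = 4096 bytes again.
 */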
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For a typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256
/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
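
/*
 * Usage note (added): callers spin only across short-lived hardware
 * states, e.g. UNTIL(thrd, PL330_STATE_STOPPED) busy-waits with
 * cpu_relax() until the thread's state machine reports STOPPED.
 */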
#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif
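
/*
 * Note (added): defining PL330_DEBUG_MCGEN turns every _emit_*() below
 * into a tracing point that prints each generated instruction, prefixed
 * with its address in the MicroCode buffer (cmd_line starts at mc_bus
 * via PL330_DBGMC_START() and advances by the instruction size).
 */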
/* The number of default descriptors */
#define NR_DEFAULT_DESC	16

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};
/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};
/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};
struct dma_pl330_desc;

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct dma_pl330_desc *desc;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 * available.
	 */
	struct pl330_thread *thread;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};
struct pl330_dmac {
	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;

	spinlock_t		lock;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
};
struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;

	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

struct _xfer_spec {
	u32 ccr;
	struct dma_pl330_desc *desc;
};
static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}
static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}
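
/*
 * Note (added): the loop-count byte of DMALP holds "iterations - 1",
 * hence the decrement above; a caller wanting 256 iterations passes
 * cnt = 256 and the instruction stores 255, the largest encodable count.
 */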
struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}

static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);
	buf[1] = chan & 0x7;
	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
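
/*
 * Note (added): this converts milliseconds into an approximate number
 * of polling iterations via loops_per_jiffy, so the 5ms timeout used
 * below is a rough upper bound rather than an exact wall-clock delay.
 */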
/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}

static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}
static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	struct _pl330_req *req;
	struct dma_pl330_desc *desc;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (thrd->req[idx].desc != NULL) {
		req = &thrd->req[idx];
	} else {
		idx = thrd->lstenq;
		if (thrd->req[idx].desc != NULL)
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req)
		return true;

	desc = req->desc;

	ns = desc->rqcfg.nonsecure ? 1 : 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_FAULTING:
		_stop(thrd);

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}
static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
	}

	return off;
}

static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
	}

	return off;
}

static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_DEV_TO_MEM:
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}
/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
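
/*
 * Worked example (added): for *bursts = 1000 the middle branch above
 * picks lcnt1 = 256, lcnt0 = 1000 / 256 = 3 and cyc = 1, so one pass
 * emits two nested DMALP loops covering 256 * 3 = 768 bursts; the
 * caller, _setup_loops(), then invokes _loop() again for the
 * remaining 232.
 */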
static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}

static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = &pxs->desc->px;
	/* Error if xfer length is not aligned at burst size */
	if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
		return -EINVAL;

	off += _setup_xfer(dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
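
/*
 * Illustrative example (added): an incrementing, secure, unprivileged
 * memcpy with brst_size = 2 (4-byte beats) and brst_len = 4 yields
 * ccr = CC_SRCINC | CC_DSTINC | (3 << CC_SRCBRSTLEN_SHFT) |
 *	 (3 << CC_DSTBRSTLEN_SHFT) | (2 << CC_SRCBRSTSIZE_SHFT) |
 *	 (2 << CC_DSTBRSTSIZE_SHFT) = 0x000d4035.
 */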
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd,
	struct dma_pl330_desc *desc)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct _xfer_spec xs;
	unsigned long flags;
	void __iomem *regs;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	/* No Req or Unacquired Channel or DMAC */
	if (!desc || !thrd || thrd->free)
		return -EINVAL;

	regs = thrd->dmac->base;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (desc->rqtype != DMA_MEM_TO_MEM &&
	    desc->peri >= pl330->pcfg.num_peri) {
		dev_info(thrd->dmac->ddma.dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, desc->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Prefer Secure Channel */
	if (!_manager_ns(thrd))
		desc->rqcfg.nonsecure = 0;
	else
		desc->rqcfg.nonsecure = 1;

	ccr = _prepare_ccr(&desc->rqcfg);

	idx = thrd->req[0].desc == NULL ? 0 : 1;

	xs.ccr = ccr;
	xs.desc = desc;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pl330->mcbufsz / 2) {
		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz\n",
				__func__, __LINE__);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].desc = desc;
	_setup_req(0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}

static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
{
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (!desc)
		return;

	pch = desc->pchan;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pl330->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pl330->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].desc = NULL;
			thrd->req[1].desc = NULL;
			thrd->req_running = -1;

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *descdone, *tmp;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	regs = pl330->base;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pl330->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pl330->ddma.dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened, i.e. which thread notified */
	val = readl(regs + ES);
	if (pl330->pcfg.num_events < 32
			&& val & ~((1 << pl330->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
			__LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			descdone = thrd->req[active].desc;
			thrd->req[active].desc = NULL;

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&descdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
		list_del(&descdone->rqd);
		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	int ev;

	for (ev = 0; ev < pl330->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
	return pl330->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd = NULL;
	unsigned long flags;
	int chans, i;

	if (pl330->state == DYING)
		return NULL;

	chans = pl330->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pl330, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].desc = NULL;
				thrd->req[1].desc = NULL;
				thrd->req_running = -1;
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pl330->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC.
 */
static void read_dmac_config(struct pl330_dmac *pl330)
{
	void __iomem *regs = pl330->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pl330->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pl330->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pl330->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pl330->pcfg.num_peri = val;
		pl330->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pl330->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pl330->pcfg.mode |= DMAC_MODE_NS;
	else
		pl330->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pl330->pcfg.num_events = val;

	pl330->pcfg.irq_ns = readl(regs + CR3);
}

static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].desc = NULL;

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pl330->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pl330->mcbufsz / 2;
	thrd->req[1].desc = NULL;

	thrd->req_running = -1;
}

static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}

static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
		return ret;
	}

	return 0;
}
static int pl330_add(struct pl330_dmac *pl330)
{
	int i, ret;

	/* Check if we can handle this DMAC */
	if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
			pl330->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pl330);

	if (pl330->pcfg.num_events == 0) {
		dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pl330->mcbufsz)
		pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pl330->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}

static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < pl330->pcfg.num_chan; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel(thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}

static void pl330_del(struct pl330_dmac *pl330)
{
	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_threads(pl330);

	dma_free_coherent(pl330->ddma.dev,
		pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
		pl330->mcode_bus);
}
/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->thread, desc);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	spin_lock(&pch->thread->dmac->lock);
	_start(pch->thread);
	spin_unlock(&pch->thread->dmac->lock);

	while (!list_empty(&pch->completed_list)) {
		dma_async_tx_callback callback;
		void *callback_param;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (callback) {
			spin_unlock_irqrestore(&pch->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);
}
bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

	peri_id = chan->private;
	return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);

static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct pl330_dmac *pl330 = ofdma->of_dma_data;
	unsigned int chan_id;

	if (!pl330)
		return NULL;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pl330->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
}

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->thread = pl330_request_channel(pl330);
	if (!pch->thread) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct pl330_dmac *pl330 = pch->dmac;
	struct dma_slave_config *slave_config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		spin_lock(&pl330->lock);
		_stop(pch->thread);
		spin_unlock(&pl330->lock);

		pch->thread->req[0].desc = NULL;
		pch->thread->req[1].desc = NULL;
		pch->thread->req_running = -1;

		/* Mark all desc done */
		list_for_each_entry(desc, &pch->submitted_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->work_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->completed_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
		list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
		list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
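	/*
	 * Note (added): dma_slave_config address widths are in bytes while
	 * pch->burst_sz holds log2 of the width, hence the __ffs() above:
	 * a 4-byte wide FIFO register gives __ffs(4) = 2, matching the CCR
	 * burst-size field.
	 */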
	default:
		dev_err(pch->dmac->ddma.dev, "Unsupported command\n");
		return -ENXIO;
	}

	return 0;
}
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	tasklet_kill(&pch->task);

	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->thread);
	pch->thread = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long)pch);
}
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
		if (pch->cyclic) {
			desc->txd.callback = last->txd.callback;
			desc->txd.callback_param = last->txd.callback_param;
		}

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.scctl = CCTRL0;
	desc->rqcfg.dcctl = CCTRL0;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}
/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	desc = kcalloc(count, sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pl330->desc_pool);
	}

	spin_unlock_irqrestore(&pl330->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	if (!list_empty(&pl330->desc_pool)) {
		desc = list_entry(pl330->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pl330->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct pl330_dmac *pl330 = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pl330);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pl330, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pl330);
		if (!desc) {
			dev_err(pch->dmac->ddma.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_dmac *pl330 = pch->dmac;
	int burst_len;

	burst_len = pl330->pcfg.data_bus_width / 8;
	burst_len *= pl330->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned int i;
	dma_addr_t dst;
	dma_addr_t src;

	if (len % period_len != 0)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
		__func__, __LINE__);
		return NULL;
	}

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			if (!first)
				return NULL;

			spin_lock_irqsave(&pl330->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pl330->desc_pool);
			}

			list_move_tail(&first->node, &pl330->desc_pool);

			spin_unlock_irqrestore(&pl330->pool_lock, flags);

			return NULL;
		}

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			src = dma_addr;
			dst = pch->fifo_addr;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_addr;
			dst = dma_addr;
			break;
		default:
			break;
		}

		desc->rqtype = direction;
		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		fill_px(&desc->px, dst, src, period_len);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;
	}

	if (!desc)
		return NULL;

	pch->cyclic = true;
	desc->txd.flags = flags;

	return &desc->txd;
}
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->rqtype = DMA_MEM_TO_MEM;

	/* Select max possible burst size */
	burst = pl330->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;
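
	/*
	 * e.g. (added) burst = 8 bytes leaves the loop above at
	 * brst_size = 3; brst_size is simply log2(burst).
	 */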
	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

static void __pl330_giveback_desc(struct pl330_dmac *pl330,
				  struct dma_pl330_desc *first)
{
	unsigned long flags;
	struct dma_pl330_desc *desc;

	if (!first)
		return;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	while (!list_empty(&first->node)) {
		desc = list_entry(first->node.next,
				struct dma_pl330_desc, node);
		list_move_tail(&desc->node, &pl330->desc_pool);
	}

	list_move_tail(&first->node, &pl330->desc_pool);

	spin_unlock_irqrestore(&pl330->pool_lock, flags);
}
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct pl330_dmac *pl330 = pch->dmac;

			dev_err(pch->dmac->ddma.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pl330, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		desc->rqtype = direction;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
#define PL330_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)

static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
	struct dma_slave_caps *caps)
{
	caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = false;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	return 0;
}
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct pl330_config *pcfg;
	struct pl330_dmac *pl330;
	struct dma_pl330_chan *pch, *_p;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = dev_get_platdata(&adev->dev);

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	pl330->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pl330->base))
		return PTR_ERR(pl330->base);

	amba_set_drvdata(adev, pl330);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pl330);
			if (ret)
				return ret;
		} else {
			break;
		}
	}

	pcfg = &pl330->pcfg;

	pcfg->periph_id = adev->periphid;
	ret = pl330_add(pl330);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pl330->desc_pool);
	spin_lock_init(&pl330->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pl330->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	if (pdat)
		num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan);
	else
		num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);

	pl330->num_peripherals = num_chan;

	pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pl330->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pl330->peripherals\n");
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pl330->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->thread = NULL;
		pch->chan.device = pd;
		pch->dmac = pl330;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pcfg->num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
			dma_cap_set(DMA_PRIVATE, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;
	pd->device_slave_caps = pl330_dma_device_slave_caps;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
					 of_dma_pl330_xlate, pl330);
		if (ret) {
			dev_err(&adev->dev,
			"unable to register DMA to the generic DT DMA helpers\n");
		}
	}

	adev->dev.dma_parms = &pl330->dma_parms;

	/*
	 * This is the limit for transfers with a buswidth of 1, larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
		pcfg->num_peri, pcfg->num_events);

	return 0;

probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

probe_err2:
	pl330_del(pl330);

	return ret;
}
static int pl330_remove(struct amba_device *adev)
{
	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	dma_async_device_unregister(&pl330->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pl330_del(pl330);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");