/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"
/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC		0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE		0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR		0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_REG_VDMA_VERSION		0x002c
/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20
#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)
/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)
/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE          BIT(3)
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);
/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);
/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
};
struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)
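/*
 * Usage sketch (mirrors the calls later in this file): poll DMASR until the
 * channel reports halted, with no inter-read delay and XILINX_DMA_LOOP_COUNT
 * as the timeout bound:
 *
 *	u32 val;
 *	int err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
 *					  val & XILINX_DMA_DMASR_HALTED, 0,
 *					  XILINX_DMA_LOOP_COUNT);
 */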
/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}
/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver is writing to a register offset which is not a
 * multiple of 64 bits (e.g. 0x5c), write it as two separate 32 bit writes
 * instead of a single 64 bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}
static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}
static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}
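/*
 * Note: the prep routines below call this with period_len == 0 for slave-sg
 * transfers (sg_used walks through one scatterlist entry), while the cyclic
 * prep routine passes the offset of the current period (period_len * i) in
 * that argument to select a period within the ring buffer.
 */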
/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}
/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}
/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}
/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
		xilinx_dma_free_tx_segment(chan, chan->seg_v);
	}
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}
/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}
/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * to meet the Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_axidma_tx_segment),
				   __alignof__(struct xilinx_axidma_tx_segment),
				   0);
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_cdma_tx_segment),
				   __alignof__(struct xilinx_cdma_tx_segment),
				   0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				     chan->dev,
				     sizeof(struct xilinx_vdma_tx_segment),
				     __alignof__(struct xilinx_vdma_tx_segment),
				     0);
	}

	if (!chan->desc_pool) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/*
		 * For AXI DMA case after submitting a pending_list, keep
		 * an extra segment allocated so that the "next descriptor"
		 * pointer on the tail descriptor always points to a
		 * valid descriptor, even when paused after reaching taildesc.
		 * This way, it is possible to issue additional
		 * transfers without halting and restarting the channel.
		 */
		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);

		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain,
		 * so allocate a desc segment during channel allocation for
		 * programming the tail descriptor.
		 */
		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA resetting once channel will reset the
		 * other channel as well so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}
/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
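		/*
		 * Each BD's control word holds the programmed byte count and
		 * its status word reports the bytes actually transferred;
		 * the difference summed over the chain gives the residue.
		 */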
		if (chan->has_sg) {
			list_for_each_entry(segment, &desc->segments, node) {
				hw = &segment->hw;
				residue += (hw->control - hw->status) &
					   XILINX_DMA_MAX_TRANS_LEN;
			}
		}
		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		dma_set_residue(txstate, chan->residue);
	}

	return ret;
}
/**
 * xilinx_dma_is_running - Check if DMA channel is running
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if running, '0' if not.
 */
static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
{
	return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		 XILINX_DMA_DMASR_HALTED) &&
		(dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
		 XILINX_DMA_DMACR_RUNSTOP);
}

/**
 * xilinx_dma_is_idle - Check if DMA channel is idle
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if idle, '0' if not.
 */
static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
{
	return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		XILINX_DMA_DMASR_IDLE;
}
/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}
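/*
 * Note the CDMA quiesce above does not clear a run/stop bit the way
 * xilinx_dma_stop_transfer() does; it simply waits for the engine to report
 * idle, i.e. for the in-flight transfer to finish.
 */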
/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}
/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/* Configure the channel with the number of frame buffers */
	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
		       chan->desc_pendingcount);

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	if (config->park && (config->park_frm >= 0) &&
	    (config->park_frm < chan->num_frms)) {
		if (chan->direction == DMA_MEM_TO_DEV)
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
				  XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
		else
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
				  XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
	}

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		if (chan->desc_submitcount < chan->num_frms)
			i = chan->desc_submitcount;

		list_for_each_entry(segment, &desc->segments, node) {
			if (chan->ext_addr)
				vdma_desc_write_64(chan,
					XILINX_VDMA_REG_START_ADDRESS_64(i++),
					segment->hw.buf_addr,
					segment->hw.buf_addr_msb);
			else
				vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

			last = segment;
		}

		if (!last)
			return;

		/* HW expects these parameters to be same for one transaction */
		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
	}

	if (!chan->has_sg) {
		list_del(&desc->node);
		list_add_tail(&desc->node, &chan->active_list);
		chan->desc_submitcount++;
		chan->desc_pendingcount--;
		if (chan->desc_submitcount == chan->num_frms)
			chan->desc_submitcount = 0;
	} else {
		list_splice_tail_init(&chan->pending_list, &chan->active_list);
		chan->desc_pendingcount = 0;
	}
}
/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
				XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}
/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
	u32 reg;

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

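	/*
	 * Swap the first BD of the chain with the spare segment kept in
	 * chan->seg_v: the copy becomes the head that hardware fetches, the
	 * old head becomes the new spare, and the tail BD's next_desc is
	 * pointed at that spare so it always references a valid descriptor
	 * (see xilinx_dma_alloc_chan_resources() for why the spare exists).
	 */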
	if (chan->has_sg && !chan->xdev->mcdma) {
		old_head = list_first_entry(&head_desc->segments,
					    struct xilinx_axidma_tx_segment, node);
		new_head = chan->seg_v;
		/* Copy Buffer Descriptor fields. */
		new_head->hw = old_head->hw;

		/* Swap and save new reserve */
		list_replace_init(&old_head->node, &new_head->node);
		chan->seg_v = old_head;

		tail_segment->hw.next_desc = chan->seg_v->phys;
		head_desc->async_tx.phys = new_head->phys;
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
				  XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg && !chan->xdev->mcdma)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
				       head_desc->async_tx.phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
					       head_desc->async_tx.phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_CDESC(chan->tdest),
					head_desc->async_tx.phys);
			}
		}
	}

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg && !chan->xdev->mcdma) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
					       tail_segment->phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_TDESC(chan->tdest),
					tail_segment->phys);
			}
		}
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}
/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan : xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}
/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;

	return err;
}
/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return err;
}
/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR
		 * register, so make sure not to set other error bits to 1.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}
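/*
 * Note: the hardirq path above only moves finished descriptors to done_list
 * and kicks the next transfer; the client callbacks run later in softirq
 * context via xilinx_dma_do_tasklet() -> xilinx_dma_chan_desc_cleanup().
 */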
/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;
	struct xilinx_axidma_tx_segment *axidma_tail_segment;
	struct xilinx_cdma_tx_segment *cdma_tail_segment;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_vdma_tx_segment,
					       node);
		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		cdma_tail_segment = list_last_entry(&tail_desc->segments,
						    struct xilinx_cdma_tx_segment,
						    node);
		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else {
		axidma_tail_segment = list_last_entry(&tail_desc->segments,
						      struct xilinx_axidma_tx_segment,
						      node);
		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	}

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

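	/*
	 * In VDMA direct register mode there are only num_frms frame-store
	 * slots in hardware, so the clamp below caps the pending count at
	 * the number of frame buffers.
	 */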
	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}
/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->cyclic) {
		xilinx_dma_free_tx_descriptor(chan, desc);
		return -EBUSY;
	}

	if (chan->err) {
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	if (desc->cyclic)
		chan->cyclic = true;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment, *prev = NULL;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
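	/*
	 * The FRMDLY_STRIDE word built above packs the frame delay above
	 * XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT (bit 24) and the line stride
	 * in bytes in the low-order bits.
	 */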
	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	prev = segment;

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	desc->async_tx.phys = segment->phys;
	hw->next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;
	prev->hw.next_desc = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, period_len - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			    XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	err = chan->stop_transfer(chan);
	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	return 0;
}
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
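/*
 * Typical use from a client driver (sketch, not taken from this file; the
 * channel name "vdma0" is a made-up example):
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,
 *		.coalesc = 1,
 *		.park = 0,
 *	};
 *	struct dma_chan *chan = dma_request_chan(dev, "vdma0");
 *
 *	if (!IS_ERR(chan))
 *		err = xilinx_vdma_channel_set_config(chan, &cfg);
 */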
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
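
/*
 * Note on clock handling: "s_axi_lite_aclk" is treated as mandatory here,
 * while the MM2S ("m_axi_mm2s_aclk"), S2MM ("m_axi_s2mm_aclk") and SG
 * ("m_axi_sg_aclk") clocks are optional, left as NULL and skipped when
 * absent. The CDMA and VDMA helpers below follow the same pattern with
 * their own clock names.
 */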

static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
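
/*
 * Note: clk_disable_unprepare() treats a NULL clock pointer as a no-op,
 * so the optional clocks that were never found (and were left as NULL by
 * the clk_init helpers above) can safely be passed here unconditionally.
 */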

/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes the special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}

/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of DMA channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: 0 always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i;
	u32 nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}
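
/*
 * Note: only multichannel (mcdma) configurations are expected to carry a
 * "dma-channels" property; plain configurations default to one channel
 * per child node, which is why the warning above is limited to mcdma.
 */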

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
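
/*
 * Example (illustrative only): with the single-cell specifier decoded
 * above, a client device-tree node might reference channel 0 as
 *
 *	dmas = <&axi_vdma_0 0>;
 *	dma-names = "vdma0";
 *
 * and its driver could then request the channel through the standard
 * dmaengine helper (the label "axi_vdma_0" and name "vdma0" are made up):
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_slave_channel(dev, "vdma0");
 *	if (!chan)
 *		return -ENODEV;
 */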

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
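
/*
 * Example (illustrative only): a minimal AXI VDMA node matched by the
 * table above; addresses, interrupt numbers and property values are
 * placeholders, not a definitive binding reference.
 *
 *	axi_vdma_0: dma@40030000 {
 *		compatible = "xlnx,axi-vdma-1.00.a";
 *		reg = <0x40030000 0x10000>;
 *		xlnx,num-fstores = <0x8>;
 *		xlnx,flush-fsync = <0x1>;
 *		xlnx,addrwidth = <0x20>;
 *		dma-channel@40030000 {
 *			compatible = "xlnx,axi-vdma-mm2s-channel";
 *			interrupts = <0 54 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *		dma-channel@40030030 {
 *			compatible = "xlnx,axi-vdma-s2mm-channel";
 *			interrupts = <0 53 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *	};
 */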

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **) =
					axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width = 32; /* assume 32-bit if property absent */
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
					xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	err = dma_async_device_register(&xdev->common);
	if (err) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error;
	}

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");