/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
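
/*
 * Worked example (illustrative only): channels are packed two per
 * register, even channels in the "even" register (e.g. D40_DREG_ACTIVE)
 * and odd channels in the "odd" one (e.g. D40_DREG_ACTIVO), each in a
 * 2 bit field at the same bit position:
 *
 *	D40_CHAN_POS(4) == D40_CHAN_POS(5) == 4
 *	D40_CHAN_POS_MASK(5) == (0x3 << 4)
 *
 *	status = (readl(active_reg) & D40_CHAN_POS_MASK(chan)) >>
 *		 D40_CHAN_POS(chan);
 */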
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0
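
/*
 * Illustrative sketch of how these values are used (see
 * d40_alloc_mask_set() below): allocated_src/allocated_dst in
 * struct d40_phy_res hold either D40_ALLOC_FREE, D40_ALLOC_PHY or,
 * for logical use, a bitmap with one bit per claimed event line:
 *
 *	if (phy->allocated_src == D40_ALLOC_FREE)
 *		phy->allocated_src = D40_ALLOC_LOG_FREE;
 *	phy->allocated_src |= 1 << log_event_line;
 */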
/*
 * The number of free d40_desc to keep in memory before starting
 * to kfree() them.
 */
#define D40_DESC_CACHE_SIZE 50
/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len == 1.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer, there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used among other things for communication
 * with the client.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_count;
	u32				 lli_tx_len;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	enum dma_data_direction		 dir;
	bool				 is_in_client_list;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA.
 * @phy: Physical base address of LCLA.
 * @base_size: size of lcla.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equal to the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 phy;
	resource_size_t  base_size;
	spinlock_t	 lock;
	u32		*alloc_map;
	int		 num_blocks;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * the event line number. allocated_src and allocated_dst can not both be
 * allocated to a physical channel, since the interrupt handler then has
 * no way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @free: List of free descriptors, ready to be reused.
 * @free_len: Number of descriptors in the free list.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst/src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct list_head		 free;
	int				 free_len;
	struct stedma40_chan_cfg	 dma_cfg;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_lcla_elem		 lcla;
	struct d40_log_lli_full		*lcpa;
};
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * dependent data.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
};
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};
/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};
static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *) base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);

		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
	}

	return 0;
}
static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
	d40d->lli_phy.src_addr = 0;
	d40d->lli_phy.dst_addr = 0;
}
static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
				      struct d40_desc *desc)
{
	dma_cookie_t cookie = d40c->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	d40c->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
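
/*
 * Illustrative usage (sketch only): the cookie returned at submit time
 * is later compared against the channel's bookkeeping to decide
 * completion, as d40_tx_status() does below:
 *
 *	cookie = tx->tx_submit(tx);	// ends up in d40_tx_submit()
 *	...
 *	dma_async_is_complete(cookie, d40c->completed, d40c->chan.cookie);
 */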
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc;
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				desc = d;
				goto out;
			}
	}

	if (list_empty(&d40c->free)) {
		/* Alloc new desc because we're out of used ones */
		desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
		if (desc == NULL)
			goto out;
		INIT_LIST_HEAD(&desc->node);
	} else {
		/* Reuse an old desc. */
		desc = list_first_entry(&d40c->free,
					struct d40_desc,
					node);
		list_del(&desc->node);
		d40c->free_len--;
	}
out:
	return desc;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40c->free_len < D40_DESC_CACHE_SIZE) {
		list_add_tail(&d40d->node, &d40c->free);
		d40c->free_len++;
	} else
		kfree(d40d);
}
static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

/* Support functions for logical channels */
static int d40_lcla_id_get(struct d40_chan *d40c,
			   struct d40_lcla_pool *pool)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		pool->base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (pool->num_blocks > 32)
		return -EINVAL;

	spin_lock(&pool->lock);

	for (i = 0; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= pool->num_blocks)
		goto err;

	for (; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock(&pool->lock);
	return 0;
err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}
static void d40_lcla_id_put(struct d40_chan *d40c,
			    struct d40_lcla_pool *pool,
			    int id)
{
	if (id < 0)
		return;

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_lock(&pool->lock);
	pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
	spin_unlock(&pool->lock);
}
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	int status, i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {
		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
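
/*
 * Illustrative usage (sketch only): callers pair a suspend request with
 * a later RUN command to restart the channel, as in d40_pause() and
 * d40_resume() below:
 *
 *	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 *	...
 *	res = d40_channel_execute_command(d40c, D40_DMA_RUN);
 */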
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			/* Return desc to free-list */
			d40_desc_free(d40c, d);
		}

	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.src_id);
	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.dst_id);

	d40c->pending_tx = 0;
	d40c->busy = false;
}
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val = 0;

	/* If SSLNK or SDLNK is zero all events are disabled */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SDLNK);
	return val;
}
static void d40_config_enable_lidx(struct d40_chan *d40c)
{
	/* Set LIDX for lcla */
	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
}
static int d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;
	int res;

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res)
		return res;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		d40_config_enable_lidx(d40c);
	}
	return res;
}
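
/*
 * Worked example (illustrative only): the channel mode written to the
 * mode register (PRMSE/PRMSO, selected via addr_base above) is a 2 bit
 * code per channel, where ((u32)(d40c->log_num != D40_PHY_CHAN) + 1)
 * evaluates to
 *
 *	1 (binary 01) for a physical channel,
 *	2 (binary 10) for a logical channel.
 */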
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;

		src += d40d->lli_count;
		dst += d40d->lli_count;
		d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
				  d40c->lcla.dst,
				  dst, src,
				  d40c->base->plat_data->llis_per_log);
	}
	d40d->lli_count += d40d->lli_tx_len;
}
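
/*
 * Worked example (illustrative only): a job is loaded in chunks of
 * lli_tx_len LLIs. With lli_len == 8 and lli_tx_len == 4, the first
 * d40_desc_load() call moves lli_count from 0 to 4, and the reload on
 * the next terminal-count interrupt (see dma_tc_handle() below) moves
 * it from 4 to 8.
 */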
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	tx->cookie = d40_assign_cookie(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}
static int d40_start(struct d40_chan *d40c)
{
	int err;

	if (d40c->log_num != D40_PHY_CHAN) {
		err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
		if (err)
			return err;
		d40_config_set_event(d40c, true);
	}

	err = d40_channel_execute_command(d40c, D40_DMA_RUN);

	return err;
}
static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}
/* Called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	if (!d40c->phy_chan)
		return;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_count < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d_fin = d40_first_active_get(d40c);

	if (d40d_fin == NULL)
		goto err;

	d40c->completed = d40d_fin->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d_fin->txd.callback;
	callback_param = d40d_fin->txd.callback_param;

	if (async_tx_test_ack(&d40d_fin->txd)) {
		d40_pool_lli_free(d40d_fin);
		d40_desc_remove(d40d_fin);
		/* Return desc to free-list */
		d40_desc_free(d40c, d40d_fin);
	} else {
		if (!d40d_fin->is_in_client_list) {
			d40_desc_remove(d40d_fin);
			list_add_tail(&d40d_fin->node, &d40c->client);
			d40d_fin->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback)
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 tmp;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		tmp = readl(base->virtbase + il[row].clr);
		tmp |= 1 << idx;
		writel(tmp, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
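
/*
 * Worked example (illustrative only): in the loops above, each event
 * group is served by two adjacent physical channels in every bank of
 * eight (phy_num = j + event_group * 2), so for event_group == 1 the
 * candidate channels are 2, 3, 10, 11, 18, 19, and so on.
 */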
static int d40_config_chan(struct d40_chan *d40c,
			   struct stedma40_chan_cfg *info)
{
	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * 32;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type * 32 + 16;
	}

	/* Write channel configuration to the DMA */
	return d40_config_write(d40c);
}
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event, dir;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
			__func__);
		return res;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		dir = D40_CHAN_REG_SDLNK;
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		dir = D40_CHAN_REG_SSLNK;
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/*
		 * Release logical channel, deactivate the event line during
		 * the time physical res is suspended.
		 */
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
		       D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       phy->num * D40_DREG_PCDELTA + dir);

		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else
		d40_alloc_mask_free(phy, is_src, 0);

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}
static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;
	int res;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res != 0)
		goto _exit;

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
	else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}
	status = d40_chan_has_events(d40c);
	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;

	/* Resume the other logical channels if any */
	if (d40_chan_has_events(d40c))
		res = d40_channel_execute_command(d40c,
						  D40_DMA_RUN);
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
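
/*
 * Worked example (illustrative only): the residue is the remaining
 * element count times the element size in bytes, where data_width is a
 * power-of-two exponent. Assuming a 32 bit data width (exponent 2), an
 * ECNT of 16 yields 16 * (1 << 2) == 64 bytes left to transfer.
 */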
static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
		if (res)
			goto out;

		/* If bytes left to transfer or linked tx resume job */
		if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
			d40_config_set_event(d40c, true);
			res = d40_channel_execute_command(d40c, D40_DMA_RUN);
		}
	} else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}
/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);
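
/*
 * Illustrative usage (sketch only, "chan" being a channel previously
 * obtained from the dmaengine core): a client can adjust the default
 * packet size of a physical channel before submitting transfers, e.g.
 *
 *	stedma40_set_psize(chan, STEDMA40_PSIZE_PHY_4,
 *			   STEDMA40_PSIZE_PHY_4);
 */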
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	memset(d40d, 0, sizeof(struct d40_desc));
	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = d40d->lli_len;
	d40d->txd.flags = flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

		if (sgl_len > 1)
			/*
			 * Check if there is space available in lcla. If not,
			 * split list into 1-length and run only in lcpa
			 * space.
			 */
			if (d40_lcla_id_get(d40c,
					    &d40c->base->lcla_pool) != 0)
				d40d->lli_tx_len = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
					 sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);

		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
					 sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);

	} else {
		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					d40d->lli_phy.src_addr,
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.src_info.psize,
					true);
		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					d40d->lli_phy.dst_addr,
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.dst_info.psize,
					true);
		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flg);

	return &d40d->txd;
err:
	spin_unlock_irqrestore(&d40c->lock, flg);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
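
/*
 * Illustrative usage (sketch only; "cfg" is a hypothetical, fully
 * initialized struct stedma40_chan_cfg): clients pass this filter to
 * the dmaengine core to obtain a matching channel:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */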
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/*
	 * If no dma configuration is set (channel_type == 0)
	 * use default configuration
	 */
	if (d40c->dma_cfg.channel_type == 0) {
		err = d40_config_memcpy(d40c);
		if (err)
			goto err_alloc;
	}

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto err_alloc;
	}

	err = d40_config_chan(d40c, &d40c->dma_cfg);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to configure channel\n",
			__func__);
		goto err_config;
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;

err_config:
	(void) d40_free_dma(d40c);
err_alloc:
	spin_unlock_irqrestore(&d40c->lock, flags);
	dev_err(&d40c->chan.dev->device,
		"[%s] Channel allocation failed\n", __func__);
	return -EINVAL;
}
static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to free channel\n", __func__);
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int err = 0;

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	memset(d40d, 0, sizeof(struct d40_desc));

	d40d->txd.flags = flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;
		d40d->lli_tx_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
				 src,
				 size,
				 0,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 true, true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst,
				 size,
				 0,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true, true);

	} else {

		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flg);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	d40_pool_lli_free(d40d);
err:
	spin_unlock_irqrestore(&d40c->lock, flg);
	return NULL;
}
static int d40_prep_slave_sg_log(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sg_len,
				 enum dma_data_direction direction,
				 unsigned long flags)
{
	dma_addr_t dev_addr = 0;
	int total_size;

	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sg_len;
	if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
		d40d->lli_tx_len = d40d->lli_len;
	else
		d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

	if (sg_len > 1)
		/*
		 * Check if there is space available in lcla.
		 * If not, split list into 1-length and run only
		 * in lcpa space.
		 */
		if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
			d40d->lli_tx_len = 1;

	if (direction == DMA_FROM_DEVICE) {
		dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
		total_size = d40_log_sg_to_dev(&d40c->lcla,
					       sgl, sg_len,
					       &d40d->lli_log,
					       &d40c->log_def,
					       d40c->dma_cfg.src_info.data_width,
					       d40c->dma_cfg.dst_info.data_width,
					       direction,
					       flags & DMA_PREP_INTERRUPT,
					       dev_addr, d40d->lli_tx_len,
					       d40c->base->plat_data->llis_per_log);
	} else if (direction == DMA_TO_DEVICE) {
		dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
		total_size = d40_log_sg_to_dev(&d40c->lcla,
					       sgl, sg_len,
					       &d40d->lli_log,
					       &d40c->log_def,
					       d40c->dma_cfg.src_info.data_width,
					       d40c->dma_cfg.dst_info.data_width,
					       direction,
					       flags & DMA_PREP_INTERRUPT,
					       dev_addr, d40d->lli_tx_len,
					       d40c->base->plat_data->llis_per_log);
	} else
		return -EINVAL;

	if (total_size < 0)
		return -EINVAL;

	return 0;
}
static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sgl_len,
				 enum dma_data_direction direction,
				 unsigned long flags)
{
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	int res;

	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = sgl_len;

	if (direction == DMA_FROM_DEVICE) {
		dst_dev_addr = 0;
		src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
		src_dev_addr = 0;
	} else
		return -EINVAL;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				src_dev_addr,
				d40d->lli_phy.src,
				d40d->lli_phy.src_addr,
				d40c->src_def_cfg,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.src_info.psize,
				true);
	if (res < 0)
		return res;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				dst_dev_addr,
				d40d->lli_phy.dst,
				d40d->lli_phy.dst_addr,
				d40c->dst_def_cfg,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.dst_info.psize,
				true);
	if (res < 0)
		return res;

	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
			      d40d->lli_pool.size, DMA_TO_DEVICE);
	return 0;
}
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_data_direction direction,
							 unsigned long flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int err;

	if (d40c->dma_cfg.pre_transfer)
		d40c->dma_cfg.pre_transfer(chan,
					   d40c->dma_cfg.pre_transfer_data,
					   sg_dma_len(sgl));

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);
	spin_unlock_irqrestore(&d40c->lock, flg);

	if (d40d == NULL)
		return NULL;

	memset(d40d, 0, sizeof(struct d40_desc));

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		return NULL;
	}

	d40d->txd.flags = flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	return &d40d->txd;
}
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}
/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		/* Invalidate lcla element */
		d40c->lcla.src_id = -1;
		d40c->lcla.dst_id = -1;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->free);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		d40c->free_len = 0;

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}
static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_slave.device_tx_status = d40_tx_status;
	base->dma_slave.device_issue_pending = d40_issue_pending;
	base->dma_slave.device_control = d40_control;
	base->dma_slave.dev = base->dev;

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register slave channels\n",
			__func__);
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_memcpy.device_tx_status = d40_tx_status;
	base->dma_memcpy.device_issue_pending = d40_issue_pending;
	base->dma_memcpy.device_control = d40_control;
	base->dma_memcpy.dev = base->dev;
	/*
	 * This controller can only access addresses at even
	 * 32bit boundaries, i.e. 2^2
	 */
	base->dma_memcpy.copy_align = 2;

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register memcpy only channels\n",
			__func__);
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);

	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_both.device_tx_status = d40_tx_status;
	base->dma_both.device_issue_pending = d40_issue_pending;
	base->dma_both.device_control = d40_control;
	base->dma_both.dev = base->dev;
	base->dma_both.copy_align = 2;
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register logical and physical capable channels\n",
			__func__);
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}
	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 Depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * HREF V1 has 0x0028
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},

		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	i = readl(virtbase + D40_DREG_PERIPHID2);

	if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, i & 0xf, D40_PERIPHID2_DESIGNER);
		goto failure;
	}

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 (i >> 4) & 0xf, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels are event lines for all
		 * src devices and dst devices
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}
	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	return base;

failure:
	if (clk) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
static void __init d40_hw_init(struct d40_base *base)
{

	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC,    .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}

	/* Get IO for logical channel link address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcla\" resource defined\n",
			__func__);
		goto failure;
	}

	base->lcla_pool.base_size = resource_size(res);
	base->lcla_pool.phy = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcla") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCLA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}
	val = readl(base->virtbase + D40_DREG_LCLA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	base->lcla_pool.base = ioremap(res->start, resource_size(res));
	if (!base->lcla_pool.base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->lcla_pool.num_blocks = base->num_phy_chans;

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);

	if (ret) {
		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->virtbase)
			iounmap(base->virtbase);
		if (base->lcla_pool.phy)
			release_mem_region(base->lcla_pool.phy,
					   base->lcla_pool.base_size);
		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);