2 * Copyright (C) Ericsson AB 2007-2008
3 * Copyright (C) ST-Ericsson SA 2008-2010
4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
6 * License terms: GNU General Public License (GPL) version 2
9 #include <linux/dma-mapping.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/export.h>
13 #include <linux/dmaengine.h>
14 #include <linux/platform_device.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/err.h>
20 #include <linux/amba/bus.h>
22 #include <plat/ste_dma40.h>
24 #include "dmaengine.h"
25 #include "ste_dma40_ll.h"
27 #define D40_NAME "dma40"
29 #define D40_PHY_CHAN -1
31 /* For masking out/in 2 bit channel positions */
32 #define D40_CHAN_POS(chan) (2 * (chan / 2))
33 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
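/*
 * Example: D40_CHAN_POS(7) = 2 * (7 / 2) = 6, so
 * D40_CHAN_POS_MASK(7) = 0x3 << 6 = 0xc0.
 */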
35 /* Maximum iterations taken before giving up suspending a channel */
36 #define D40_SUSPEND_MAX_IT 500
39 #define DMA40_AUTOSUSPEND_DELAY 100
41 /* Hardware requirement on LCLA alignment */
42 #define LCLA_ALIGNMENT 0x40000
44 /* Max number of links per event group */
45 #define D40_LCLA_LINK_PER_EVENT_GRP 128
46 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
48 /* Attempts before giving up trying to get pages that are aligned */
49 #define MAX_LCLA_ALLOC_ATTEMPTS 256
51 /* Bit markings for allocation map */
52 #define D40_ALLOC_FREE (1 << 31)
53 #define D40_ALLOC_PHY (1 << 30)
54 #define D40_ALLOC_LOG_FREE 0
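/*
 * For logical channels, each allocated src/dst event line additionally
 * sets bit (1 << event_line) in allocated_src/allocated_dst; see
 * d40_alloc_mask_set() and d40_alloc_mask_free() below.
 */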
57 * enum d40_command - The different commands and/or statuses.
59 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
60 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
61 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
62 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
67 D40_DMA_SUSPEND_REQ = 2,
72 * These are the registers that have to be saved and later restored
73 * when the DMA hw is powered off.
74 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
76 static u32 d40_backup_regs[] = {
85 #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
87 /* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
88 static u32 d40_backup_regs_v3[] = {
107 #define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
109 static u32 d40_backup_regs_chan[] = {
121 * struct d40_lli_pool - Structure for keeping LLIs in memory
123 * @base: Pointer to memory area when the pre_alloc_lli's are not large
124 * enough, i.e. bigger than the most common case of 1 dst and 1 src. NULL if
125 * pre_alloc_lli is used.
126 * @dma_addr: DMA address, if mapped
127 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
128 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
129 * one buffer to one buffer.
131 struct d40_lli_pool {
135 /* Space for dst and src, plus an extra for padding */
136 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
140 * struct d40_desc - A descriptor is one DMA job.
142 * @lli_phy: LLI settings for physical channel. Both src and dst
143 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
144 * lli_len equals one.
145 * @lli_log: Same as above but for logical channels.
146 * @lli_pool: The pool with two entries pre-allocated.
147 * @lli_len: Number of llis of current descriptor.
148 * @lli_current: Number of transferred llis.
149 * @lcla_alloc: Number of LCLA entries allocated.
150 * @txd: DMA engine struct. Used, among other things, for communication
153 * @is_in_client_list: true if the client owns this descriptor.
154 * @cyclic: true if this is a cyclic job
156 * This descriptor is used for both logical and physical transfers.
160 struct d40_phy_lli_bidir lli_phy;
162 struct d40_log_lli_bidir lli_log;
164 struct d40_lli_pool lli_pool;
169 struct dma_async_tx_descriptor txd;
170 struct list_head node;
172 bool is_in_client_list;
177 * struct d40_lcla_pool - LCLA pool settings and data.
179 * @base: The virtual address of LCLA. Aligned to an 18 bit (256 KiB) boundary.
180 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
181 * This pointer is only there for clean-up on error.
182 * @pages: The number of pages needed for all physical channels.
183 * Only used later for clean-up on error
184 * @lock: Lock to protect the content in this struct.
185 * @alloc_map: Big map of which LCLA entry is owned by which job.
187 struct d40_lcla_pool {
190 void *base_unaligned;
193 struct d40_desc **alloc_map;
197 * struct d40_phy_res - struct for handling eventlines mapped to physical
200 * @lock: A lock protecting this entity.
201 * @reserved: True if used by secure world or otherwise.
202 * @num: The physical channel number of this entity.
203 * @allocated_src: Bit mapped to show which src event lines are mapped to
204 * this physical channel. Can also be free or physically allocated.
205 * @allocated_dst: Same as for src but for dst.
206 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
220 * struct d40_chan - Struct that describes a channel.
222 * @lock: A spinlock to protect this struct.
223 * @log_num: The logical number, if any, of this channel.
224 * @pending_tx: The number of pending transfers. Used between interrupt handler
226 * @busy: Set to true when transfer is ongoing on this channel.
227 * @phy_chan: Pointer to physical channel which this instance runs on. If this
228 * pointer is NULL, then the channel is not allocated.
229 * @chan: DMA engine handle.
230 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
231 * transfer and call client callback.
232 * @client: Client owned descriptor list.
233 * @pending_queue: Submitted jobs, to be issued by issue_pending()
234 * @active: Active descriptor.
235 * @queue: Queued jobs.
236 * @prepare_queue: Prepared jobs.
237 * @dma_cfg: The client configuration of this dma channel.
238 * @configured: whether the dma_cfg configuration is valid
239 * @base: Pointer to the device instance struct.
240 * @src_def_cfg: Default cfg register setting for src.
241 * @dst_def_cfg: Default cfg register setting for dst.
242 * @log_def: Default logical channel settings.
243 * @lcpa: Pointer to dst and src lcpa settings.
244 * @runtime_addr: runtime configured address.
245 * @runtime_direction: runtime configured direction.
247 * This struct can either "be" a logical or a physical channel.
254 struct d40_phy_res *phy_chan;
255 struct dma_chan chan;
256 struct tasklet_struct tasklet;
257 struct list_head client;
258 struct list_head pending_queue;
259 struct list_head active;
260 struct list_head queue;
261 struct list_head prepare_queue;
262 struct stedma40_chan_cfg dma_cfg;
264 struct d40_base *base;
265 /* Default register configurations */
268 struct d40_def_lcsp log_def;
269 struct d40_log_lli_full *lcpa;
270 /* Runtime reconfiguration */
271 dma_addr_t runtime_addr;
272 enum dma_transfer_direction runtime_direction;
276 * struct d40_base - The big global struct, one for each probed instance.
278 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
279 * @execmd_lock: Lock for execute command usage since several channels share
280 * the same physical register.
281 * @dev: The device structure.
282 * @virtbase: The virtual base address of the DMA's registers.
283 * @rev: silicon revision detected.
284 * @clk: Pointer to the DMA clock structure.
285 * @phy_start: Physical memory start of the DMA registers.
286 * @phy_size: Size of the DMA register map.
287 * @irq: The IRQ number.
288 * @num_phy_chans: The number of physical channels. Read from HW. This
289 * is the number of available channels for this driver, not counting "Secure
290 * mode" allocated physical channels.
291 * @num_log_chans: The number of logical channels. Calculated from
293 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
294 * @dma_slave: dma_device channels that can only do slave transfers.
295 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
296 * @phy_chans: Room for all possible physical channels in system.
297 * @log_chans: Room for all possible logical channels in system.
298 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
299 * to log_chans entries.
300 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
301 * to phy_chans entries.
302 * @plat_data: Pointer to provided platform_data which is the driver
304 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
305 * @phy_res: Vector containing all physical channels.
306 * @lcla_pool: lcla pool settings and data.
307 * @lcpa_base: The virtual mapped address of LCPA.
308 * @phy_lcpa: The physical address of the LCPA.
309 * @lcpa_size: The size of the LCPA area.
310 * @desc_slab: cache for descriptors.
311 * @reg_val_backup: Here the values of some hardware registers are stored
312 * before the DMA is powered off. They are restored when the power is back on.
313 * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
315 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
316 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
317 * @initialized: true if the dma has been initialized
320 spinlock_t interrupt_lock;
321 spinlock_t execmd_lock;
323 void __iomem *virtbase;
326 phys_addr_t phy_start;
327 resource_size_t phy_size;
331 struct dma_device dma_both;
332 struct dma_device dma_slave;
333 struct dma_device dma_memcpy;
334 struct d40_chan *phy_chans;
335 struct d40_chan *log_chans;
336 struct d40_chan **lookup_log_chans;
337 struct d40_chan **lookup_phy_chans;
338 struct stedma40_platform_data *plat_data;
339 struct regulator *lcpa_regulator;
340 /* Physical half channels */
341 struct d40_phy_res *phy_res;
342 struct d40_lcla_pool lcla_pool;
345 resource_size_t lcpa_size;
346 struct kmem_cache *desc_slab;
347 u32 reg_val_backup[BACKUP_REGS_SZ];
348 u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
349 u32 *reg_val_backup_chan;
350 u16 gcc_pwr_off_mask;
355 * struct d40_interrupt_lookup - lookup table for interrupt handler
357 * @src: Interrupt mask register.
358 * @clr: Interrupt clear register.
359 * @is_error: true if this is an error interrupt.
360 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
361 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
363 struct d40_interrupt_lookup {
371 * struct d40_reg_val - simple lookup struct
373 * @reg: The register.
374 * @val: The value that belongs to the register in reg.
381 static struct device *chan2dev(struct d40_chan *d40c)
383 return &d40c->chan.dev->device;
386 static bool chan_is_physical(struct d40_chan *chan)
388 return chan->log_num == D40_PHY_CHAN;
391 static bool chan_is_logical(struct d40_chan *chan)
393 return !chan_is_physical(chan);
396 static void __iomem *chan_base(struct d40_chan *chan)
398 return chan->base->virtbase + D40_DREG_PCBASE +
399 chan->phy_chan->num * D40_DREG_PCDELTA;
402 #define d40_err(dev, format, arg...) \
403 dev_err(dev, "[%s] " format, __func__, ## arg)
405 #define chan_err(d40c, format, arg...) \
406 d40_err(chan2dev(d40c), format, ## arg)
408 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
411 bool is_log = chan_is_logical(d40c);
416 align = sizeof(struct d40_log_lli);
418 align = sizeof(struct d40_phy_lli);
421 base = d40d->lli_pool.pre_alloc_lli;
422 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
423 d40d->lli_pool.base = NULL;
425 d40d->lli_pool.size = lli_len * 2 * align;
427 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
428 d40d->lli_pool.base = base;
430 if (d40d->lli_pool.base == NULL)
435 d40d->lli_log.src = PTR_ALIGN(base, align);
436 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
438 d40d->lli_pool.dma_addr = 0;
440 d40d->lli_phy.src = PTR_ALIGN(base, align);
441 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
443 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
448 if (dma_mapping_error(d40c->base->dev,
449 d40d->lli_pool.dma_addr)) {
450 kfree(d40d->lli_pool.base);
451 d40d->lli_pool.base = NULL;
452 d40d->lli_pool.dma_addr = 0;
460 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
462 if (d40d->lli_pool.dma_addr)
463 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
464 d40d->lli_pool.size, DMA_TO_DEVICE);
466 kfree(d40d->lli_pool.base);
467 d40d->lli_pool.base = NULL;
468 d40d->lli_pool.size = 0;
469 d40d->lli_log.src = NULL;
470 d40d->lli_log.dst = NULL;
471 d40d->lli_phy.src = NULL;
472 d40d->lli_phy.dst = NULL;
475 static int d40_lcla_alloc_one(struct d40_chan *d40c,
476 struct d40_desc *d40d)
483 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
485 p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
488 * Allocate both src and dst at the same time; therefore only half the
489 * links are used. The index starts at 1 since 0 is used as the end marker.
491 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
492 if (!d40c->base->lcla_pool.alloc_map[p + i]) {
493 d40c->base->lcla_pool.alloc_map[p + i] = d40d;
500 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
505 static int d40_lcla_free_all(struct d40_chan *d40c,
506 struct d40_desc *d40d)
512 if (chan_is_physical(d40c))
515 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
517 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
518 if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
519 D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
520 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
521 D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
523 if (d40d->lcla_alloc == 0) {
530 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
536 static void d40_desc_remove(struct d40_desc *d40d)
538 list_del(&d40d->node);
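/*
 * Get a descriptor: reuse an already acked descriptor from the client
 * list if possible, otherwise allocate a fresh one from the slab cache.
 */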
541 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
543 struct d40_desc *desc = NULL;
545 if (!list_empty(&d40c->client)) {
549 list_for_each_entry_safe(d, _d, &d40c->client, node) {
550 if (async_tx_test_ack(&d->txd)) {
553 memset(desc, 0, sizeof(*desc));
560 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
563 INIT_LIST_HEAD(&desc->node);
568 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
571 d40_pool_lli_free(d40c, d40d);
572 d40_lcla_free_all(d40c, d40d);
573 kmem_cache_free(d40c->base->desc_slab, d40d);
576 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
578 list_add_tail(&desc->node, &d40c->active);
581 static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
583 struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
584 struct d40_phy_lli *lli_src = desc->lli_phy.src;
585 void __iomem *base = chan_base(chan);
587 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
588 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
589 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
590 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
592 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
593 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
594 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
595 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
598 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
600 struct d40_lcla_pool *pool = &chan->base->lcla_pool;
601 struct d40_log_lli_bidir *lli = &desc->lli_log;
602 int lli_current = desc->lli_current;
603 int lli_len = desc->lli_len;
604 bool cyclic = desc->cyclic;
605 int curr_lcla = -EINVAL;
607 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
611 * We may have partially running cyclic transfers, in case we didn't get
612 * enough LCLA entries.
614 linkback = cyclic && lli_current == 0;
617 * For linkback, we need one LCLA even with only one link, because we
618 * can't link back to the one in LCPA space
620 if (linkback || (lli_len - lli_current > 1)) {
621 curr_lcla = d40_lcla_alloc_one(chan, desc);
622 first_lcla = curr_lcla;
626 * For linkback, we normally load the LCPA in the loop since we need to
627 * link it to the second LCLA and not the first. However, if we
628 * couldn't even get a first LCLA, then we have to run in LCPA and
631 if (!linkback || curr_lcla == -EINVAL) {
632 unsigned int flags = 0;
634 if (curr_lcla == -EINVAL)
635 flags |= LLI_TERM_INT;
637 d40_log_lli_lcpa_write(chan->lcpa,
638 &lli->dst[lli_current],
639 &lli->src[lli_current],
648 for (; lli_current < lli_len; lli_current++) {
649 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
651 struct d40_log_lli *lcla = pool->base + lcla_offset;
652 unsigned int flags = 0;
655 if (lli_current + 1 < lli_len)
656 next_lcla = d40_lcla_alloc_one(chan, desc);
658 next_lcla = linkback ? first_lcla : -EINVAL;
660 if (cyclic || next_lcla == -EINVAL)
661 flags |= LLI_TERM_INT;
663 if (linkback && curr_lcla == first_lcla) {
664 /* First link goes in both LCPA and LCLA */
665 d40_log_lli_lcpa_write(chan->lcpa,
666 &lli->dst[lli_current],
667 &lli->src[lli_current],
672 * One unused LCLA in the cyclic case if the very first
675 d40_log_lli_lcla_write(lcla,
676 &lli->dst[lli_current],
677 &lli->src[lli_current],
681 * Cache maintenance is not needed if lcla is
684 if (!use_esram_lcla) {
685 dma_sync_single_range_for_device(chan->base->dev,
686 pool->dma_addr, lcla_offset,
687 2 * sizeof(struct d40_log_lli),
690 curr_lcla = next_lcla;
692 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
699 desc->lli_current = lli_current;
702 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
704 if (chan_is_physical(d40c)) {
705 d40_phy_lli_load(d40c, d40d);
706 d40d->lli_current = d40d->lli_len;
708 d40_log_lli_to_lcxa(d40c, d40d);
711 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
715 if (list_empty(&d40c->active))
718 d = list_first_entry(&d40c->active,
724 /* remove desc from current queue and add it to the pending_queue */
725 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
727 d40_desc_remove(desc);
728 desc->is_in_client_list = false;
729 list_add_tail(&desc->node, &d40c->pending_queue);
732 static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
736 if (list_empty(&d40c->pending_queue))
739 d = list_first_entry(&d40c->pending_queue,
745 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
749 if (list_empty(&d40c->queue))
752 d = list_first_entry(&d40c->queue,
758 static int d40_psize_2_burst_size(bool is_log, int psize)
761 if (psize == STEDMA40_PSIZE_LOG_1)
764 if (psize == STEDMA40_PSIZE_PHY_1)
772 * The DMA hardware only supports transferring at most
773 * STEDMA40_MAX_SEG_SIZE << data_width bytes per dma element. Calculate
774 * the total number of dma elements required to send the entire sg list.
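/*
 * Example (illustrative numbers only): if seg_max works out to 0xfffc
 * bytes, a 200000 byte transfer needs 200000 / 0xfffc = 3 full
 * elements plus a remainder, i.e. dmalen = 4.
 */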
776 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
779 u32 max_w = max(data_width1, data_width2);
780 u32 min_w = min(data_width1, data_width2);
781 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
783 if (seg_max > STEDMA40_MAX_SEG_SIZE)
784 seg_max -= (1 << max_w);
786 if (!IS_ALIGNED(size, 1 << max_w))
792 dmalen = size / seg_max;
793 if (dmalen * seg_max < size)
799 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
800 u32 data_width1, u32 data_width2)
802 struct scatterlist *sg;
807 for_each_sg(sgl, sg, sg_len, i) {
808 ret = d40_size_2_dmalen(sg_dma_len(sg),
809 data_width1, data_width2);
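/*
 * Save (save == true) or restore (save == false) the registers whose
 * offsets are listed in regaddr[] to/from the backup[] array.
 */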
819 static void dma40_backup(void __iomem *baseaddr, u32 *backup,
820 u32 *regaddr, int num, bool save)
824 for (i = 0; i < num; i++) {
825 void __iomem *addr = baseaddr + regaddr[i];
828 backup[i] = readl_relaxed(addr);
830 writel_relaxed(backup[i], addr);
834 static void d40_save_restore_registers(struct d40_base *base, bool save)
838 /* Save/Restore channel specific registers */
839 for (i = 0; i < base->num_phy_chans; i++) {
843 if (base->phy_res[i].reserved)
846 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
847 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
849 dma40_backup(addr, &base->reg_val_backup_chan[idx],
850 d40_backup_regs_chan,
851 ARRAY_SIZE(d40_backup_regs_chan),
855 /* Save/Restore global registers */
856 dma40_backup(base->virtbase, base->reg_val_backup,
857 d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
860 /* Save/Restore registers only existing on dma40 v3 and later */
862 dma40_backup(base->virtbase, base->reg_val_backup_v3,
864 ARRAY_SIZE(d40_backup_regs_v3),
868 static void d40_save_restore_registers(struct d40_base *base, bool save)
873 static int d40_channel_execute_command(struct d40_chan *d40c,
874 enum d40_command command)
878 void __iomem *active_reg;
883 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
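/*
 * Even numbered physical channels are controlled via the ACTIVE
 * register, odd numbered ones via ACTIVO; each channel occupies a
 * 2 bit command/status field at D40_CHAN_POS(num).
 */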
885 if (d40c->phy_chan->num % 2 == 0)
886 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
888 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
890 if (command == D40_DMA_SUSPEND_REQ) {
891 status = (readl(active_reg) &
892 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
893 D40_CHAN_POS(d40c->phy_chan->num);
895 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
899 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
900 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
903 if (command == D40_DMA_SUSPEND_REQ) {
905 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
906 status = (readl(active_reg) &
907 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
908 D40_CHAN_POS(d40c->phy_chan->num);
912 * Reduce the number of bus accesses while
913 * waiting for the DMA to suspend.
917 if (status == D40_DMA_STOP ||
918 status == D40_DMA_SUSPENDED)
922 if (i == D40_SUSPEND_MAX_IT) {
924 "unable to suspend the chl %d (log: %d) status %x\n",
925 d40c->phy_chan->num, d40c->log_num,
933 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
937 static void d40_term_all(struct d40_chan *d40c)
939 struct d40_desc *d40d;
942 /* Release active descriptors */
943 while ((d40d = d40_first_active_get(d40c))) {
944 d40_desc_remove(d40d);
945 d40_desc_free(d40c, d40d);
948 /* Release queued descriptors waiting for transfer */
949 while ((d40d = d40_first_queued(d40c))) {
950 d40_desc_remove(d40d);
951 d40_desc_free(d40c, d40d);
954 /* Release pending descriptors */
955 while ((d40d = d40_first_pending(d40c))) {
956 d40_desc_remove(d40d);
957 d40_desc_free(d40c, d40d);
960 /* Release client owned descriptors */
961 if (!list_empty(&d40c->client))
962 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
963 d40_desc_remove(d40d);
964 d40_desc_free(d40c, d40d);
967 /* Release descriptors in prepare queue */
968 if (!list_empty(&d40c->prepare_queue))
969 list_for_each_entry_safe(d40d, _d,
970 &d40c->prepare_queue, node) {
971 d40_desc_remove(d40d);
972 d40_desc_free(d40c, d40d);
975 d40c->pending_tx = 0;
979 static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
982 void __iomem *addr = chan_base(d40c) + reg;
986 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
987 | ~D40_EVENTLINE_MASK(event), addr);
992 * The hardware sometimes doesn't register the enable when src and dst
993 * event lines are active on the same logical channel. Retry to ensure
994 * it does. Usually only one retry is sufficient.
998 writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
999 | ~D40_EVENTLINE_MASK(event), addr);
1001 if (readl(addr) & D40_EVENTLINE_MASK(event))
1006 dev_dbg(chan2dev(d40c),
1007 "[%s] workaround enable S%cLNK (%d tries)\n",
1008 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1014 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
1016 unsigned long flags;
1018 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1020 /* Enable event line connected to device (or memcpy) */
1021 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1022 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
1023 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1025 __d40_config_set_event(d40c, do_enable, event,
1026 D40_CHAN_REG_SSLNK);
1029 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
1030 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1032 __d40_config_set_event(d40c, do_enable, event,
1033 D40_CHAN_REG_SDLNK);
1036 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1039 static u32 d40_chan_has_events(struct d40_chan *d40c)
1041 void __iomem *chanbase = chan_base(d40c);
1044 val = readl(chanbase + D40_CHAN_REG_SSLNK);
1045 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1050 static u32 d40_get_prmo(struct d40_chan *d40c)
1052 static const unsigned int phy_map[] = {
1053 [STEDMA40_PCHAN_BASIC_MODE]
1054 = D40_DREG_PRMO_PCHAN_BASIC,
1055 [STEDMA40_PCHAN_MODULO_MODE]
1056 = D40_DREG_PRMO_PCHAN_MODULO,
1057 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
1058 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1060 static const unsigned int log_map[] = {
1061 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1062 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1063 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1064 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1065 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1066 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1069 if (chan_is_physical(d40c))
1070 return phy_map[d40c->dma_cfg.mode_opt];
1072 return log_map[d40c->dma_cfg.mode_opt];
1075 static void d40_config_write(struct d40_chan *d40c)
1080 /* Odd addresses are even addresses + 4 */
1081 addr_base = (d40c->phy_chan->num % 2) * 4;
1082 /* Setup channel mode to logical or physical */
1083 var = ((u32)(chan_is_logical(d40c)) + 1) <<
1084 D40_CHAN_POS(d40c->phy_chan->num);
1085 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1087 /* Setup operational mode option register */
1088 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1090 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1092 if (chan_is_logical(d40c)) {
1093 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1094 & D40_SREG_ELEM_LOG_LIDX_MASK;
1095 void __iomem *chanbase = chan_base(d40c);
1097 /* Set default config for CFG reg */
1098 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1099 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1101 /* Set LIDX for lcla */
1102 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1103 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1105 /* Clear LNK which will be used by d40_chan_has_events() */
1106 writel(0, chanbase + D40_CHAN_REG_SSLNK);
1107 writel(0, chanbase + D40_CHAN_REG_SDLNK);
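/*
 * Remaining transfer size in bytes: the remaining element count (ECNT)
 * multiplied by the destination data width in bytes (1 << data_width).
 */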
1111 static u32 d40_residue(struct d40_chan *d40c)
1115 if (chan_is_logical(d40c))
1116 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1117 >> D40_MEM_LCSP2_ECNT_POS;
1119 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1120 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1121 >> D40_SREG_ELEM_PHY_ECNT_POS;
1124 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1127 static bool d40_tx_is_linked(struct d40_chan *d40c)
1131 if (chan_is_logical(d40c))
1132 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1134 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1135 & D40_SREG_LNK_PHYS_LNK_MASK;
1140 static int d40_pause(struct d40_chan *d40c)
1143 unsigned long flags;
1148 pm_runtime_get_sync(d40c->base->dev);
1149 spin_lock_irqsave(&d40c->lock, flags);
1151 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1153 if (chan_is_logical(d40c)) {
1154 d40_config_set_event(d40c, false);
1155 /* Resume the other logical channels if any */
1156 if (d40_chan_has_events(d40c))
1157 res = d40_channel_execute_command(d40c,
1161 pm_runtime_mark_last_busy(d40c->base->dev);
1162 pm_runtime_put_autosuspend(d40c->base->dev);
1163 spin_unlock_irqrestore(&d40c->lock, flags);
1167 static int d40_resume(struct d40_chan *d40c)
1170 unsigned long flags;
1175 spin_lock_irqsave(&d40c->lock, flags);
1176 pm_runtime_get_sync(d40c->base->dev);
1177 if (d40c->base->rev == 0)
1178 if (chan_is_logical(d40c)) {
1179 res = d40_channel_execute_command(d40c,
1180 D40_DMA_SUSPEND_REQ);
1184 /* If there are bytes left to transfer or a linked tx, resume the job */
1185 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1187 if (chan_is_logical(d40c))
1188 d40_config_set_event(d40c, true);
1190 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1194 pm_runtime_mark_last_busy(d40c->base->dev);
1195 pm_runtime_put_autosuspend(d40c->base->dev);
1196 spin_unlock_irqrestore(&d40c->lock, flags);
1200 static int d40_terminate_all(struct d40_chan *chan)
1202 unsigned long flags;
1205 ret = d40_pause(chan);
1206 if (!ret && chan_is_physical(chan))
1207 ret = d40_channel_execute_command(chan, D40_DMA_STOP);
1209 spin_lock_irqsave(&chan->lock, flags);
1211 spin_unlock_irqrestore(&chan->lock, flags);
1216 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1218 struct d40_chan *d40c = container_of(tx->chan,
1221 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1222 unsigned long flags;
1223 dma_cookie_t cookie;
1225 spin_lock_irqsave(&d40c->lock, flags);
1226 cookie = dma_cookie_assign(tx);
1227 d40_desc_queue(d40c, d40d);
1228 spin_unlock_irqrestore(&d40c->lock, flags);
1233 static int d40_start(struct d40_chan *d40c)
1235 if (d40c->base->rev == 0) {
1238 if (chan_is_logical(d40c)) {
1239 err = d40_channel_execute_command(d40c,
1240 D40_DMA_SUSPEND_REQ);
1246 if (chan_is_logical(d40c))
1247 d40_config_set_event(d40c, true);
1249 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1252 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1254 struct d40_desc *d40d;
1257 /* Start queued jobs, if any */
1258 d40d = d40_first_queued(d40c);
1264 pm_runtime_get_sync(d40c->base->dev);
1266 /* Remove from queue */
1267 d40_desc_remove(d40d);
1269 /* Add to active queue */
1270 d40_desc_submit(d40c, d40d);
1272 /* Initiate DMA job */
1273 d40_desc_load(d40c, d40d);
1276 err = d40_start(d40c);
1285 /* called from interrupt context */
1286 static void dma_tc_handle(struct d40_chan *d40c)
1288 struct d40_desc *d40d;
1290 /* Get first active entry from list */
1291 d40d = d40_first_active_get(d40c);
1298 * If this was a partially loaded list, we need to reload
1299 * it, and only once the loaded part has completed. We need to check
1300 * for done because the interrupt will hit for every link, and
1301 * not just the last one.
1303 if (d40d->lli_current < d40d->lli_len
1304 && !d40_tx_is_linked(d40c)
1305 && !d40_residue(d40c)) {
1306 d40_lcla_free_all(d40c, d40d);
1307 d40_desc_load(d40c, d40d);
1308 (void) d40_start(d40c);
1310 if (d40d->lli_current == d40d->lli_len)
1311 d40d->lli_current = 0;
1314 d40_lcla_free_all(d40c, d40d);
1316 if (d40d->lli_current < d40d->lli_len) {
1317 d40_desc_load(d40c, d40d);
1319 (void) d40_start(d40c);
1323 if (d40_queue_start(d40c) == NULL)
1325 pm_runtime_mark_last_busy(d40c->base->dev);
1326 pm_runtime_put_autosuspend(d40c->base->dev);
1330 tasklet_schedule(&d40c->tasklet);
1334 static void dma_tasklet(unsigned long data)
1336 struct d40_chan *d40c = (struct d40_chan *) data;
1337 struct d40_desc *d40d;
1338 unsigned long flags;
1339 dma_async_tx_callback callback;
1340 void *callback_param;
1342 spin_lock_irqsave(&d40c->lock, flags);
1344 /* Get first active entry from list */
1345 d40d = d40_first_active_get(d40c);
1350 dma_cookie_complete(&d40d->txd);
1353 * If terminating a channel, pending_tx is set to zero.
1354 * This prevents any finished active jobs from returning to the client.
1356 if (d40c->pending_tx == 0) {
1357 spin_unlock_irqrestore(&d40c->lock, flags);
1361 /* Callback to client */
1362 callback = d40d->txd.callback;
1363 callback_param = d40d->txd.callback_param;
1365 if (!d40d->cyclic) {
1366 if (async_tx_test_ack(&d40d->txd)) {
1367 d40_desc_remove(d40d);
1368 d40_desc_free(d40c, d40d);
1370 if (!d40d->is_in_client_list) {
1371 d40_desc_remove(d40d);
1372 d40_lcla_free_all(d40c, d40d);
1373 list_add_tail(&d40d->node, &d40c->client);
1374 d40d->is_in_client_list = true;
1381 if (d40c->pending_tx)
1382 tasklet_schedule(&d40c->tasklet);
1384 spin_unlock_irqrestore(&d40c->lock, flags);
1386 if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
1387 callback(callback_param);
1392 /* Rescue manoeuvre if receiving double interrupts */
1393 if (d40c->pending_tx > 0)
1395 spin_unlock_irqrestore(&d40c->lock, flags);
1398 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1400 static const struct d40_interrupt_lookup il[] = {
1401 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
1402 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
1403 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
1404 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
1405 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
1406 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
1407 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
1408 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
1409 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
1410 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
1414 u32 regs[ARRAY_SIZE(il)];
1418 struct d40_chan *d40c;
1419 unsigned long flags;
1420 struct d40_base *base = data;
1422 spin_lock_irqsave(&base->interrupt_lock, flags);
1424 /* Read interrupt status of both logical and physical channels */
1425 for (i = 0; i < ARRAY_SIZE(il); i++)
1426 regs[i] = readl(base->virtbase + il[i].src);
1430 chan = find_next_bit((unsigned long *)regs,
1431 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
1433 /* No more set bits found? */
1434 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
1437 row = chan / BITS_PER_LONG;
1438 idx = chan & (BITS_PER_LONG - 1);
1441 writel(1 << idx, base->virtbase + il[row].clr);
1443 if (il[row].offset == D40_PHY_CHAN)
1444 d40c = base->lookup_phy_chans[idx];
1446 d40c = base->lookup_log_chans[il[row].offset + idx];
1447 spin_lock(&d40c->lock);
1449 if (!il[row].is_error)
1450 dma_tc_handle(d40c);
1452 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1453 chan, il[row].offset, idx);
1455 spin_unlock(&d40c->lock);
1458 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1463 static int d40_validate_conf(struct d40_chan *d40c,
1464 struct stedma40_chan_cfg *conf)
1467 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
1468 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1469 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1472 chan_err(d40c, "Invalid direction.\n");
1476 if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
1477 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1478 d40c->runtime_addr == 0) {
1480 chan_err(d40c, "Invalid TX channel address (%d)\n",
1481 conf->dst_dev_type);
1485 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1486 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1487 d40c->runtime_addr == 0) {
1488 chan_err(d40c, "Invalid RX channel address (%d)\n",
1489 conf->src_dev_type);
1493 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1494 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1495 chan_err(d40c, "Invalid dst\n");
1499 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1500 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1501 chan_err(d40c, "Invalid src\n");
1505 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1506 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1507 chan_err(d40c, "No event line\n");
1511 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1512 (src_event_group != dst_event_group)) {
1513 chan_err(d40c, "Invalid event group\n");
1517 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1519 * DMAC HW supports it. Will be added to this driver,
1520 * in case any dma client requires it.
1522 chan_err(d40c, "periph to periph not supported\n");
1526 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1527 (1 << conf->src_info.data_width) !=
1528 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1529 (1 << conf->dst_info.data_width)) {
1531 * The DMAC hardware only supports
1532 * src (burst x width) == dst (burst x width)
1535 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1542 static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1543 bool is_src, int log_event_line, bool is_log,
1546 unsigned long flags;
1547 spin_lock_irqsave(&phy->lock, flags);
1549 *first_user = ((phy->allocated_src | phy->allocated_dst)
1553 /* Physical interrupts are masked per physical full channel */
1554 if (phy->allocated_src == D40_ALLOC_FREE &&
1555 phy->allocated_dst == D40_ALLOC_FREE) {
1556 phy->allocated_dst = D40_ALLOC_PHY;
1557 phy->allocated_src = D40_ALLOC_PHY;
1563 /* Logical channel */
1565 if (phy->allocated_src == D40_ALLOC_PHY)
1568 if (phy->allocated_src == D40_ALLOC_FREE)
1569 phy->allocated_src = D40_ALLOC_LOG_FREE;
1571 if (!(phy->allocated_src & (1 << log_event_line))) {
1572 phy->allocated_src |= 1 << log_event_line;
1577 if (phy->allocated_dst == D40_ALLOC_PHY)
1580 if (phy->allocated_dst == D40_ALLOC_FREE)
1581 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1583 if (!(phy->allocated_dst & (1 << log_event_line))) {
1584 phy->allocated_dst |= 1 << log_event_line;
1591 spin_unlock_irqrestore(&phy->lock, flags);
1594 spin_unlock_irqrestore(&phy->lock, flags);
1598 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1601 unsigned long flags;
1602 bool is_free = false;
1604 spin_lock_irqsave(&phy->lock, flags);
1605 if (!log_event_line) {
1606 phy->allocated_dst = D40_ALLOC_FREE;
1607 phy->allocated_src = D40_ALLOC_FREE;
1612 /* Logical channel */
1614 phy->allocated_src &= ~(1 << log_event_line);
1615 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1616 phy->allocated_src = D40_ALLOC_FREE;
1618 phy->allocated_dst &= ~(1 << log_event_line);
1619 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1620 phy->allocated_dst = D40_ALLOC_FREE;
1623 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1627 spin_unlock_irqrestore(&phy->lock, flags);
1632 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1637 struct d40_phy_res *phys;
1642 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1644 phys = d40c->base->phy_res;
1646 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1647 dev_type = d40c->dma_cfg.src_dev_type;
1648 log_num = 2 * dev_type;
1650 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1651 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1652 /* dst event lines are used for logical memcpy */
1653 dev_type = d40c->dma_cfg.dst_dev_type;
1654 log_num = 2 * dev_type + 1;
1659 event_group = D40_TYPE_TO_GROUP(dev_type);
1660 event_line = D40_TYPE_TO_EVENT(dev_type);
1663 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1664 /* Find physical half channel */
1665 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1667 if (d40_alloc_mask_set(&phys[i], is_src,
1673 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1674 int phy_num = j + event_group * 2;
1675 for (i = phy_num; i < phy_num + 2; i++) {
1676 if (d40_alloc_mask_set(&phys[i],
1686 d40c->phy_chan = &phys[i];
1687 d40c->log_num = D40_PHY_CHAN;
1693 /* Find logical channel */
1694 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1695 int phy_num = j + event_group * 2;
1697 if (d40c->dma_cfg.use_fixed_channel) {
1698 i = d40c->dma_cfg.phy_channel;
1700 if ((i != phy_num) && (i != phy_num + 1)) {
1701 dev_err(chan2dev(d40c),
1702 "invalid fixed phy channel %d\n", i);
1706 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1707 is_log, first_phy_user))
1710 dev_err(chan2dev(d40c),
1711 "could not allocate fixed phy channel %d\n", i);
1716 * Spread logical channels across all available physical rather
1717 * than pack every logical channel at the first available phy
1721 for (i = phy_num; i < phy_num + 2; i++) {
1722 if (d40_alloc_mask_set(&phys[i], is_src,
1728 for (i = phy_num + 1; i >= phy_num; i--) {
1729 if (d40_alloc_mask_set(&phys[i], is_src,
1739 d40c->phy_chan = &phys[i];
1740 d40c->log_num = log_num;
1744 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1746 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1752 static int d40_config_memcpy(struct d40_chan *d40c)
1754 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1756 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1757 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1758 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1759 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1760 memcpy[d40c->chan.chan_id];
1762 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1763 dma_has_cap(DMA_SLAVE, cap)) {
1764 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1766 chan_err(d40c, "No memcpy\n");
1774 static int d40_free_dma(struct d40_chan *d40c)
1779 struct d40_phy_res *phy = d40c->phy_chan;
1782 /* Terminate all queued and active transfers */
1786 chan_err(d40c, "phy == null\n");
1790 if (phy->allocated_src == D40_ALLOC_FREE &&
1791 phy->allocated_dst == D40_ALLOC_FREE) {
1792 chan_err(d40c, "channel already free\n");
1796 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1797 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1798 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1800 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1801 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1804 chan_err(d40c, "Unknown direction\n");
1808 pm_runtime_get_sync(d40c->base->dev);
1809 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1811 chan_err(d40c, "suspend failed\n");
1815 if (chan_is_logical(d40c)) {
1816 /* Release logical channel, deactivate the event line */
1818 d40_config_set_event(d40c, false);
1819 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1822 * Check if there are more logical allocations
1823 * on this phy channel.
1825 if (!d40_alloc_mask_free(phy, is_src, event)) {
1826 /* Resume the other logical channels if any */
1827 if (d40_chan_has_events(d40c)) {
1828 res = d40_channel_execute_command(d40c,
1832 "Executing RUN command\n");
1837 (void) d40_alloc_mask_free(phy, is_src, 0);
1840 /* Release physical channel */
1841 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1843 chan_err(d40c, "Failed to stop channel\n");
1848 pm_runtime_mark_last_busy(d40c->base->dev);
1849 pm_runtime_put_autosuspend(d40c->base->dev);
1853 d40c->phy_chan = NULL;
1854 d40c->configured = false;
1855 d40c->base->lookup_phy_chans[phy->num] = NULL;
1858 pm_runtime_mark_last_busy(d40c->base->dev);
1859 pm_runtime_put_autosuspend(d40c->base->dev);
1863 static bool d40_is_paused(struct d40_chan *d40c)
1865 void __iomem *chanbase = chan_base(d40c);
1866 bool is_paused = false;
1867 unsigned long flags;
1868 void __iomem *active_reg;
1872 spin_lock_irqsave(&d40c->lock, flags);
1874 if (chan_is_physical(d40c)) {
1875 if (d40c->phy_chan->num % 2 == 0)
1876 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1878 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1880 status = (readl(active_reg) &
1881 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1882 D40_CHAN_POS(d40c->phy_chan->num);
1883 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1889 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1890 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1891 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1892 status = readl(chanbase + D40_CHAN_REG_SDLNK);
1893 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1894 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1895 status = readl(chanbase + D40_CHAN_REG_SSLNK);
1897 chan_err(d40c, "Unknown direction\n");
1901 status = (status & D40_EVENTLINE_MASK(event)) >>
1902 D40_EVENTLINE_POS(event);
1904 if (status != D40_DMA_RUN)
1907 spin_unlock_irqrestore(&d40c->lock, flags);
1913 static u32 stedma40_residue(struct dma_chan *chan)
1915 struct d40_chan *d40c =
1916 container_of(chan, struct d40_chan, chan);
1918 unsigned long flags;
1920 spin_lock_irqsave(&d40c->lock, flags);
1921 bytes_left = d40_residue(d40c);
1922 spin_unlock_irqrestore(&d40c->lock, flags);
1928 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
1929 struct scatterlist *sg_src, struct scatterlist *sg_dst,
1930 unsigned int sg_len, dma_addr_t src_dev_addr,
1931 dma_addr_t dst_dev_addr)
1933 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1934 struct stedma40_half_channel_info *src_info = &cfg->src_info;
1935 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
1938 ret = d40_log_sg_to_lli(sg_src, sg_len,
1941 chan->log_def.lcsp1,
1942 src_info->data_width,
1943 dst_info->data_width);
1945 ret = d40_log_sg_to_lli(sg_dst, sg_len,
1948 chan->log_def.lcsp3,
1949 dst_info->data_width,
1950 src_info->data_width);
1952 return ret < 0 ? ret : 0;
1956 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
1957 struct scatterlist *sg_src, struct scatterlist *sg_dst,
1958 unsigned int sg_len, dma_addr_t src_dev_addr,
1959 dma_addr_t dst_dev_addr)
1961 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1962 struct stedma40_half_channel_info *src_info = &cfg->src_info;
1963 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
1964 unsigned long flags = 0;
1968 flags |= LLI_CYCLIC | LLI_TERM_INT;
1970 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
1972 virt_to_phys(desc->lli_phy.src),
1974 src_info, dst_info, flags);
1976 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
1978 virt_to_phys(desc->lli_phy.dst),
1980 dst_info, src_info, flags);
1982 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
1983 desc->lli_pool.size, DMA_TO_DEVICE);
1985 return ret < 0 ? ret : 0;
1989 static struct d40_desc *
1990 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
1991 unsigned int sg_len, unsigned long dma_flags)
1993 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1994 struct d40_desc *desc;
1997 desc = d40_desc_get(chan);
2001 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2002 cfg->dst_info.data_width);
2003 if (desc->lli_len < 0) {
2004 chan_err(chan, "Unaligned size\n");
2008 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2010 chan_err(chan, "Could not allocate lli\n");
2015 desc->lli_current = 0;
2016 desc->txd.flags = dma_flags;
2017 desc->txd.tx_submit = d40_tx_submit;
2019 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2024 d40_desc_free(chan, desc);
2029 d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
2031 struct stedma40_platform_data *plat = chan->base->plat_data;
2032 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2033 dma_addr_t addr = 0;
2035 if (chan->runtime_addr)
2036 return chan->runtime_addr;
2038 if (direction == DMA_DEV_TO_MEM)
2039 addr = plat->dev_rx[cfg->src_dev_type];
2040 else if (direction == DMA_MEM_TO_DEV)
2041 addr = plat->dev_tx[cfg->dst_dev_type];
2046 static struct dma_async_tx_descriptor *
2047 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2048 struct scatterlist *sg_dst, unsigned int sg_len,
2049 enum dma_transfer_direction direction, unsigned long dma_flags)
2051 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2052 dma_addr_t src_dev_addr = 0;
2053 dma_addr_t dst_dev_addr = 0;
2054 struct d40_desc *desc;
2055 unsigned long flags;
2058 if (!chan->phy_chan) {
2059 chan_err(chan, "Cannot prepare unallocated channel\n");
2064 spin_lock_irqsave(&chan->lock, flags);
2066 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2070 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2071 desc->cyclic = true;
2073 if (direction != DMA_NONE) {
2074 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
2076 if (direction == DMA_DEV_TO_MEM)
2077 src_dev_addr = dev_addr;
2078 else if (direction == DMA_MEM_TO_DEV)
2079 dst_dev_addr = dev_addr;
2082 if (chan_is_logical(chan))
2083 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2084 sg_len, src_dev_addr, dst_dev_addr);
2086 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2087 sg_len, src_dev_addr, dst_dev_addr);
2090 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2091 chan_is_logical(chan) ? "log" : "phy", ret);
2096 * Add the descriptor to the prepare queue in order to be able
2097 * to free it later in terminate_all.
2099 list_add_tail(&desc->node, &chan->prepare_queue);
2101 spin_unlock_irqrestore(&chan->lock, flags);
2107 d40_desc_free(chan, desc);
2108 spin_unlock_irqrestore(&chan->lock, flags);
2112 bool stedma40_filter(struct dma_chan *chan, void *data)
2114 struct stedma40_chan_cfg *info = data;
2115 struct d40_chan *d40c =
2116 container_of(chan, struct d40_chan, chan);
2120 err = d40_validate_conf(d40c, info);
2122 d40c->dma_cfg = *info;
2124 err = d40_config_memcpy(d40c);
2127 d40c->configured = true;
2131 EXPORT_SYMBOL(stedma40_filter);
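/*
 * Mark the event line of the given device type as high/normal priority
 * and realtime/non-realtime by writing its bit into the appropriate
 * per event group register; dst event lines are kept in the upper
 * halfword (see below).
 */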
2133 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2135 bool realtime = d40c->dma_cfg.realtime;
2136 bool highprio = d40c->dma_cfg.high_priority;
2137 u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
2138 u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
2139 u32 event = D40_TYPE_TO_EVENT(dev_type);
2140 u32 group = D40_TYPE_TO_GROUP(dev_type);
2141 u32 bit = 1 << event;
2143 /* Destination event lines are stored in the upper halfword */
2147 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2148 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2151 static void d40_set_prio_realtime(struct d40_chan *d40c)
2153 if (d40c->base->rev < 3)
2156 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
2157 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
2158 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
2160 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
2161 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
2162 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
2165 /* DMA ENGINE functions */
2166 static int d40_alloc_chan_resources(struct dma_chan *chan)
2169 unsigned long flags;
2170 struct d40_chan *d40c =
2171 container_of(chan, struct d40_chan, chan);
2173 spin_lock_irqsave(&d40c->lock, flags);
2175 dma_cookie_init(chan);
2177 /* If no dma configuration is set, use the default configuration (memcpy) */
2178 if (!d40c->configured) {
2179 err = d40_config_memcpy(d40c);
2181 chan_err(d40c, "Failed to configure memcpy channel\n");
2186 err = d40_allocate_channel(d40c, &is_free_phy);
2188 chan_err(d40c, "Failed to allocate channel\n");
2189 d40c->configured = false;
2193 pm_runtime_get_sync(d40c->base->dev);
2194 /* Fill in basic CFG register values */
2195 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
2196 &d40c->dst_def_cfg, chan_is_logical(d40c));
2198 d40_set_prio_realtime(d40c);
2200 if (chan_is_logical(d40c)) {
2201 d40_log_cfg(&d40c->dma_cfg,
2202 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2204 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
2205 d40c->lcpa = d40c->base->lcpa_base +
2206 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
2208 d40c->lcpa = d40c->base->lcpa_base +
2209 d40c->dma_cfg.dst_dev_type *
2210 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2213 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2214 chan_is_logical(d40c) ? "logical" : "physical",
2215 d40c->phy_chan->num,
2216 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2220 * Only write channel configuration to the DMA if the physical
2221 * resource is free. In case of multiple logical channels
2222 * on the same physical resource, only the first write is necessary.
2225 d40_config_write(d40c);
2227 pm_runtime_mark_last_busy(d40c->base->dev);
2228 pm_runtime_put_autosuspend(d40c->base->dev);
2229 spin_unlock_irqrestore(&d40c->lock, flags);
2233 static void d40_free_chan_resources(struct dma_chan *chan)
2235 struct d40_chan *d40c =
2236 container_of(chan, struct d40_chan, chan);
2238 unsigned long flags;
2240 if (d40c->phy_chan == NULL) {
2241 chan_err(d40c, "Cannot free unallocated channel\n");
2246 spin_lock_irqsave(&d40c->lock, flags);
2248 err = d40_free_dma(d40c);
2251 chan_err(d40c, "Failed to free channel\n");
2252 spin_unlock_irqrestore(&d40c->lock, flags);
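/*
 * Wrap the single src/dst buffer in one-entry scatterlists and reuse
 * the common sg preparation path.
 */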
2255 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2259 unsigned long dma_flags)
2261 struct scatterlist dst_sg;
2262 struct scatterlist src_sg;
2264 sg_init_table(&dst_sg, 1);
2265 sg_init_table(&src_sg, 1);
2267 sg_dma_address(&dst_sg) = dst;
2268 sg_dma_address(&src_sg) = src;
2270 sg_dma_len(&dst_sg) = size;
2271 sg_dma_len(&src_sg) = size;
2273 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
2276 static struct dma_async_tx_descriptor *
2277 d40_prep_memcpy_sg(struct dma_chan *chan,
2278 struct scatterlist *dst_sg, unsigned int dst_nents,
2279 struct scatterlist *src_sg, unsigned int src_nents,
2280 unsigned long dma_flags)
2282 if (dst_nents != src_nents)
2285 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2288 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2289 struct scatterlist *sgl,
2290 unsigned int sg_len,
2291 enum dma_transfer_direction direction,
2292 unsigned long dma_flags,
2295 if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
2298 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2301 static struct dma_async_tx_descriptor *
2302 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2303 size_t buf_len, size_t period_len,
2304 enum dma_transfer_direction direction, void *context)
2306 unsigned int periods = buf_len / period_len;
2307 struct dma_async_tx_descriptor *txd;
2308 struct scatterlist *sg;
2311 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2312 for (i = 0; i < periods; i++) {
2313 sg_dma_address(&sg[i]) = dma_addr;
2314 sg_dma_len(&sg[i]) = period_len;
2315 dma_addr += period_len;
2318 sg[periods].offset = 0;
2319 sg[periods].length = 0;
2320 sg[periods].page_link =
2321 ((unsigned long)sg | 0x01) & ~0x02;
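/*
 * The extra entry is a chain entry (bit 0 set, "last" bit cleared)
 * pointing back to the first entry, so the scatterlist forms a ring;
 * d40_prep_sg() detects this and marks the descriptor cyclic.
 */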
2323 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2324 DMA_PREP_INTERRUPT);
2331 static enum dma_status d40_tx_status(struct dma_chan *chan,
2332 dma_cookie_t cookie,
2333 struct dma_tx_state *txstate)
2335 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2336 enum dma_status ret;
2338 if (d40c->phy_chan == NULL) {
2339 chan_err(d40c, "Cannot read status of unallocated channel\n");
2343 ret = dma_cookie_status(chan, cookie, txstate);
2344 if (ret != DMA_SUCCESS)
2345 dma_set_residue(txstate, stedma40_residue(chan));
2347 if (d40_is_paused(d40c))
2353 static void d40_issue_pending(struct dma_chan *chan)
2355 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2356 unsigned long flags;
2358 if (d40c->phy_chan == NULL) {
2359 chan_err(d40c, "Channel is not allocated!\n");
2363 spin_lock_irqsave(&d40c->lock, flags);
2365 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2367 /* Busy means that queued jobs are already being processed */
2369 (void) d40_queue_start(d40c);
2371 spin_unlock_irqrestore(&d40c->lock, flags);
2375 dma40_config_to_halfchannel(struct d40_chan *d40c,
2376 struct stedma40_half_channel_info *info,
2377 enum dma_slave_buswidth width,
2380 enum stedma40_periph_data_width addr_width;
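/*
 * Translate the dma_slave_config bus width into a DMA40 data width and
 * pick the largest supported burst (psize) that does not exceed maxburst.
 */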
2384 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2385 addr_width = STEDMA40_BYTE_WIDTH;
2387 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2388 addr_width = STEDMA40_HALFWORD_WIDTH;
2390 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2391 addr_width = STEDMA40_WORD_WIDTH;
2393 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2394 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2397 dev_err(d40c->base->dev,
2398 "illegal peripheral address width "
2404 if (chan_is_logical(d40c)) {
2406 psize = STEDMA40_PSIZE_LOG_16;
2407 else if (maxburst >= 8)
2408 psize = STEDMA40_PSIZE_LOG_8;
2409 else if (maxburst >= 4)
2410 psize = STEDMA40_PSIZE_LOG_4;
2412 psize = STEDMA40_PSIZE_LOG_1;
2415 psize = STEDMA40_PSIZE_PHY_16;
2416 else if (maxburst >= 8)
2417 psize = STEDMA40_PSIZE_PHY_8;
2418 else if (maxburst >= 4)
2419 psize = STEDMA40_PSIZE_PHY_4;
2421 psize = STEDMA40_PSIZE_PHY_1;
2424 info->data_width = addr_width;
2425 info->psize = psize;
2426 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2431 /* Runtime reconfiguration extension */
2432 static int d40_set_runtime_config(struct dma_chan *chan,
2433 struct dma_slave_config *config)
2435 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2436 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2437 enum dma_slave_buswidth src_addr_width, dst_addr_width;
2438 dma_addr_t config_addr;
2439 u32 src_maxburst, dst_maxburst;
2442 src_addr_width = config->src_addr_width;
2443 src_maxburst = config->src_maxburst;
2444 dst_addr_width = config->dst_addr_width;
2445 dst_maxburst = config->dst_maxburst;
2447 if (config->direction == DMA_DEV_TO_MEM) {
2448 dma_addr_t dev_addr_rx =
2449 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2451 config_addr = config->src_addr;
2453 dev_dbg(d40c->base->dev,
2454 "channel has a pre-wired RX address %08x "
2455 "overriding with %08x\n",
2456 dev_addr_rx, config_addr);
2457 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2458 dev_dbg(d40c->base->dev,
2459 "channel was not configured for peripheral "
2460 "to memory transfer (%d) overriding\n",
2462 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2464 /* Configure the memory side */
2465 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2466 dst_addr_width = src_addr_width;
2467 if (dst_maxburst == 0)
2468 dst_maxburst = src_maxburst;
2470 } else if (config->direction == DMA_MEM_TO_DEV) {
2471 dma_addr_t dev_addr_tx =
2472 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2474 config_addr = config->dst_addr;
2476 dev_dbg(d40c->base->dev,
2477 "channel has a pre-wired TX address %08x "
2478 "overriding with %08x\n",
2479 dev_addr_tx, config_addr);
2480 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2481 dev_dbg(d40c->base->dev,
2482 "channel was not configured for memory "
2483 "to peripheral transfer (%d) overriding\n",
2485 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2487 /* Configure the memory side */
2488 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2489 src_addr_width = dst_addr_width;
2490 if (src_maxburst == 0)
2491 src_maxburst = dst_maxburst;
2493 dev_err(d40c->base->dev,
2494 "unrecognized channel direction %d\n",
2499 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2500 dev_err(d40c->base->dev,
2501 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2509 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2515 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2521 /* Fill in register values */
2522 if (chan_is_logical(d40c))
2523 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2525 d40_phy_cfg(cfg, &d40c->src_def_cfg,
2526 &d40c->dst_def_cfg, false);
2528 /* These settings will take precedence later */
2529 d40c->runtime_addr = config_addr;
2530 d40c->runtime_direction = config->direction;
2531 dev_dbg(d40c->base->dev,
2532 "configured channel %s for %s, data width %d/%d, "
2533 "maxburst %d/%d elements, LE, no flow control\n",
2534 dma_chan_name(chan),
2535 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2536 src_addr_width, dst_addr_width,
2537 src_maxburst, dst_maxburst);
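/*
 * Illustrative only: a client driver would normally reach
 * d40_set_runtime_config() above through dmaengine_slave_config();
 * fifo_addr below is a placeholder for the peripheral's FIFO address.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */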
2542 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2545 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2547 if (d40c->phy_chan == NULL) {
2548 chan_err(d40c, "Channel is not allocated!\n");
2553 case DMA_TERMINATE_ALL:
2554 return d40_terminate_all(d40c);
2556 return d40_pause(d40c);
2558 return d40_resume(d40c);
2559 case DMA_SLAVE_CONFIG:
2560 return d40_set_runtime_config(chan,
2561 (struct dma_slave_config *) arg);
2566 /* Other commands are unimplemented */
2570 /* Initialization functions */
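/*
 * Set up the d40_chan structures for one dma_device: initialize the
 * lock, job lists and tasklet of each channel and add the channel to
 * the device's channel list.
 */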
2572 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2573 struct d40_chan *chans, int offset,
2577 struct d40_chan *d40c;
2579 INIT_LIST_HEAD(&dma->channels);
2581 for (i = offset; i < offset + num_chans; i++) {
2584 d40c->chan.device = dma;
2586 spin_lock_init(&d40c->lock);
2588 d40c->log_num = D40_PHY_CHAN;
2590 INIT_LIST_HEAD(&d40c->active);
2591 INIT_LIST_HEAD(&d40c->queue);
2592 INIT_LIST_HEAD(&d40c->pending_queue);
2593 INIT_LIST_HEAD(&d40c->client);
2594 INIT_LIST_HEAD(&d40c->prepare_queue);
2596 tasklet_init(&d40c->tasklet, dma_tasklet,
2597 (unsigned long) d40c);
2599 list_add_tail(&d40c->chan.device_node,
2604 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2606 if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2607 dev->device_prep_slave_sg = d40_prep_slave_sg;
2609 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2610 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2613 * This controller can only access addresses at even
2614 * 32-bit boundaries, i.e. 2^2 byte (4 byte) alignment.
2616 dev->copy_align = 2;
2619 if (dma_has_cap(DMA_SG, dev->cap_mask))
2620 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2622 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2623 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2625 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2626 dev->device_free_chan_resources = d40_free_chan_resources;
2627 dev->device_issue_pending = d40_issue_pending;
2628 dev->device_tx_status = d40_tx_status;
2629 dev->device_control = d40_control;
2630 dev->dev = base->dev;
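/*
 * Register the three dma_device instances: a slave-capable device backed
 * by logical channels, a memcpy-only device, and one device for the
 * reserved physical channels that handles both slave and memcpy work.
 */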
2633 static int __init d40_dmaengine_init(struct d40_base *base,
2634 int num_reserved_chans)
2638 d40_chan_init(base, &base->dma_slave, base->log_chans,
2639 0, base->num_log_chans);
2641 dma_cap_zero(base->dma_slave.cap_mask);
2642 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2643 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2645 d40_ops_init(base, &base->dma_slave);
2647 err = dma_async_device_register(&base->dma_slave);
2650 d40_err(base->dev, "Failed to register slave channels\n");
2654 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2655 base->num_log_chans, base->plat_data->memcpy_len);
2657 dma_cap_zero(base->dma_memcpy.cap_mask);
2658 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2659 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2661 d40_ops_init(base, &base->dma_memcpy);
2663 err = dma_async_device_register(&base->dma_memcpy);
2667 "Failed to register memcpy-only channels\n");
2671 d40_chan_init(base, &base->dma_both, base->phy_chans,
2672 0, num_reserved_chans);
2674 dma_cap_zero(base->dma_both.cap_mask);
2675 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2676 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2677 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2678 dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2680 d40_ops_init(base, &base->dma_both);
2681 err = dma_async_device_register(&base->dma_both);
2685 "Failed to register logical and physical capable channels\n");
2690 dma_async_device_unregister(&base->dma_memcpy);
2692 dma_async_device_unregister(&base->dma_slave);
2697 /* Suspend/resume functionality */
2699 static int dma40_pm_suspend(struct device *dev)
2701 struct platform_device *pdev = to_platform_device(dev);
2702 struct d40_base *base = platform_get_drvdata(pdev);
2704 if (!pm_runtime_suspended(dev))
2707 if (base->lcpa_regulator)
2708 ret = regulator_disable(base->lcpa_regulator);
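/*
 * Runtime suspend: save the DMA controller registers and gate the global
 * clocks via D40_DREG_GCC before the hardware loses power.
 */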
2712 static int dma40_runtime_suspend(struct device *dev)
2714 struct platform_device *pdev = to_platform_device(dev);
2715 struct d40_base *base = platform_get_drvdata(pdev);
2717 d40_save_restore_registers(base, true);
2719 /* Don't disable/enable clocks for v1 due to HW bugs */
2721 writel_relaxed(base->gcc_pwr_off_mask,
2722 base->virtbase + D40_DREG_GCC);
2727 static int dma40_runtime_resume(struct device *dev)
2729 struct platform_device *pdev = to_platform_device(dev);
2730 struct d40_base *base = platform_get_drvdata(pdev);
2732 if (base->initialized)
2733 d40_save_restore_registers(base, false);
2735 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
2736 base->virtbase + D40_DREG_GCC);
2740 static int dma40_resume(struct device *dev)
2742 struct platform_device *pdev = to_platform_device(dev);
2743 struct d40_base *base = platform_get_drvdata(pdev);
2746 if (base->lcpa_regulator)
2747 ret = regulator_enable(base->lcpa_regulator);
2752 static const struct dev_pm_ops dma40_pm_ops = {
2753 .suspend = dma40_pm_suspend,
2754 .runtime_suspend = dma40_runtime_suspend,
2755 .runtime_resume = dma40_runtime_resume,
2756 .resume = dma40_resume,
2758 #define DMA40_PM_OPS (&dma40_pm_ops)
2760 #define DMA40_PM_OPS NULL
2763 /* Initialization functions. */
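/*
 * Scan the secure mode registers and the platform data to find out which
 * physical channels this kernel may use; reserved and disabled channels
 * are marked as occupied. Returns the number of available channels.
 */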
2765 static int __init d40_phy_res_init(struct d40_base *base)
2768 int num_phy_chans_avail = 0;
2770 int odd_even_bit = -2;
2771 int gcc = D40_DREG_GCC_ENA;
2773 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2774 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2776 for (i = 0; i < base->num_phy_chans; i++) {
2777 base->phy_res[i].num = i;
2778 odd_even_bit += 2 * ((i % 2) == 0);
2779 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2780 /* Mark security-only channels as occupied */
2781 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2782 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2783 base->phy_res[i].reserved = true;
2784 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
2786 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
2791 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2792 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2793 base->phy_res[i].reserved = false;
2794 num_phy_chans_avail++;
2796 spin_lock_init(&base->phy_res[i].lock);
2799 /* Mark disabled channels as occupied */
2800 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2801 int chan = base->plat_data->disabled_channels[i];
2803 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2804 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2805 base->phy_res[chan].reserved = true;
2806 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
2808 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
2810 num_phy_chans_avail--;
2813 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2814 num_phy_chans_avail, base->num_phy_chans);
2816 /* Verify settings extended vs standard */
2817 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2819 for (i = 0; i < base->num_phy_chans; i++) {
2821 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2822 (val[0] & 0x3) != 1)
2824 "[%s] INFO: channel %d is misconfigured (%d)\n",
2825 __func__, i, val[0] & 0x3);
2827 val[0] = val[0] >> 2;
2831 * To keep things simple, enable all clocks initially.
2832 * The clocks will be managed later, after channel allocation.
2833 * The clocks for the event lines on which reserved channels exist
2834 * are not managed here.
2836 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
2837 base->gcc_pwr_off_mask = gcc;
2839 return num_phy_chans_avail;
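/*
 * Probe the hardware: verify the AMBA PrimeCell ID, read the revision
 * and the number of physical channels, count the logical channels from
 * the platform data, and allocate the d40_base bookkeeping structures.
 */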
2842 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2844 struct stedma40_platform_data *plat_data;
2845 struct clk *clk = NULL;
2846 void __iomem *virtbase = NULL;
2847 struct resource *res = NULL;
2848 struct d40_base *base = NULL;
2849 int num_log_chans = 0;
2856 clk = clk_get(&pdev->dev, NULL);
2859 d40_err(&pdev->dev, "No matching clock found\n");
2865 /* Get IO for DMAC base address */
2866 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2870 if (request_mem_region(res->start, resource_size(res),
2871 D40_NAME " I/O base") == NULL)
2874 virtbase = ioremap(res->start, resource_size(res));
2878 /* This is just a regular AMBA PrimeCell ID actually */
2879 for (pid = 0, i = 0; i < 4; i++)
2880 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
2882 for (cid = 0, i = 0; i < 4; i++)
2883 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
2886 if (cid != AMBA_CID) {
2887 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
2890 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
2891 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2892 AMBA_MANF_BITS(pid),
2898 * DB8500ed has revision 0
2900 * DB8500v1 has revision 2
2901 * DB8500v2 has revision 3
2903 rev = AMBA_REV_BITS(pid);
2905 /* The number of physical channels on this HW */
2906 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2908 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2911 plat_data = pdev->dev.platform_data;
2913 /* Count the number of logical channels in use */
2914 for (i = 0; i < plat_data->dev_len; i++)
2915 if (plat_data->dev_rx[i] != 0)
2918 for (i = 0; i < plat_data->dev_len; i++)
2919 if (plat_data->dev_tx[i] != 0)
2922 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2923 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2924 sizeof(struct d40_chan), GFP_KERNEL);
2927 d40_err(&pdev->dev, "Out of memory\n");
2933 base->num_phy_chans = num_phy_chans;
2934 base->num_log_chans = num_log_chans;
2935 base->phy_start = res->start;
2936 base->phy_size = resource_size(res);
2937 base->virtbase = virtbase;
2938 base->plat_data = plat_data;
2939 base->dev = &pdev->dev;
2940 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2941 base->log_chans = &base->phy_chans[num_phy_chans];
2943 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2948 base->lookup_phy_chans = kzalloc(num_phy_chans *
2949 sizeof(struct d40_chan *),
2951 if (!base->lookup_phy_chans)
2954 if (num_log_chans + plat_data->memcpy_len) {
2956 * The maximum number of logical channels is the number of event
2957 * lines for all src and dst devices.
2959 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2960 sizeof(struct d40_chan *),
2962 if (!base->lookup_log_chans)
2966 base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
2967 sizeof(d40_backup_regs_chan),
2969 if (!base->reg_val_backup_chan)
2972 base->lcla_pool.alloc_map =
2973 kzalloc(num_phy_chans * sizeof(struct d40_desc *)
2974 * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
2975 if (!base->lcla_pool.alloc_map)
2978 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2979 0, SLAB_HWCACHE_ALIGN,
2981 if (base->desc_slab == NULL)
2994 release_mem_region(res->start,
2995 resource_size(res));
3000 kfree(base->lcla_pool.alloc_map);
3001 kfree(base->lookup_log_chans);
3002 kfree(base->lookup_phy_chans);
3003 kfree(base->phy_res);
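/*
 * Write the initial hardware configuration: enable all clocks, unmask
 * and clear interrupts for every logical channel, and program the
 * physical/active mode bits according to the reservation map built in
 * d40_phy_res_init().
 */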
3010 static void __init d40_hw_init(struct d40_base *base)
3013 static struct d40_reg_val dma_init_reg[] = {
3014 /* Clock every part of the DMA block from start */
3015 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
3017 /* Interrupts on all logical channels */
3018 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
3019 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
3020 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
3021 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
3022 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
3023 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
3024 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
3025 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
3026 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
3027 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
3028 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
3029 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
3032 u32 prmseo[2] = {0, 0};
3033 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3037 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
3038 writel(dma_init_reg[i].val,
3039 base->virtbase + dma_init_reg[i].reg);
3041 /* Configure all our dma channels to default settings */
3042 for (i = 0; i < base->num_phy_chans; i++) {
3044 activeo[i % 2] = activeo[i % 2] << 2;
3046 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3048 activeo[i % 2] |= 3;
3052 /* Enable interrupt # */
3053 pcmis = (pcmis << 1) | 1;
3055 /* Clear interrupt # */
3056 pcicr = (pcicr << 1) | 1;
3058 /* Set channel to physical mode */
3059 prmseo[i % 2] = prmseo[i % 2] << 2;
3064 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3065 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3066 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3067 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3069 /* Write which interrupt to enable */
3070 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
3072 /* Write which interrupt to clear */
3073 writel(pcicr, base->virtbase + D40_DREG_PCICR);
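/*
 * Allocate the LCLA area used for logical channel link addresses. The
 * hardware requires LCLA_ALIGNMENT (256 KiB) alignment, hence the page
 * allocation loop below.
 */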
3077 static int __init d40_lcla_allocate(struct d40_base *base)
3079 struct d40_lcla_pool *pool = &base->lcla_pool;
3080 unsigned long *page_list;
3085 * This is somewhat ugly. We need 8192 bytes that are 256 KiB (2^18)
3086 * aligned. To fulfil this hardware requirement without wasting 256 KiB,
3087 * we allocate pages until we get an aligned one.
3089 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
3097 /* Calculate how many pages are required */
3098 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3100 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3101 page_list[i] = __get_free_pages(GFP_KERNEL,
3102 base->lcla_pool.pages);
3103 if (!page_list[i]) {
3105 d40_err(base->dev, "Failed to allocate %d pages.\n",
3106 base->lcla_pool.pages);
3108 for (j = 0; j < i; j++)
3109 free_pages(page_list[j], base->lcla_pool.pages);
3113 if ((virt_to_phys((void *)page_list[i]) &
3114 (LCLA_ALIGNMENT - 1)) == 0)
3118 for (j = 0; j < i; j++)
3119 free_pages(page_list[j], base->lcla_pool.pages);
3121 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3122 base->lcla_pool.base = (void *)page_list[i];
3125 * After many attempts with no success in finding the correct
3126 * alignment, fall back to allocating a big buffer.
3129 "[%s] Failed to get %d pages @ 18 bit align.\n",
3130 __func__, base->lcla_pool.pages);
3131 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3132 base->num_phy_chans +
3135 if (!base->lcla_pool.base_unaligned) {
3140 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3144 pool->dma_addr = dma_map_single(base->dev, pool->base,
3145 SZ_1K * base->num_phy_chans,
3147 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3153 writel(virt_to_phys(base->lcla_pool.base),
3154 base->virtbase + D40_DREG_LCLA);
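/*
 * Platform probe: detect the hardware, set up the physical channel
 * resources, map the LCPA/LCLA areas, request the interrupt, enable
 * runtime PM and finally register the dmaengine devices.
 */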
3160 static int __init d40_probe(struct platform_device *pdev)
3164 struct d40_base *base;
3165 struct resource *res = NULL;
3166 int num_reserved_chans;
3169 base = d40_hw_detect_init(pdev);
3174 num_reserved_chans = d40_phy_res_init(base);
3176 platform_set_drvdata(pdev, base);
3178 spin_lock_init(&base->interrupt_lock);
3179 spin_lock_init(&base->execmd_lock);
3181 /* Get IO for logical channel parameter address */
3182 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3185 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3188 base->lcpa_size = resource_size(res);
3189 base->phy_lcpa = res->start;
3191 if (request_mem_region(res->start, resource_size(res),
3192 D40_NAME " I/O lcpa") == NULL) {
3195 "Failed to request LCPA region 0x%x-0x%x\n",
3196 res->start, res->end);
3200 /* We make use of ESRAM memory for this. */
3201 val = readl(base->virtbase + D40_DREG_LCPA);
3202 if (res->start != val && val != 0) {
3203 dev_warn(&pdev->dev,
3204 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
3205 __func__, val, res->start);
3207 writel(res->start, base->virtbase + D40_DREG_LCPA);
3209 base->lcpa_base = ioremap(res->start, resource_size(res));
3210 if (!base->lcpa_base) {
3212 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3215 /* If lcla has to be located in ESRAM we don't need to allocate */
3216 if (base->plat_data->use_esram_lcla) {
3217 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3222 "No \"lcla_esram\" memory resource\n");
3225 base->lcla_pool.base = ioremap(res->start,
3226 resource_size(res));
3227 if (!base->lcla_pool.base) {
3229 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3232 writel(res->start, base->virtbase + D40_DREG_LCLA);
3235 ret = d40_lcla_allocate(base);
3237 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3242 spin_lock_init(&base->lcla_pool.lock);
3244 base->irq = platform_get_irq(pdev, 0);
3246 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3248 d40_err(&pdev->dev, "No IRQ defined\n");
3252 pm_runtime_irq_safe(base->dev);
3253 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3254 pm_runtime_use_autosuspend(base->dev);
3255 pm_runtime_enable(base->dev);
3256 pm_runtime_resume(base->dev);
3258 if (base->plat_data->use_esram_lcla) {
3260 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3261 if (IS_ERR(base->lcpa_regulator)) {
3262 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3263 base->lcpa_regulator = NULL;
3267 ret = regulator_enable(base->lcpa_regulator);
3270 "Failed to enable lcpa_regulator\n");
3271 regulator_put(base->lcpa_regulator);
3272 base->lcpa_regulator = NULL;
3277 base->initialized = true;
3278 err = d40_dmaengine_init(base, num_reserved_chans);
3284 dev_info(base->dev, "initialized\n");
3289 if (base->desc_slab)
3290 kmem_cache_destroy(base->desc_slab);
3292 iounmap(base->virtbase);
3294 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3295 iounmap(base->lcla_pool.base);
3296 base->lcla_pool.base = NULL;
3299 if (base->lcla_pool.dma_addr)
3300 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3301 SZ_1K * base->num_phy_chans,
3304 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3305 free_pages((unsigned long)base->lcla_pool.base,
3306 base->lcla_pool.pages);
3308 kfree(base->lcla_pool.base_unaligned);
3311 release_mem_region(base->phy_lcpa,
3313 if (base->phy_start)
3314 release_mem_region(base->phy_start,
3317 clk_disable(base->clk);
3321 if (base->lcpa_regulator) {
3322 regulator_disable(base->lcpa_regulator);
3323 regulator_put(base->lcpa_regulator);
3326 kfree(base->lcla_pool.alloc_map);
3327 kfree(base->lookup_log_chans);
3328 kfree(base->lookup_phy_chans);
3329 kfree(base->phy_res);
3333 d40_err(&pdev->dev, "probe failed\n");
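/*
 * The driver is registered with subsys_initcall() so that DMA channels
 * are available early, before client drivers probe.
 */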
3337 static struct platform_driver d40_driver = {
3339 .owner = THIS_MODULE,
3345 static int __init stedma40_init(void)
3347 return platform_driver_probe(&d40_driver, d40_probe);
3349 subsys_initcall(stedma40_init);