/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DCSR(n)         (0x0000 + ((n) << 2))
#define DALGN(n)        0x00a0
#define DINT            0x00f0
#define DDADR(n)        (0x0200 + ((n) << 4))
#define DSADR(n)        (0x0204 + ((n) << 4))
#define DTADR(n)        (0x0208 + ((n) << 4))
#define DCMD(n)         (0x020c + ((n) << 4))

#define PXA_DCSR_RUN            BIT(31) /* Run Bit (read / write) */
#define PXA_DCSR_NODESC         BIT(30) /* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN      BIT(29) /* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND        BIT(8)  /* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE      BIT(3)  /* Stop State (read-only) */
#define PXA_DCSR_ENDINTR        BIT(2)  /* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR      BIT(1)  /* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR         BIT(0)  /* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN       BIT(28) /* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN       BIT(27) /* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN      BIT(26) /* STOP on an EOR */
#define PXA_DCSR_SETCMPST       BIT(25) /* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST       BIT(24) /* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST          BIT(10) /* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR        BIT(9)  /* The end of Receive */

#define DRCMR_MAPVLD    BIT(7)  /* Map Valid (read / write) */
#define DRCMR_CHLNUM    0x1f    /* mask for Channel Number (read / write) */

#define DDADR_DESCADDR  0xfffffff0      /* Address of next descriptor (mask) */
#define DDADR_STOP      BIT(0)  /* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR     BIT(31) /* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR     BIT(30) /* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC        BIT(29) /* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG        BIT(28) /* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN     BIT(22) /* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN       BIT(21) /* End Interrupt Enable */
#define PXA_DCMD_ENDIAN         BIT(18) /* Device Endian-ness. */
#define PXA_DCMD_BURST8         (1 << 16)       /* 8 byte burst */
#define PXA_DCMD_BURST16        (2 << 16)       /* 16 byte burst */
#define PXA_DCMD_BURST32        (3 << 16)       /* 32 byte burst */
#define PXA_DCMD_WIDTH1         (1 << 14)       /* 1 byte width */
#define PXA_DCMD_WIDTH2         (2 << 14)       /* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4         (3 << 14)       /* 4 byte width (Word) */
#define PXA_DCMD_LENGTH         0x01fff         /* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT          3
#define PDMA_MAX_DESC_BYTES     (PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))

struct pxad_desc_hw {
        u32 ddadr;      /* Points to the next descriptor + flags */
        u32 dsadr;      /* DSADR value for the current transfer */
        u32 dtadr;      /* DTADR value for the current transfer */
        u32 dcmd;       /* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
        struct virt_dma_desc    vd;             /* Virtual descriptor */
        int                     nb_desc;        /* Number of hw. descriptors */
        size_t                  len;            /* Number of bytes xfered */
        dma_addr_t              first;          /* First descriptor's addr */

        /* At least one descriptor has a src/dst address not a multiple of 8 */
        bool                    misaligned;
        bool                    cyclic;
        struct dma_pool         *desc_pool;     /* Channel's used allocator */

        struct pxad_desc_hw     *hw_desc[];     /* DMA coherent descriptors */
};

struct pxad_phy {
        int                     idx;
        void __iomem            *base;
        struct pxad_chan        *vchan;
};

struct pxad_chan {
        struct virt_dma_chan    vc;             /* Virtual channel */
        u32                     drcmr;          /* Requestor of the channel */
        enum pxad_chan_prio     prio;           /* Required priority of phy */
        /*
         * At least one desc_sw among the submitted or issued transfers on
         * this channel has an address that is not a multiple of 8
         * (addr % 8 != 0), which requires the DALGN setting on the phy.
         */
        bool                    misaligned;
        struct dma_slave_config cfg;            /* Runtime config */

        /* protected by vc->lock */
        struct pxad_phy         *phy;
        struct dma_pool         *desc_pool;     /* Descriptors pool */
};

struct pxad_device {
        struct dma_device               slave;
        int                             nr_chans;
        void __iomem                    *base;
        struct pxad_phy                 *phys;
        spinlock_t                      phy_lock;       /* Phy association */
#ifdef CONFIG_DEBUG_FS
        struct dentry                   *dbgfs_root;
        struct dentry                   *dbgfs_state;
        struct dentry                   **dbgfs_chan;
#endif
};

#define tx_to_pxad_desc(tx)                                     \
        container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)                                     \
        container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)                                     \
        container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)                            \
        container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)                                   \
        readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)                                    \
        ({                                                              \
                u32 _v;                                                 \
                _v = readl_relaxed((phy)->base + _reg((phy)->idx));     \
                dev_vdbg(&phy->vchan->vc.chan.dev->device,              \
                         "%s(): readl(%s): 0x%08x\n", __func__, #_reg,  \
                          _v);                                          \
                _v;                                                     \
        })
#define phy_writel(phy, val, _reg)                                      \
        do {                                                            \
                writel((val), (phy)->base + _reg((phy)->idx));          \
                dev_vdbg(&phy->vchan->vc.chan.dev->device,              \
                         "%s(): writel(0x%08x, %s)\n",                  \
                         __func__, (u32)(val), #_reg);                  \
        } while (0)
#define phy_writel_relaxed(phy, val, _reg)                              \
        do {                                                            \
                writel_relaxed((val), (phy)->base + _reg((phy)->idx));  \
                dev_vdbg(&phy->vchan->vc.chan.dev->device,              \
                         "%s(): writel_relaxed(0x%08x, %s)\n",          \
                         __func__, (u32)(val), #_reg);                  \
        } while (0)

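/*
 * Requestor lines 0..63 map to the DRCMR registers at offset 0x100,
 * higher lines to the extended bank at offset 0x1000 (4 bytes per line).
 */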
static unsigned int pxad_drcmr(unsigned int line)
{
        if (line < 64)
                return 0x100 + line * 4;
        return 0x1000 + line * 4;
}

/*
 * Debug fs
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
        struct pxad_phy *phy = s->private;
        int i;
        u32 drcmr;

        seq_printf(s, "DMA channel %d requester :\n", phy->idx);
        for (i = 0; i < 70; i++) {
                drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
                if ((drcmr & DRCMR_CHLNUM) == phy->idx)
                        seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
                                   !!(drcmr & DRCMR_MAPVLD));
        }
        return 0;
}

static inline int dbg_burst_from_dcmd(u32 dcmd)
{
        int burst = (dcmd >> 16) & 0x3;

        return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
        return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")

static int dbg_show_descriptors(struct seq_file *s, void *p)
{
        struct pxad_phy *phy = s->private;
        int i, max_show = 20, burst, width;
        u32 dcmd;
        unsigned long phys_desc, ddadr;
        struct pxad_desc_hw *desc;

        phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

        seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
        seq_printf(s, "[%03d] First descriptor unknown\n", 0);
        for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
                desc = phys_to_virt(phys_desc);
                dcmd = desc->dcmd;
                burst = dbg_burst_from_dcmd(dcmd);
                width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

                seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
                           i, phys_desc, desc);
                seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
                seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
                seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
                seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
                           dcmd,
                           PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
                           PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
                           PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
                           PXA_DCMD_STR(ENDIAN), burst, width,
                           dcmd & PXA_DCMD_LENGTH);
                phys_desc = desc->ddadr;
        }
        if (i == max_show)
                seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
                           i, phys_desc);
        else
                seq_printf(s, "[%03d] Desc at %08lx is %s\n",
                           i, phys_desc, phys_desc == DDADR_STOP ?
                           "DDADR_STOP" : "invalid");

        return 0;
}

static int dbg_show_chan_state(struct seq_file *s, void *p)
{
        struct pxad_phy *phy = s->private;
        u32 dcsr, dcmd;
        int burst, width;
        static const char * const str_prio[] = {
                "high", "normal", "low", "invalid"
        };

        dcsr = _phy_readl_relaxed(phy, DCSR);
        dcmd = _phy_readl_relaxed(phy, DCMD);
        burst = dbg_burst_from_dcmd(dcmd);
        width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

        seq_printf(s, "DMA channel %d\n", phy->idx);
        seq_printf(s, "\tPriority : %s\n",
                          str_prio[(phy->idx & 0xf) / 4]);
        seq_printf(s, "\tUnaligned transfer bit: %s\n",
                          _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
                          "yes" : "no");
        seq_printf(s, "\tDCSR  = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
                   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
                   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
                   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
                   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
                   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
                   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
                   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
                   PXA_DCSR_STR(BUSERR));

        seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
                   dcmd,
                   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
                   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
                   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
                   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
        seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
        seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
        seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

        return 0;
}

static int dbg_show_state(struct seq_file *s, void *p)
{
        struct pxad_device *pdev = s->private;

        /* basic device status */
        seq_puts(s, "DMA engine status\n");
        seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

        return 0;
}

#define DBGFS_FUNC_DECL(name) \
static int dbg_open_##name(struct inode *inode, struct file *file) \
{ \
        return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
        .owner          = THIS_MODULE, \
        .open           = dbg_open_##name, \
        .llseek         = seq_lseek, \
        .read           = seq_read, \
        .release        = single_release, \
}

DBGFS_FUNC_DECL(state);
DBGFS_FUNC_DECL(chan_state);
DBGFS_FUNC_DECL(descriptors);
DBGFS_FUNC_DECL(requester_chan);

static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
                                             int ch, struct dentry *chandir)
{
        char chan_name[11];
        struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
        struct dentry *chan_reqs = NULL;
        void *dt;

        scnprintf(chan_name, sizeof(chan_name), "%d", ch);
        chan = debugfs_create_dir(chan_name, chandir);
        dt = (void *)&pdev->phys[ch];

        if (chan)
                chan_state = debugfs_create_file("state", 0400, chan, dt,
                                                 &dbg_fops_chan_state);
        if (chan_state)
                chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
                                                 &dbg_fops_descriptors);
        if (chan_descr)
                chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
                                                &dbg_fops_requester_chan);
        if (!chan_reqs)
                goto err_state;

        return chan;

err_state:
        debugfs_remove_recursive(chan);
        return NULL;
}

static void pxad_init_debugfs(struct pxad_device *pdev)
{
        int i;
        struct dentry *chandir;

        pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
        if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
                goto err_root;

        pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
                                                pdev, &dbg_fops_state);
        if (!pdev->dbgfs_state)
                goto err_state;

        pdev->dbgfs_chan =
                kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_chan),
                              GFP_KERNEL);
        if (!pdev->dbgfs_chan)
                goto err_alloc;

        chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
        if (!chandir)
                goto err_chandir;

        for (i = 0; i < pdev->nr_chans; i++) {
                pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
                if (!pdev->dbgfs_chan[i])
                        goto err_chans;
        }

        return;
err_chans:
err_chandir:
        kfree(pdev->dbgfs_chan);
err_alloc:
err_state:
        debugfs_remove_recursive(pdev->dbgfs_root);
err_root:
        pr_err("pxad: debugfs is not available\n");
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
        debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif

/*
 * In the transition phase where legacy pxa handling is done at the same time as
 * mmp_dma, the DMA physical channel split between the two DMA providers is done
 * through legacy_reserved. Legacy code reserves DMA channels by setting the
 * corresponding bits in legacy_reserved.
 */
static u32 legacy_reserved;
static u32 legacy_unavailable;

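/*
 * Find a free physical channel for @pchan, starting at the channel's
 * requested priority and falling back towards higher-priority channels.
 * Physical channels reserved by the legacy PXA DMA API are skipped, and
 * the association is made under pdev->phy_lock.
 */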
static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
        int prio, i;
        struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
        struct pxad_phy *phy, *found = NULL;
        unsigned long flags;

        /*
         * dma channel priorities
         * ch 0 - 3,  16 - 19  <--> (0)
         * ch 4 - 7,  20 - 23  <--> (1)
         * ch 8 - 11, 24 - 27  <--> (2)
         * ch 12 - 15, 28 - 31  <--> (3)
         */

        spin_lock_irqsave(&pdev->phy_lock, flags);
        for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
                for (i = 0; i < pdev->nr_chans; i++) {
                        if (prio != (i & 0xf) >> 2)
                                continue;
                        if ((i < 32) && (legacy_reserved & BIT(i)))
                                continue;
                        phy = &pdev->phys[i];
                        if (!phy->vchan) {
                                phy->vchan = pchan;
                                found = phy;
                                if (i < 32)
                                        legacy_unavailable |= BIT(i);
                                goto out_unlock;
                        }
                }
        }

out_unlock:
        spin_unlock_irqrestore(&pdev->phy_lock, flags);
        dev_dbg(&pchan->vc.chan.dev->device,
                "%s(): phy=%p(%d)\n", __func__, found,
                found ? found->idx : -1);

        return found;
}

static void pxad_free_phy(struct pxad_chan *chan)
{
        struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
        unsigned long flags;
        u32 reg;
        int i;

        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): freeing\n", __func__);
        if (!chan->phy)
                return;

        /* clear the channel mapping in DRCMR */
        if (chan->drcmr <= DRCMR_CHLNUM) {
                reg = pxad_drcmr(chan->drcmr);
                writel_relaxed(0, chan->phy->base + reg);
        }

        spin_lock_irqsave(&pdev->phy_lock, flags);
        for (i = 0; i < 32; i++)
                if (chan->phy == &pdev->phys[i])
                        legacy_unavailable &= ~BIT(i);
        chan->phy->vchan = NULL;
        chan->phy = NULL;
        spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
        u32 dcsr;
        struct pxad_phy *phy = chan->phy;

        if (!phy)
                return false;
        dcsr = phy_readl_relaxed(phy, DCSR);
        return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
        u32 dalgn;

        BUG_ON(!chan->phy);
        dalgn = phy_readl_relaxed(chan->phy, DALGN);
        return dalgn & (BIT(chan->phy->idx));
}

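/*
 * Activate the physical channel: map its requestor line in DRCMR (when
 * the line is valid), reflect the misalignment in the DALGN register,
 * then set the RUN bit with stop/end/error interrupts enabled.
 */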
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
        u32 reg, dalgn;

        if (!phy->vchan)
                return;

        dev_dbg(&phy->vchan->vc.chan.dev->device,
                "%s(): phy=%p(%d) misaligned=%d\n", __func__,
                phy, phy->idx, misaligned);

        if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
                reg = pxad_drcmr(phy->vchan->drcmr);
                writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
        }

        dalgn = phy_readl_relaxed(phy, DALGN);
        if (misaligned)
                dalgn |= BIT(phy->idx);
        else
                dalgn &= ~BIT(phy->idx);
        phy_writel_relaxed(phy, dalgn, DALGN);

        phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
                   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
        u32 dcsr;

        if (!phy)
                return;

        dcsr = phy_readl_relaxed(phy, DCSR);
        dev_dbg(&phy->vchan->vc.chan.dev->device,
                "%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
        phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

static void pxad_launch_chan(struct pxad_chan *chan,
                                 struct pxad_desc_sw *desc)
{
        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): desc=%p\n", __func__, desc);
        if (!chan->phy) {
                chan->phy = lookup_phy(chan);
                if (!chan->phy) {
                        dev_dbg(&chan->vc.chan.dev->device,
                                "%s(): no free dma channel\n", __func__);
                        return;
                }
        }

        /*
         * Program the descriptor's address into the DMA controller,
         * then start the DMA transaction
         */
        phy_writel(chan->phy, desc->first, DDADR);
        phy_enable(chan->phy, chan->misaligned);
}

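/*
 * The "updater" is an extra hardware descriptor appended to every
 * transfer. It copies 4 bytes over its own dtadr field, so that once it
 * has run, dtadr != dsadr + 8; is_desc_completed() relies on this as
 * the completion marker.
 */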
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
                             unsigned long flags)
{
        struct pxad_desc_hw *updater =
                sw_desc->hw_desc[sw_desc->nb_desc - 1];
        dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

        updater->ddadr = DDADR_STOP;
        updater->dsadr = dma;
        updater->dtadr = dma + 8;
        updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
                (PXA_DCMD_LENGTH & sizeof(u32));
        if (flags & DMA_PREP_INTERRUPT)
                updater->dcmd |= PXA_DCMD_ENDIRQEN;
}

static bool is_desc_completed(struct virt_dma_desc *vd)
{
        struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
        struct pxad_desc_hw *updater =
                sw_desc->hw_desc[sw_desc->nb_desc - 1];

        return updater->dtadr != (updater->dsadr + 8);
}

static void pxad_desc_chain(struct virt_dma_desc *vd1,
                                struct virt_dma_desc *vd2)
{
        struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
        struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
        dma_addr_t dma_to_chain;

        dma_to_chain = desc2->first;
        desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}

static bool pxad_try_hotchain(struct virt_dma_chan *vc,
                                  struct virt_dma_desc *vd)
{
        struct virt_dma_desc *vd_last_issued = NULL;
        struct pxad_chan *chan = to_pxad_chan(&vc->chan);

        /*
         * Attempt to hot chain the tx if the phy is still running. This is
         * considered successful only if either the channel is still running
         * after the chaining, or if the chained transfer is completed after
         * having been hot chained.
         * A change of alignment is not allowed, and forbids hotchaining.
         */
        if (is_chan_running(chan)) {
                BUG_ON(list_empty(&vc->desc_issued));

                if (!is_running_chan_misaligned(chan) &&
                    to_pxad_sw_desc(vd)->misaligned)
                        return false;

                vd_last_issued = list_entry(vc->desc_issued.prev,
                                            struct virt_dma_desc, node);
                pxad_desc_chain(vd_last_issued, vd);
                if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
                        return true;
        }

        return false;
}

static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
        u32 dcsr;
        u32 dint = readl(phy->base + DINT);

        if (!(dint & BIT(phy->idx)))
                return PXA_DCSR_RUN;

        /* clear irq */
        dcsr = phy_readl_relaxed(phy, DCSR);
        phy_writel(phy, dcsr, DCSR);
        if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
                dev_warn(&phy->vchan->vc.chan.dev->device,
                         "%s(chan=%p): PXA_DCSR_BUSERR\n",
                         __func__, &phy->vchan);

        return dcsr & ~PXA_DCSR_RUN;
}

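/*
 * Per-channel interrupt handler: completes issued descriptors whose
 * updater has run. If the channel stopped, it is relaunched with the
 * next issued descriptor; with nothing issued, the misalignment flag is
 * kept only while descriptors still sit in the submitted queue.
 */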
static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
        struct pxad_phy *phy = dev_id;
        struct pxad_chan *chan = phy->vchan;
        struct virt_dma_desc *vd, *tmp;
        unsigned int dcsr;
        unsigned long flags;

        BUG_ON(!chan);

        dcsr = clear_chan_irq(phy);
        if (dcsr & PXA_DCSR_RUN)
                return IRQ_NONE;

        spin_lock_irqsave(&chan->vc.lock, flags);
        list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
                dev_dbg(&chan->vc.chan.dev->device,
                        "%s(): checking txd %p[%x]: completed=%d\n",
                        __func__, vd, vd->tx.cookie, is_desc_completed(vd));
                if (is_desc_completed(vd)) {
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
                } else {
                        break;
                }
        }

        if (dcsr & PXA_DCSR_STOPSTATE) {
                dev_dbg(&chan->vc.chan.dev->device,
                "%s(): channel stopped, submitted_empty=%d issued_empty=%d",
                        __func__,
                        list_empty(&chan->vc.desc_submitted),
                        list_empty(&chan->vc.desc_issued));
                phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

                if (list_empty(&chan->vc.desc_issued)) {
                        chan->misaligned =
                                !list_empty(&chan->vc.desc_submitted);
                } else {
                        vd = list_first_entry(&chan->vc.desc_issued,
                                              struct virt_dma_desc, node);
                        pxad_launch_chan(chan, to_pxad_sw_desc(vd));
                }
        }
        spin_unlock_irqrestore(&chan->vc.lock, flags);

        return IRQ_HANDLED;
}

static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
        struct pxad_device *pdev = dev_id;
        struct pxad_phy *phy;
        u32 dint = readl(pdev->base + DINT);
        int i, ret = IRQ_NONE;

        while (dint) {
                i = __ffs(dint);
                dint &= (dint - 1);
                phy = &pdev->phys[i];
                if ((i < 32) && (legacy_reserved & BIT(i)))
                        continue;
                if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }

        return ret;
}

static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
        struct pxad_chan *chan = to_pxad_chan(dchan);
        struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

        if (chan->desc_pool)
                return 1;

        chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
                                          pdev->slave.dev,
                                          sizeof(struct pxad_desc_hw),
                                          __alignof__(struct pxad_desc_hw),
                                          0);
        if (!chan->desc_pool) {
                dev_err(&chan->vc.chan.dev->device,
                        "%s(): unable to allocate descriptor pool\n",
                        __func__);
                return -ENOMEM;
        }

        return 1;
}

static void pxad_free_chan_resources(struct dma_chan *dchan)
{
        struct pxad_chan *chan = to_pxad_chan(dchan);

        vchan_free_chan_resources(&chan->vc);
        dma_pool_destroy(chan->desc_pool);
        chan->desc_pool = NULL;
}

static void pxad_free_desc(struct virt_dma_desc *vd)
{
        int i;
        dma_addr_t dma;
        struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

        BUG_ON(sw_desc->nb_desc == 0);
        for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
                if (i > 0)
                        dma = sw_desc->hw_desc[i - 1]->ddadr;
                else
                        dma = sw_desc->first;
                dma_pool_free(sw_desc->desc_pool,
                              sw_desc->hw_desc[i], dma);
        }
        sw_desc->nb_desc = 0;
        kfree(sw_desc);
}

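/*
 * Allocate a software descriptor backed by @nb_hw_desc hardware
 * descriptors taken from the channel's dma_pool, chaining each one's
 * ddadr to the next as they are allocated.
 */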
static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
        struct pxad_desc_sw *sw_desc;
        dma_addr_t dma;
        int i;

        sw_desc = kzalloc(sizeof(*sw_desc) +
                          nb_hw_desc * sizeof(struct pxad_desc_hw *),
                          GFP_NOWAIT);
        if (!sw_desc)
                return NULL;
        sw_desc->desc_pool = chan->desc_pool;

        for (i = 0; i < nb_hw_desc; i++) {
                sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
                                                     GFP_NOWAIT, &dma);
                if (!sw_desc->hw_desc[i]) {
                        dev_err(&chan->vc.chan.dev->device,
                                "%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
                                __func__, i, sw_desc->desc_pool);
                        goto err;
                }

                if (i == 0)
                        sw_desc->first = dma;
                else
                        sw_desc->hw_desc[i - 1]->ddadr = dma;
                sw_desc->nb_desc++;
        }

        return sw_desc;
err:
        pxad_free_desc(&sw_desc->vd);
        return NULL;
}

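/*
 * Assign a cookie and queue the transfer. If the submitted queue is
 * empty, try to hot chain it onto the running transfer; otherwise cold
 * chain it behind the submitted tail, unless that would introduce a new
 * misalignment, in which case the channel is left to stop and be
 * relaunched from the interrupt handler.
 */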
static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct virt_dma_chan *vc = to_virt_chan(tx->chan);
        struct pxad_chan *chan = to_pxad_chan(&vc->chan);
        struct virt_dma_desc *vd_chained = NULL,
                *vd = container_of(tx, struct virt_dma_desc, tx);
        dma_cookie_t cookie;
        unsigned long flags;

        set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

        spin_lock_irqsave(&vc->lock, flags);
        cookie = dma_cookie_assign(tx);

        if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
                list_move_tail(&vd->node, &vc->desc_issued);
                dev_dbg(&chan->vc.chan.dev->device,
                        "%s(): txd %p[%x]: submitted (hot linked)\n",
                        __func__, vd, cookie);
                goto out;
        }

        /*
         * Fallback to placing the tx in the submitted queue
         */
        if (!list_empty(&vc->desc_submitted)) {
                vd_chained = list_entry(vc->desc_submitted.prev,
                                        struct virt_dma_desc, node);
                /*
                 * Only chain the descriptors if no new misalignment is
                 * introduced. If a new misalignment is chained, let the channel
                 * stop, and be relaunched in misalign mode from the irq
                 * handler.
                 */
                if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
                        pxad_desc_chain(vd_chained, vd);
                else
                        vd_chained = NULL;
        }
        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): txd %p[%x]: submitted (%s linked)\n",
                __func__, vd, cookie, vd_chained ? "cold" : "not");
        list_move_tail(&vd->node, &vc->desc_submitted);
        chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
        spin_unlock_irqrestore(&vc->lock, flags);
        return cookie;
}

static void pxad_issue_pending(struct dma_chan *dchan)
{
        struct pxad_chan *chan = to_pxad_chan(dchan);
        struct virt_dma_desc *vd_first;
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        if (list_empty(&chan->vc.desc_submitted))
                goto out;

        vd_first = list_first_entry(&chan->vc.desc_submitted,
                                    struct virt_dma_desc, node);
        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);

        vchan_issue_pending(&chan->vc);
        if (!pxad_try_hotchain(&chan->vc, vd_first))
                pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
        spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
                 unsigned long tx_flags)
{
        struct dma_async_tx_descriptor *tx;
        struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

        INIT_LIST_HEAD(&vd->node);
        tx = vchan_tx_prep(vc, vd, tx_flags);
        tx->tx_submit = pxad_tx_submit;
        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
                vc, vd, vd->tx.cookie,
                tx_flags);

        return tx;
}

static void pxad_get_config(struct pxad_chan *chan,
                            enum dma_transfer_direction dir,
                            u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
        u32 maxburst = 0, dev_addr = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

        *dcmd = 0;
        if (dir == DMA_DEV_TO_MEM) {
                maxburst = chan->cfg.src_maxburst;
                width = chan->cfg.src_addr_width;
                dev_addr = chan->cfg.src_addr;
                *dev_src = dev_addr;
                *dcmd |= PXA_DCMD_INCTRGADDR;
                if (chan->drcmr <= DRCMR_CHLNUM)
                        *dcmd |= PXA_DCMD_FLOWSRC;
        }
        if (dir == DMA_MEM_TO_DEV) {
                maxburst = chan->cfg.dst_maxburst;
                width = chan->cfg.dst_addr_width;
                dev_addr = chan->cfg.dst_addr;
                *dev_dst = dev_addr;
                *dcmd |= PXA_DCMD_INCSRCADDR;
                if (chan->drcmr <= DRCMR_CHLNUM)
                        *dcmd |= PXA_DCMD_FLOWTRG;
        }
        if (dir == DMA_MEM_TO_MEM)
                *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
                        PXA_DCMD_INCSRCADDR;

        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
                __func__, dev_addr, maxburst, width, dir);

        if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
                *dcmd |= PXA_DCMD_WIDTH1;
        else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
                *dcmd |= PXA_DCMD_WIDTH2;
        else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
                *dcmd |= PXA_DCMD_WIDTH4;

        if (maxburst == 8)
                *dcmd |= PXA_DCMD_BURST8;
        else if (maxburst == 16)
                *dcmd |= PXA_DCMD_BURST16;
        else if (maxburst == 32)
                *dcmd |= PXA_DCMD_BURST32;

        /* FIXME: drivers should be ported over to use the filter
         * function. Once that's done, the following two lines can
         * be removed.
         */
        if (chan->cfg.slave_id)
                chan->drcmr = chan->cfg.slave_id;
}

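/*
 * Prepare a memory-to-memory copy, split into chunks of at most
 * PDMA_MAX_DESC_BYTES plus the trailing updater descriptor.
 */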
static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
                 dma_addr_t dma_dst, dma_addr_t dma_src,
                 size_t len, unsigned long flags)
{
        struct pxad_chan *chan = to_pxad_chan(dchan);
        struct pxad_desc_sw *sw_desc;
        struct pxad_desc_hw *hw_desc;
        u32 dcmd;
        unsigned int i, nb_desc = 0;
        size_t copy;

        if (!dchan || !len)
                return NULL;

        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
                __func__, (unsigned long)dma_dst, (unsigned long)dma_src,
                len, flags);
        pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

        nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
        sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
        if (!sw_desc)
                return NULL;
        sw_desc->len = len;

        if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
            !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
                sw_desc->misaligned = true;

        i = 0;
        do {
                hw_desc = sw_desc->hw_desc[i++];
                copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
                hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
                hw_desc->dsadr = dma_src;
                hw_desc->dtadr = dma_dst;
                len -= copy;
                dma_src += copy;
                dma_dst += copy;
        } while (len);
        set_updater_desc(sw_desc, flags);

        return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
                   unsigned int sg_len, enum dma_transfer_direction dir,
                   unsigned long flags, void *context)
{
        struct pxad_chan *chan = to_pxad_chan(dchan);
        struct pxad_desc_sw *sw_desc;
        size_t len, avail;
        struct scatterlist *sg;
        dma_addr_t dma;
        u32 dcmd, dsadr = 0, dtadr = 0;
        unsigned int nb_desc = 0, i, j = 0;

        if ((sgl == NULL) || (sg_len == 0))
                return NULL;

        pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): dir=%d flags=%lx\n", __func__, dir, flags);

        for_each_sg(sgl, sg, sg_len, i)
                nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
        sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
        if (!sw_desc)
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                dma = sg_dma_address(sg);
                avail = sg_dma_len(sg);
                sw_desc->len += avail;

                do {
                        len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
                        if (dma & 0x7)
                                sw_desc->misaligned = true;

                        sw_desc->hw_desc[j]->dcmd =
                                dcmd | (PXA_DCMD_LENGTH & len);
                        sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
                        sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

                        dma += len;
                        avail -= len;
                } while (avail);
        }
        set_updater_desc(sw_desc, flags);

        return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

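/*
 * Prepare a cyclic transfer: one hardware descriptor per period (each
 * raising an end interrupt), with period_len capped at
 * PDMA_MAX_DESC_BYTES and required to be 8-byte aligned.
 */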
static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
                     dma_addr_t buf_addr, size_t len, size_t period_len,
                     enum dma_transfer_direction dir, unsigned long flags)
{
        struct pxad_chan *chan = to_pxad_chan(dchan);
        struct pxad_desc_sw *sw_desc;
        struct pxad_desc_hw **phw_desc;
        dma_addr_t dma;
        u32 dcmd, dsadr = 0, dtadr = 0;
        unsigned int nb_desc = 0;

        if (!dchan || !len || !period_len)
                return NULL;
        if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
                dev_err(&chan->vc.chan.dev->device,
                        "Unsupported direction for cyclic DMA\n");
                return NULL;
        }
        /* the buffer length must be a multiple of period_len */
        if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
            !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
                return NULL;

        pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
        dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
                __func__, (unsigned long)buf_addr, len, period_len, dir, flags);

        nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
        nb_desc *= DIV_ROUND_UP(len, period_len);
        sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
        if (!sw_desc)
                return NULL;
        sw_desc->cyclic = true;
        sw_desc->len = len;

        phw_desc = sw_desc->hw_desc;
        dma = buf_addr;
        do {
                phw_desc[0]->dsadr = dsadr ? dsadr : dma;
                phw_desc[0]->dtadr = dtadr ? dtadr : dma;
                phw_desc[0]->dcmd = dcmd;
                phw_desc++;
                dma += period_len;
                len -= period_len;
        } while (len);
        set_updater_desc(sw_desc, flags);

        return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static int pxad_config(struct dma_chan *dchan,
                       struct dma_slave_config *cfg)
{
        struct pxad_chan *chan = to_pxad_chan(dchan);

        if (!dchan)
                return -EINVAL;

        chan->cfg = *cfg;
        return 0;
}

static int pxad_terminate_all(struct dma_chan *dchan)
{
        struct pxad_chan *chan = to_pxad_chan(dchan);
        struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
        struct virt_dma_desc *vd = NULL;
        unsigned long flags;
        struct pxad_phy *phy;
        LIST_HEAD(head);

        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): vchan %p: terminate all\n", __func__, &chan->vc);

        spin_lock_irqsave(&chan->vc.lock, flags);
        vchan_get_all_descriptors(&chan->vc, &head);

        list_for_each_entry(vd, &head, node) {
                dev_dbg(&chan->vc.chan.dev->device,
                        "%s(): cancelling txd %p[%x] (completed=%d)", __func__,
                        vd, vd->tx.cookie, is_desc_completed(vd));
        }

        phy = chan->phy;
        if (phy) {
                phy_disable(chan->phy);
                pxad_free_phy(chan);
                chan->phy = NULL;
                spin_lock(&pdev->phy_lock);
                phy->vchan = NULL;
                spin_unlock(&pdev->phy_lock);
        }
        spin_unlock_irqrestore(&chan->vc.lock, flags);
        vchan_dma_desc_free_list(&chan->vc, &head);

        return 0;
}

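/*
 * Compute the number of bytes left to transfer for @cookie by locating
 * the hardware's current source/target pointer within the descriptor
 * chain: the partially handled descriptor contributes its remaining
 * bytes, every later descriptor its full length.
 */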
static unsigned int pxad_residue(struct pxad_chan *chan,
                                 dma_cookie_t cookie)
{
        struct virt_dma_desc *vd = NULL;
        struct pxad_desc_sw *sw_desc = NULL;
        struct pxad_desc_hw *hw_desc = NULL;
        u32 curr, start, len, end, residue = 0;
        unsigned long flags;
        bool passed = false;
        int i;

        /*
         * If the channel does not have a phy pointer anymore, it has already
         * been completed. Therefore, its residue is 0.
         */
        if (!chan->phy)
                return 0;

        spin_lock_irqsave(&chan->vc.lock, flags);

        vd = vchan_find_desc(&chan->vc, cookie);
        if (!vd)
                goto out;

        sw_desc = to_pxad_sw_desc(vd);
        if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
                curr = phy_readl_relaxed(chan->phy, DSADR);
        else
                curr = phy_readl_relaxed(chan->phy, DTADR);

        /*
         * curr has to be actually read before checking descriptor
         * completion, so that a curr inside a status updater
         * descriptor implies the following test returns true, and
         * preventing reordering of curr load and the test.
         */
        rmb();
        if (is_desc_completed(vd))
                goto out;

        for (i = 0; i < sw_desc->nb_desc - 1; i++) {
                hw_desc = sw_desc->hw_desc[i];
                if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
                        start = hw_desc->dsadr;
                else
                        start = hw_desc->dtadr;
                len = hw_desc->dcmd & PXA_DCMD_LENGTH;
                end = start + len;

                /*
                 * 'passed' will be latched once we found the descriptor
                 * which lies inside the boundaries of the curr
                 * pointer. All descriptors that occur in the list
                 * _after_ we found that partially handled descriptor
                 * are still to be processed and are hence added to the
                 * residual bytes counter.
                 */

                if (passed) {
                        residue += len;
                } else if (curr >= start && curr <= end) {
                        residue += end - curr;
                        passed = true;
                }
        }
        if (!passed)
                residue = sw_desc->len;

out:
        spin_unlock_irqrestore(&chan->vc.lock, flags);
        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): txd %p[%x] sw_desc=%p: %d\n",
                __func__, vd, cookie, sw_desc, residue);
        return residue;
}

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
                                      dma_cookie_t cookie,
                                      struct dma_tx_state *txstate)
{
        struct pxad_chan *chan = to_pxad_chan(dchan);
        enum dma_status ret;

        ret = dma_cookie_status(dchan, cookie, txstate);
        if (likely(txstate && (ret != DMA_ERROR)))
                dma_set_residue(txstate, pxad_residue(chan, cookie));

        return ret;
}

static void pxad_free_channels(struct dma_device *dmadev)
{
        struct pxad_chan *c, *cn;

        list_for_each_entry_safe(c, cn, &dmadev->channels,
                                 vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }
}

static int pxad_remove(struct platform_device *op)
{
        struct pxad_device *pdev = platform_get_drvdata(op);

        pxad_cleanup_debugfs(pdev);
        pxad_free_channels(&pdev->slave);
        dma_async_device_unregister(&pdev->slave);
        return 0;
}

static int pxad_init_phys(struct platform_device *op,
                          struct pxad_device *pdev,
                          unsigned int nb_phy_chans)
{
        int irq0, irq, nr_irq = 0, i, ret = 0;
        struct pxad_phy *phy;

        irq0 = platform_get_irq(op, 0);
        if (irq0 < 0)
                return irq0;

        pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
                                  sizeof(pdev->phys[0]), GFP_KERNEL);
        if (!pdev->phys)
                return -ENOMEM;

        for (i = 0; i < nb_phy_chans; i++)
                if (platform_get_irq(op, i) > 0)
                        nr_irq++;

        for (i = 0; i < nb_phy_chans; i++) {
                phy = &pdev->phys[i];
                phy->base = pdev->base;
                phy->idx = i;
                irq = platform_get_irq(op, i);
                if ((nr_irq > 1) && (irq > 0))
                        ret = devm_request_irq(&op->dev, irq,
                                               pxad_chan_handler,
                                               IRQF_SHARED, "pxa-dma", phy);
                if ((nr_irq == 1) && (i == 0))
                        ret = devm_request_irq(&op->dev, irq0,
                                               pxad_int_handler,
                                               IRQF_SHARED, "pxa-dma", pdev);
                if (ret) {
                        dev_err(pdev->slave.dev,
                                "%s(): can't request irq %d:%d\n", __func__,
                                irq, ret);
                        return ret;
                }
        }

        return 0;
}

static const struct of_device_id pxad_dt_ids[] = {
        { .compatible = "marvell,pdma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);

static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
                                           struct of_dma *ofdma)
{
        struct pxad_device *d = ofdma->of_dma_data;
        struct dma_chan *chan;

        chan = dma_get_any_slave_channel(&d->slave);
        if (!chan)
                return NULL;

        to_pxad_chan(chan)->drcmr = dma_spec->args[0];
        to_pxad_chan(chan)->prio = dma_spec->args[1];

        return chan;
}

static int pxad_init_dmadev(struct platform_device *op,
                            struct pxad_device *pdev,
                            unsigned int nr_phy_chans)
{
        int ret;
        unsigned int i;
        struct pxad_chan *c;

        pdev->nr_chans = nr_phy_chans;
        INIT_LIST_HEAD(&pdev->slave.channels);
        pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
        pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
        pdev->slave.device_tx_status = pxad_tx_status;
        pdev->slave.device_issue_pending = pxad_issue_pending;
        pdev->slave.device_config = pxad_config;
        pdev->slave.device_terminate_all = pxad_terminate_all;

        if (op->dev.coherent_dma_mask)
                dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
        else
                dma_set_mask(&op->dev, DMA_BIT_MASK(32));

        ret = pxad_init_phys(op, pdev, nr_phy_chans);
        if (ret)
                return ret;

        for (i = 0; i < nr_phy_chans; i++) {
                c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
                if (!c)
                        return -ENOMEM;
                c->vc.desc_free = pxad_free_desc;
                vchan_init(&c->vc, &pdev->slave);
        }

        return dma_async_device_register(&pdev->slave);
}

static int pxad_probe(struct platform_device *op)
{
        struct pxad_device *pdev;
        const struct of_device_id *of_id;
        struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
        struct resource *iores;
        int ret, dma_channels = 0;
        const enum dma_slave_buswidth widths =
                DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
                DMA_SLAVE_BUSWIDTH_4_BYTES;

        pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
                return -ENOMEM;

        spin_lock_init(&pdev->phy_lock);

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        pdev->base = devm_ioremap_resource(&op->dev, iores);
        if (IS_ERR(pdev->base))
                return PTR_ERR(pdev->base);

        of_id = of_match_device(pxad_dt_ids, &op->dev);
        if (of_id)
                of_property_read_u32(op->dev.of_node, "#dma-channels",
                                     &dma_channels);
        else if (pdata && pdata->dma_channels)
                dma_channels = pdata->dma_channels;
        else
                dma_channels = 32;      /* default 32 channel */

        dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
        dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
        dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
        pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
        pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
        pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;

        pdev->slave.copy_align = PDMA_ALIGNMENT;
        pdev->slave.src_addr_widths = widths;
        pdev->slave.dst_addr_widths = widths;
        pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
        pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        pdev->slave.descriptor_reuse = true;

        pdev->slave.dev = &op->dev;
        ret = pxad_init_dmadev(op, pdev, dma_channels);
        if (ret) {
                dev_err(pdev->slave.dev, "unable to register\n");
                return ret;
        }

        if (op->dev.of_node) {
                /* Device-tree DMA controller registration */
                ret = of_dma_controller_register(op->dev.of_node,
                                                 pxad_dma_xlate, pdev);
                if (ret < 0) {
                        dev_err(pdev->slave.dev,
                                "of_dma_controller_register failed\n");
                        return ret;
                }
        }

        platform_set_drvdata(op, pdev);
        pxad_init_debugfs(pdev);
        dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
        return 0;
}

static const struct platform_device_id pxad_id_table[] = {
        { "pxa-dma", },
        { },
};

static struct platform_driver pxad_driver = {
        .driver         = {
                .name   = "pxa-dma",
                .of_match_table = pxad_dt_ids,
        },
        .id_table       = pxad_id_table,
        .probe          = pxad_probe,
        .remove         = pxad_remove,
};

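/*
 * Channel filter for dma_request_channel(): binds the requestor line
 * and priority passed in the struct pxad_param argument to the channel.
 * A minimal usage sketch (the requestor line and priority below are
 * platform-specific placeholders, not values from this driver):
 *
 *	struct pxad_param param = {
 *		.drcmr = 17,
 *		.prio = PXAD_PRIO_LOWEST,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pxad_filter_fn, &param);
 */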
bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
        struct pxad_chan *c = to_pxad_chan(chan);
        struct pxad_param *p = param;

        if (chan->device->dev->driver != &pxad_driver.driver)
                return false;

        c->drcmr = p->drcmr;
        c->prio = p->prio;

        return true;
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);

int pxad_toggle_reserved_channel(int legacy_channel)
{
        if (legacy_unavailable & (BIT(legacy_channel)))
                return -EBUSY;
        legacy_reserved ^= BIT(legacy_channel);
        return 0;
}
EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");