/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * Since all physical signals cannot usually be serviced at once, a
 * multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *      Get data from memory to DMAC
 *      Until no data left
 *              On burst request from peripheral
 *                      Destination burst from DMAC to peripheral
 *                      Clear burst request
 *      Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
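 *
 * For example (illustrative figures, following the rule above): a
 * peripheral with a 16-word FIFO would use a source burst size of 8
 * and a destination burst size of 16 for memory to peripheral
 * transfers, so a burst request is raised when the FIFO is half
 * empty and one destination burst exactly refills it.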
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to the documentation),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
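 *
 * In ccfg terms (see pl08x_prep_slave_sg() below), DMAC flow control
 * corresponds to the PL080_FLOW_MEM2PER / PL080_FLOW_PER2MEM settings
 * and peripheral flow control to the PL080_FLOW_MEM2PER_PER /
 * PL080_FLOW_PER2MEM_PER settings.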
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/hardware/pl080.h>

#define DRIVER_NAME     "pl08xdmac"

static struct amba_driver pl08x_amba_driver;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 */
struct vendor_data {
        u8 channels;
        bool dualmaster;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
        u32 src;
        u32 dst;
        u32 lli;
        u32 cctl;
};
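
/*
 * Illustrative sketch (made-up addresses): a chain of two LLIs, each
 * moving four 32-bit words, might look like
 *
 *   llis_va[0] = { .src = 0x20000000, .dst = 0x10000000,
 *                  .lli = llis_bus + sizeof(struct pl08x_lli),
 *                  .cctl = <32-bit widths, tsize = 4> };
 *   llis_va[1] = { .src = 0x20000010, .dst = 0x10000010,
 *                  .lli = 0,  (a zero next pointer ends the chain)
 *                  .cctl = <32-bit widths, tsize = 4,
 *                           PL080_CONTROL_TC_IRQ_EN> };
 *
 * bit[0] of .lli may additionally be set to make the DMAC fetch the
 * LLIs over AHB2 (PL080_LLI_LM_AHB2).
 */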

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
        struct dma_device slave;
        struct dma_device memcpy;
        void __iomem *base;
        struct amba_device *adev;
        const struct vendor_data *vd;
        struct pl08x_platform_data *pd;
        struct pl08x_phy_chan *phy_chans;
        struct dma_pool *pool;
        int pool_ctr;
        u8 lli_buses;
        u8 mem_buses;
        spinlock_t lock;
};

/*
 * PL08X specific defines
 */

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE     0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS       (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN             8
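
/*
 * With the figures above, each transfer descriptor thus gets one
 * 0x2000 byte pool buffer, which holds at most
 * 0x2000 / sizeof(struct pl08x_lli) = 0x2000 / 16 = 512 LLI entries.
 */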

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
        return container_of(chan, struct pl08x_dma_chan, chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
        return container_of(tx, struct pl08x_txd, tx);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
        unsigned int val;

        val = readl(ch->base + PL080_CH_CONFIG);
        return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values, i.e. those for the first LLI.
 * The next LLI pointer and the configuration interrupt bit have
 * already been set when the LLIs were constructed.  Poke them into the
 * hardware and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
        struct pl08x_txd *txd)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_phy_chan *phychan = plchan->phychan;
        struct pl08x_lli *lli = &txd->llis_va[0];
        u32 val;

        plchan->at = txd;

        /* Wait for channel inactive */
        while (pl08x_phy_channel_busy(phychan))
                cpu_relax();

        dev_vdbg(&pl08x->adev->dev,
                "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
                "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
                phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
                txd->ccfg);

        writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
        writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
        writel(lli->lli, phychan->base + PL080_CH_LLI);
        writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
        writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

        /* Enable the DMA channel */
        /* Do not access config register until channel shows as disabled */
        while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
                cpu_relax();

        /* Do not access config register until channel shows as inactive */
        val = readl(phychan->base + PL080_CH_CONFIG);
        while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
                val = readl(phychan->base + PL080_CH_CONFIG);

        writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;
        int timeout;

        /* Set the HALT bit and wait for the FIFO to drain */
        val = readl(ch->base + PL080_CH_CONFIG);
        val |= PL080_CONFIG_HALT;
        writel(val, ch->base + PL080_CH_CONFIG);

        /* Wait for channel inactive */
        for (timeout = 1000; timeout; timeout--) {
                if (!pl08x_phy_channel_busy(ch))
                        break;
                udelay(1);
        }
        if (pl08x_phy_channel_busy(ch))
                pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;

        /* Clear the HALT bit */
        val = readl(ch->base + PL080_CH_CONFIG);
        val &= ~PL080_CONFIG_HALT;
        writel(val, ch->base + PL080_CH_CONFIG);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
        struct pl08x_phy_chan *ch)
{
        u32 val = readl(ch->base + PL080_CH_CONFIG);

        val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
                 PL080_CONFIG_TC_IRQ_MASK);

        writel(val, ch->base + PL080_CH_CONFIG);

        writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
        writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
{
        /* The source width defines the number of bytes */
        u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

        /* Mask off the other control bits before decoding the width */
        switch ((cctl & PL080_CONTROL_SWIDTH_MASK) >>
                PL080_CONTROL_SWIDTH_SHIFT) {
        case PL080_WIDTH_8BIT:
                break;
        case PL080_WIDTH_16BIT:
                bytes *= 2;
                break;
        case PL080_WIDTH_32BIT:
                bytes *= 4;
                break;
        }
        return bytes;
}
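
/*
 * For example (illustrative figures): a cctl with a 32-bit source width
 * and a transfer size field of 0x40 decodes to 0x40 * 4 = 256 bytes,
 * since the transfer size counts source-width transfers, not bytes.
 */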

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
        struct pl08x_phy_chan *ch;
        struct pl08x_txd *txd;
        unsigned long flags;
        size_t bytes = 0;

        spin_lock_irqsave(&plchan->lock, flags);
        ch = plchan->phychan;
        txd = plchan->at;

        /*
         * Follow the LLIs to get the number of remaining
         * bytes in the currently active transaction.
         */
        if (ch && txd) {
                u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

                /* First get the remaining bytes in the active transfer */
                bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

                if (clli) {
                        struct pl08x_lli *llis_va = txd->llis_va;
                        dma_addr_t llis_bus = txd->llis_bus;
                        int index;

                        BUG_ON(clli < llis_bus || clli >= llis_bus +
                                sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

                        /*
                         * Locate the next LLI - as this is an array,
                         * it's simple maths to find.
                         */
                        index = (clli - llis_bus) / sizeof(struct pl08x_lli);

                        for (; index < MAX_NUM_TSFR_LLIS; index++) {
                                bytes += get_bytes_in_cctl(llis_va[index].cctl);

                                /*
                                 * A LLI pointer of 0 terminates the LLI list
                                 */
                                if (!llis_va[index].lli)
                                        break;
                        }
                }
        }

        /* Sum up all queued transactions */
        if (!list_empty(&plchan->pend_list)) {
                struct pl08x_txd *txdi;
                list_for_each_entry(txdi, &plchan->pend_list, node) {
                        struct pl08x_sg *dsg;
                        /* Each pending txd carries its own sg list */
                        list_for_each_entry(dsg, &txdi->dsg_list, node)
                                bytes += dsg->len;
                }
        }

        spin_unlock_irqrestore(&plchan->lock, flags);

        return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
                      struct pl08x_dma_chan *virt_chan)
{
        struct pl08x_phy_chan *ch = NULL;
        unsigned long flags;
        int i;

        for (i = 0; i < pl08x->vd->channels; i++) {
                ch = &pl08x->phy_chans[i];

                spin_lock_irqsave(&ch->lock, flags);

                if (!ch->serving) {
                        ch->serving = virt_chan;
                        ch->signal = -1;
                        spin_unlock_irqrestore(&ch->lock, flags);
                        break;
                }

                spin_unlock_irqrestore(&ch->lock, flags);
        }

        if (i == pl08x->vd->channels) {
                /* No physical channel available, cope with it */
                return NULL;
        }

        pm_runtime_get_sync(&pl08x->adev->dev);
        return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
                                         struct pl08x_phy_chan *ch)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);

        /* Stop the channel and clear its interrupts */
        pl08x_terminate_phy_chan(pl08x, ch);

        pm_runtime_put(&pl08x->adev->dev);

        /* Mark it as free */
        ch->serving = NULL;
        spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
        switch (coded) {
        case PL080_WIDTH_8BIT:
                return 1;
        case PL080_WIDTH_16BIT:
                return 2;
        case PL080_WIDTH_32BIT:
                return 4;
        default:
                break;
        }
        BUG();
        return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
                                  size_t tsize)
{
        u32 retbits = cctl;

        /* Remove all src, dst and transfer size bits */
        retbits &= ~PL080_CONTROL_DWIDTH_MASK;
        retbits &= ~PL080_CONTROL_SWIDTH_MASK;
        retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

        /* Then set the bits according to the parameters */
        switch (srcwidth) {
        case 1:
                retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        case 2:
                retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        case 4:
                retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        default:
                BUG();
                break;
        }

        switch (dstwidth) {
        case 1:
                retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        case 2:
                retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        case 4:
                retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        default:
                BUG();
                break;
        }

        retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
        return retbits;
}
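
/*
 * For example (illustrative figures): pl08x_cctl_bits(cctl, 4, 2, 0x10)
 * encodes a 32-bit source width, a 16-bit destination width and a
 * transfer size of 0x10, i.e. 0x10 32-bit reads (64 bytes) re-packed
 * as 16-bit writes.
 */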

struct pl08x_lli_build_data {
        struct pl08x_txd *txd;
        struct pl08x_bus_data srcbus;
        struct pl08x_bus_data dstbus;
        size_t remainder;
        u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer.  The slave bus is the
 * one whose width may be sacrificed: if, after aligning the master's
 * address to the transfer width (by sending a few bytes byte by byte),
 * the slave is still not aligned, the slave's width is reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
        struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
        if (!(cctl & PL080_CONTROL_DST_INCR)) {
                *mbus = &bd->dstbus;
                *sbus = &bd->srcbus;
        } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
                *mbus = &bd->srcbus;
                *sbus = &bd->dstbus;
        } else {
                if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
                        *mbus = &bd->dstbus;
                        *sbus = &bd->srcbus;
                } else {
                        *mbus = &bd->srcbus;
                        *sbus = &bd->dstbus;
                }
        }
}
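
/*
 * For instance, in a memory to peripheral transfer the destination
 * address is fixed (PL080_CONTROL_DST_INCR is clear), so the
 * peripheral side is made the master bus and memory the slave: if the
 * memory address then still cannot be aligned, it is the memory
 * (slave) width that gets dropped to one byte.
 */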

/*
 * Fill in one LLI for a certain transfer descriptor and advance the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
        int num_llis, int len, u32 cctl)
{
        struct pl08x_lli *llis_va = bd->txd->llis_va;
        dma_addr_t llis_bus = bd->txd->llis_bus;

        BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

        llis_va[num_llis].cctl = cctl;
        llis_va[num_llis].src = bd->srcbus.addr;
        llis_va[num_llis].dst = bd->dstbus.addr;
        llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
                sizeof(struct pl08x_lli);
        llis_va[num_llis].lli |= bd->lli_bus;

        if (cctl & PL080_CONTROL_SRC_INCR)
                bd->srcbus.addr += len;
        if (cctl & PL080_CONTROL_DST_INCR)
                bd->dstbus.addr += len;

        BUG_ON(bd->remainder < len);

        bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
                u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
        *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
        pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
        (*total_bytes) += len;
}

/*
 * This fills in the table of LLIs for the transfer descriptor.
 * Note that we assume we never have to change the burst sizes.
 * Returns 0 on error.
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                              struct pl08x_txd *txd)
{
        struct pl08x_bus_data *mbus, *sbus;
        struct pl08x_lli_build_data bd;
        int num_llis = 0;
        u32 cctl, early_bytes = 0;
        size_t max_bytes_per_lli, total_bytes;
        struct pl08x_lli *llis_va;
        struct pl08x_sg *dsg;

        txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
        if (!txd->llis_va) {
                dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
                return 0;
        }

        pl08x->pool_ctr++;

        bd.txd = txd;
        bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
        cctl = txd->cctl;

        /* Find maximum width of the source bus */
        bd.srcbus.maxwidth =
                pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
                                       PL080_CONTROL_SWIDTH_SHIFT);

        /* Find maximum width of the destination bus */
        bd.dstbus.maxwidth =
                pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
                                       PL080_CONTROL_DWIDTH_SHIFT);

        list_for_each_entry(dsg, &txd->dsg_list, node) {
                total_bytes = 0;
                cctl = txd->cctl;

                bd.srcbus.addr = dsg->src_addr;
                bd.dstbus.addr = dsg->dst_addr;
                bd.remainder = dsg->len;
                bd.srcbus.buswidth = bd.srcbus.maxwidth;
                bd.dstbus.buswidth = bd.dstbus.maxwidth;

                pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

                dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
                        bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
                        bd.srcbus.buswidth,
                        bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
                        bd.dstbus.buswidth,
                        bd.remainder);
                dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
                        mbus == &bd.srcbus ? "src" : "dst",
                        sbus == &bd.srcbus ? "src" : "dst");

                /*
                 * Zero length is only allowed if all these requirements are
                 * met:
                 * - flow controller is peripheral.
                 * - src.addr is aligned to src.width
                 * - dst.addr is aligned to dst.width
                 *
                 * sg_len == 1 should be true, as there can be two cases here:
                 *
                 * - Memory addresses are contiguous and are not scattered.
                 *   Here, only one sg will be passed by the client driver,
                 *   with a memory address and zero length. We pass this to the
                 *   controller, and after the transfer it will receive the
                 *   last burst request from the peripheral and the transfer
                 *   finishes.
                 *
                 * - Memory addresses are scattered and not contiguous.
                 *   Since the DMA controller doesn't know when an LLI's
                 *   transfer is over, it can't load the next LLI.  So in
                 *   this case there has to be an assumption that only one
                 *   LLI is supported.  Thus, we can't have scattered
                 *   addresses.
                 */
                if (!bd.remainder) {
                        u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
                                PL080_CONFIG_FLOW_CONTROL_SHIFT;
                        if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
                                        (fc <= PL080_FLOW_SRC2DST_SRC))) {
                                dev_err(&pl08x->adev->dev, "%s sg len can't be zero\n",
                                        __func__);
                                return 0;
                        }

                        if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
                                        (bd.dstbus.addr % bd.dstbus.buswidth)) {
                                dev_err(&pl08x->adev->dev,
                                        "%s src & dst address must be aligned to src"
                                        " & dst width if peripheral is flow controller\n",
                                        __func__);
                                return 0;
                        }

                        cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
                                        bd.dstbus.buswidth, 0);
                        pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
                        break;
                }

                /*
                 * Send byte by byte for the following cases:
                 * - Less than a bus width available
                 * - Until the master bus is aligned
                 */
                if (bd.remainder < mbus->buswidth)
                        early_bytes = bd.remainder;
                else if ((mbus->addr) % (mbus->buswidth)) {
                        early_bytes = mbus->buswidth - (mbus->addr) %
                                (mbus->buswidth);
                        if ((bd.remainder - early_bytes) < mbus->buswidth)
                                early_bytes = bd.remainder;
                }

                if (early_bytes) {
                        dev_vdbg(&pl08x->adev->dev,
                                "%s byte width LLIs (remain 0x%08zx)\n",
                                __func__, bd.remainder);
                        prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
                                &total_bytes);
                }

                if (bd.remainder) {
                        /*
                         * Master now aligned
                         * - if slave is not then we must set its width down
                         */
                        if (sbus->addr % sbus->buswidth) {
                                dev_dbg(&pl08x->adev->dev,
                                        "%s set down bus width to one byte\n",
                                        __func__);

                                sbus->buswidth = 1;
                        }

                        /*
                         * Bytes transferred = tsize * src width, not
                         * MIN(buswidths)
                         */
                        max_bytes_per_lli = bd.srcbus.buswidth *
                                PL080_CONTROL_TRANSFER_SIZE_MASK;
                        dev_vdbg(&pl08x->adev->dev,
                                "%s max bytes per lli = %zu\n",
                                __func__, max_bytes_per_lli);

                        /*
                         * Make largest possible LLIs until less than one bus
                         * width left
                         */
                        while (bd.remainder > (mbus->buswidth - 1)) {
                                size_t lli_len, tsize, width;

                                /*
                                 * If enough left try to send max possible,
                                 * otherwise try to send the remainder
                                 */
                                lli_len = min(bd.remainder, max_bytes_per_lli);

                                /*
                                 * Check against maximum bus alignment:
                                 * calculate the actual transfer size in
                                 * relation to the bus width, and get a
                                 * maximum remainder of the highest bus
                                 * width - 1
                                 */
                                width = max(mbus->buswidth, sbus->buswidth);
                                lli_len = (lli_len / width) * width;
                                tsize = lli_len / bd.srcbus.buswidth;

                                dev_vdbg(&pl08x->adev->dev,
                                        "%s fill lli with single lli chunk of "
                                        "size 0x%08zx (remainder 0x%08zx)\n",
                                        __func__, lli_len, bd.remainder);

                                cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
                                        bd.dstbus.buswidth, tsize);
                                pl08x_fill_lli_for_desc(&bd, num_llis++,
                                                lli_len, cctl);
                                total_bytes += lli_len;
                        }

                        /*
                         * Send any odd bytes
                         */
                        if (bd.remainder) {
                                dev_vdbg(&pl08x->adev->dev,
                                        "%s align with boundary, send odd bytes (remain %zu)\n",
                                        __func__, bd.remainder);
                                prep_byte_width_lli(&bd, &cctl, bd.remainder,
                                                num_llis++, &total_bytes);
                        }
                }

                if (total_bytes != dsg->len) {
                        dev_err(&pl08x->adev->dev,
                                "%s size of encoded LLIs doesn't match total txd: transferred 0x%08zx of 0x%08zx\n",
                                __func__, total_bytes, dsg->len);
                        return 0;
                }

                if (num_llis >= MAX_NUM_TSFR_LLIS) {
                        dev_err(&pl08x->adev->dev,
                                "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
                                __func__, (u32) MAX_NUM_TSFR_LLIS);
                        return 0;
                }
        }

        llis_va = txd->llis_va;
        /* The final LLI terminates the LLI chain. */
        llis_va[num_llis - 1].lli = 0;
        /* The final LLI element shall also fire an interrupt. */
        llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
        {
                int i;

                dev_vdbg(&pl08x->adev->dev,
                         "%-3s %-9s  %-10s %-10s %-10s %s\n",
                         "lli", "", "csrc", "cdst", "clli", "cctl");
                for (i = 0; i < num_llis; i++) {
                        dev_vdbg(&pl08x->adev->dev,
                                 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                 i, &llis_va[i], llis_va[i].src,
                                 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
                                );
                }
        }
#endif

        return num_llis;
}

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
                           struct pl08x_txd *txd)
{
        struct pl08x_sg *dsg, *_dsg;

        /* Free the LLI */
        if (txd->llis_va)
                dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

        pl08x->pool_ctr--;

        list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
                list_del(&dsg->node);
                kfree(dsg);
        }

        kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
                                struct pl08x_dma_chan *plchan)
{
        struct pl08x_txd *txdi = NULL;
        struct pl08x_txd *next;

        if (!list_empty(&plchan->pend_list)) {
                list_for_each_entry_safe(txdi,
                                         next, &plchan->pend_list, node) {
                        list_del(&txdi->node);
                        pl08x_free_txd(pl08x, txdi);
                }
        }
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
        return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
                            struct pl08x_txd *txd)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_phy_chan *ch;
        int ret;

        /* Check if we already have a channel */
        if (plchan->phychan) {
                ch = plchan->phychan;
                goto got_channel;
        }

        ch = pl08x_get_phy_channel(pl08x, plchan);
        if (!ch) {
                /* No physical channel available, cope with it */
                dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
                return -EBUSY;
        }

        /*
         * OK we have a physical channel: for memcpy() this is all we
         * need, but for slaves the physical signals may be muxed!
         * Can the platform allow us to use this channel?
         */
        if (plchan->slave && pl08x->pd->get_signal) {
                ret = pl08x->pd->get_signal(plchan);
                if (ret < 0) {
                        dev_dbg(&pl08x->adev->dev,
                                "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
                                ch->id, plchan->name);
                        /* Release physical channel & return */
                        pl08x_put_phy_channel(pl08x, ch);
                        return -EBUSY;
                }
                ch->signal = ret;
        }

        plchan->phychan = ch;
        dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
                 ch->id,
                 ch->signal,
                 plchan->name);

got_channel:
        /* Assign the flow control signal to this channel */
        if (txd->direction == DMA_MEM_TO_DEV)
                txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
        else if (txd->direction == DMA_DEV_TO_MEM)
                txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;

        plchan->phychan_hold++;

        return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
        struct pl08x_driver_data *pl08x = plchan->host;

        if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
                pl08x->pd->put_signal(plchan);
                plchan->phychan->signal = -1;
        }
        pl08x_put_phy_channel(pl08x, plchan->phychan);
        plchan->phychan = NULL;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
        struct pl08x_txd *txd = to_pl08x_txd(tx);
        unsigned long flags;

        spin_lock_irqsave(&plchan->lock, flags);

        plchan->chan.cookie += 1;
        if (plchan->chan.cookie < 0)
                plchan->chan.cookie = 1;
        tx->cookie = plchan->chan.cookie;

        /* Put this onto the pending list */
        list_add_tail(&txd->node, &plchan->pend_list);

        /*
         * If there was no physical channel available for this memcpy,
         * stack the request up and indicate that the channel is waiting
         * for a free physical channel.
         */
        if (!plchan->slave && !plchan->phychan) {
                /* Do this memcpy whenever there is a channel ready */
                plchan->state = PL08X_CHAN_WAITING;
                plchan->waiting = txd;
        } else {
                plchan->phychan_hold--;
        }

        spin_unlock_irqrestore(&plchan->lock, flags);

        return tx->cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
                struct dma_chan *chan, unsigned long flags)
{
        struct dma_async_tx_descriptor *retval = NULL;

        return retval;
}

/*
 * Polling dma_async_is_complete() in a tight loop can cause problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;
        u32 bytesleft = 0;

        last_used = plchan->chan.cookie;
        last_complete = plchan->lc;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS) {
                dma_set_tx_state(txstate, last_complete, last_used, 0);
                return ret;
        }

        /*
         * This cookie is not complete yet
         */
        last_used = plchan->chan.cookie;
        last_complete = plchan->lc;

        /* Get number of bytes left in the active transactions and queue */
        bytesleft = pl08x_getbytes_chan(plchan);

        dma_set_tx_state(txstate, last_complete, last_used,
                         bytesleft);

        if (plchan->state == PL08X_CHAN_PAUSED)
                return DMA_PAUSED;

        /* Whether waiting or running, we're in progress */
        return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
struct burst_table {
        u32 burstwords;
        u32 reg;
};

static const struct burst_table burst_sizes[] = {
        {
                .burstwords = 256,
                .reg = PL080_BSIZE_256,
        },
        {
                .burstwords = 128,
                .reg = PL080_BSIZE_128,
        },
        {
                .burstwords = 64,
                .reg = PL080_BSIZE_64,
        },
        {
                .burstwords = 32,
                .reg = PL080_BSIZE_32,
        },
        {
                .burstwords = 16,
                .reg = PL080_BSIZE_16,
        },
        {
                .burstwords = 8,
                .reg = PL080_BSIZE_8,
        },
        {
                .burstwords = 4,
                .reg = PL080_BSIZE_4,
        },
        {
                .burstwords = 0,
                .reg = PL080_BSIZE_1,
        },
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
        u32 cctl = 0;

        if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
                cctl |= PL080_CONTROL_DST_AHB2;
        if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
                cctl |= PL080_CONTROL_SRC_AHB2;

        return cctl;
}
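
/*
 * Illustrative outcomes of the above for a dual-master part:
 *
 *   src avail   dst avail   -> src port   dst port
 *   AHB1+AHB2   AHB1+AHB2      AHB1       AHB2
 *   AHB2 only   AHB1+AHB2      AHB2       AHB1
 *   AHB1+AHB2   AHB1 only      AHB2       AHB1
 */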

static u32 pl08x_cctl(u32 cctl)
{
        cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
                  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
                  PL080_CONTROL_PROT_MASK);

        /* Access the cell in privileged mode, non-bufferable, non-cacheable */
        return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                return PL080_WIDTH_8BIT;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                return PL080_WIDTH_16BIT;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                return PL080_WIDTH_32BIT;
        default:
                return ~0;
        }
}

static u32 pl08x_burst(u32 maxburst)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
                if (burst_sizes[i].burstwords <= maxburst)
                        break;

        return burst_sizes[i].reg;
}
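
/*
 * For example, a maxburst of 20 words matches no hardware burst size
 * exactly, so the scan above settles on the next smaller entry, 16
 * (PL080_BSIZE_16); a maxburst of 0 or 1 falls through to the
 * terminating entry, selecting single transfers (PL080_BSIZE_1).
 */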

static int dma_set_runtime_config(struct dma_chan *chan,
                                  struct dma_slave_config *config)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        enum dma_slave_buswidth addr_width;
        u32 width, burst, maxburst;
        u32 cctl = 0;

        if (!plchan->slave)
                return -EINVAL;

        /* Transfer direction */
        plchan->runtime_direction = config->direction;
        if (config->direction == DMA_MEM_TO_DEV) {
                addr_width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
        } else if (config->direction == DMA_DEV_TO_MEM) {
                addr_width = config->src_addr_width;
                maxburst = config->src_maxburst;
        } else {
                dev_err(&pl08x->adev->dev,
                        "bad runtime_config: alien transfer direction\n");
                return -EINVAL;
        }

        width = pl08x_width(addr_width);
        if (width == ~0) {
                dev_err(&pl08x->adev->dev,
                        "bad runtime_config: alien address width\n");
                return -EINVAL;
        }

        cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
        cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

        /*
         * If this channel will only request single transfers, set this
         * down to ONE element.  Also select one element if no maxburst
         * is specified.
         */
        if (plchan->cd->single)
                maxburst = 1;

        burst = pl08x_burst(maxburst);
        cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
        cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

        if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
                plchan->src_addr = config->src_addr;
                plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
                        pl08x_select_bus(plchan->cd->periph_buses,
                                         pl08x->mem_buses);
        } else {
                plchan->dst_addr = config->dst_addr;
                plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
                        pl08x_select_bus(pl08x->mem_buses,
                                         plchan->cd->periph_buses);
        }

        dev_dbg(&pl08x->adev->dev,
                "configured channel %s (%s) for %s, data width %d, "
                "maxburst %d words, LE, CCTL=0x%08x\n",
                dma_chan_name(chan), plchan->name,
                (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
                addr_width,
                maxburst,
                cctl);

        return 0;
}

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&plchan->lock, flags);
        /* Something is already active, or we're waiting for a channel... */
        if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
                spin_unlock_irqrestore(&plchan->lock, flags);
                return;
        }

        /* Take the first element in the queue and execute it */
        if (!list_empty(&plchan->pend_list)) {
                struct pl08x_txd *next;

                next = list_first_entry(&plchan->pend_list,
                                        struct pl08x_txd,
                                        node);
                list_del(&next->node);
                plchan->state = PL08X_CHAN_RUNNING;

                pl08x_start_txd(plchan, next);
        }

        spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
                                        struct pl08x_txd *txd)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        unsigned long flags;
        int num_llis, ret;

        num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
        if (!num_llis) {
                spin_lock_irqsave(&plchan->lock, flags);
                pl08x_free_txd(pl08x, txd);
                spin_unlock_irqrestore(&plchan->lock, flags);
                return -EINVAL;
        }

        spin_lock_irqsave(&plchan->lock, flags);

        /*
         * See if we already have a physical channel allocated,
         * else this is the time to try to get one.
         */
        ret = prep_phy_channel(plchan, txd);
        if (ret) {
                /*
                 * No physical channel was available.
                 *
                 * memcpy transfers can be sorted out at submission time.
                 *
                 * Slave transfers may have been denied due to platform
                 * channel muxing restrictions.  Since there is no guarantee
                 * that this will ever be resolved, and the signal must be
                 * acquired AFTER acquiring the physical channel, we will let
                 * them be NACKed with -EBUSY here.  The drivers can retry
                 * the prep() call if they are eager to do this using DMA.
                 */
1233                 if (plchan->slave) {
1234                         pl08x_free_txd_list(pl08x, plchan);
1235                         pl08x_free_txd(pl08x, txd);
1236                         spin_unlock_irqrestore(&plchan->lock, flags);
1237                         return -EBUSY;
1238                 }
1239         } else
1240                 /*
1241                  * Else we're all set, paused and ready to roll, status
1242                  * will switch to PL08X_CHAN_RUNNING when we call
1243                  * issue_pending(). If there is something running on the
1244                  * channel already we don't change its state.
1245                  */
1246                 if (plchan->state == PL08X_CHAN_IDLE)
1247                         plchan->state = PL08X_CHAN_PAUSED;
1248
1249         spin_unlock_irqrestore(&plchan->lock, flags);
1250
1251         return 0;
1252 }
1253
1254 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
1255         unsigned long flags)
1256 {
1257         struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
1258
1259         if (txd) {
1260                 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
1261                 txd->tx.flags = flags;
1262                 txd->tx.tx_submit = pl08x_tx_submit;
1263                 INIT_LIST_HEAD(&txd->node);
1264                 INIT_LIST_HEAD(&txd->dsg_list);
1265
1266                 /* Always enable error and terminal interrupts */
1267                 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
1268                             PL080_CONFIG_TC_IRQ_MASK;
1269         }
1270         return txd;
1271 }
1272
1273 /*
1274  * Initialize a descriptor to be used by memcpy submit
1275  */
1276 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1277                 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1278                 size_t len, unsigned long flags)
1279 {
1280         struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1281         struct pl08x_driver_data *pl08x = plchan->host;
1282         struct pl08x_txd *txd;
1283         struct pl08x_sg *dsg;
1284         int ret;
1285
1286         txd = pl08x_get_txd(plchan, flags);
1287         if (!txd) {
1288                 dev_err(&pl08x->adev->dev,
1289                         "%s no memory for descriptor\n", __func__);
1290                 return NULL;
1291         }
1292
1293         dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1294         if (!dsg) {
1295                 pl08x_free_txd(pl08x, txd);
1296                 dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
1297                                 __func__);
1298                 return NULL;
1299         }
1300         list_add_tail(&dsg->node, &txd->dsg_list);
1301
1302         txd->direction = DMA_NONE;
1303         dsg->src_addr = src;
1304         dsg->dst_addr = dest;
1305         dsg->len = len;
1306
1307         /* Set platform data for m2m */
1308         txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1309         txd->cctl = pl08x->pd->memcpy_channel.cctl &
1310                         ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
1311
1312         /* Both to be incremented or the code will break */
1313         txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1314
1315         if (pl08x->vd->dualmaster)
1316                 txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
1317                                               pl08x->mem_buses);
1318
1319         ret = pl08x_prep_channel_resources(plchan, txd);
1320         if (ret)
1321                 return NULL;
1322
1323         return &txd->tx;
1324 }
1325
1326 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1327                 struct dma_chan *chan, struct scatterlist *sgl,
1328                 unsigned int sg_len, enum dma_transfer_direction direction,
1329                 unsigned long flags)
1330 {
1331         struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1332         struct pl08x_driver_data *pl08x = plchan->host;
1333         struct pl08x_txd *txd;
1334         struct pl08x_sg *dsg;
1335         struct scatterlist *sg;
1336         dma_addr_t slave_addr;
1337         int ret, tmp;
1338
1339         dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1340                         __func__, sgl->length, plchan->name);
1341
1342         txd = pl08x_get_txd(plchan, flags);
1343         if (!txd) {
1344                 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1345                 return NULL;
1346         }
1347
1348         if (direction != plchan->runtime_direction)
1349                 dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
1350                         "the direction configured for the PrimeCell\n",
1351                         __func__);
1352
1353         /*
1354          * Set up addresses, the PrimeCell configured address
1355          * will take precedence since this may configure the
1356          * channel target address dynamically at runtime.
1357          */
1358         txd->direction = direction;
1359
1360         if (direction == DMA_MEM_TO_DEV) {
1361                 txd->cctl = plchan->dst_cctl;
1362                 slave_addr = plchan->dst_addr;
1363         } else if (direction == DMA_DEV_TO_MEM) {
1364                 txd->cctl = plchan->src_cctl;
1365                 slave_addr = plchan->src_addr;
1366         } else {
1367                 pl08x_free_txd(pl08x, txd);
1368                 dev_err(&pl08x->adev->dev,
1369                         "%s direction unsupported\n", __func__);
1370                 return NULL;
1371         }
1372
1373         if (plchan->cd->device_fc)
1374                 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
1375                         PL080_FLOW_PER2MEM_PER;
1376         else
1377                 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
1378                         PL080_FLOW_PER2MEM;
1379
1380         txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1381
1382         for_each_sg(sgl, sg, sg_len, tmp) {
1383                 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1384                 if (!dsg) {
1385                         pl08x_free_txd(pl08x, txd);
1386                         dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1387                                         __func__);
1388                         return NULL;
1389                 }
1390                 list_add_tail(&dsg->node, &txd->dsg_list);
1391
1392                 dsg->len = sg_dma_len(sg);
1393                 if (direction == DMA_MEM_TO_DEV) {
1394                         dsg->src_addr = sg_phys(sg);
1395                         dsg->dst_addr = slave_addr;
1396                 } else {
1397                         dsg->src_addr = slave_addr;
1398                         dsg->dst_addr = sg_phys(sg);
1399                 }
1400         }
1401
1402         ret = pl08x_prep_channel_resources(plchan, txd);
1403         if (ret)
1404                 return NULL;
1405
1406         return &txd->tx;
1407 }
1408
1409 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1410                          unsigned long arg)
1411 {
1412         struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1413         struct pl08x_driver_data *pl08x = plchan->host;
1414         unsigned long flags;
1415         int ret = 0;
1416
1417         /* Controls applicable to inactive channels */
1418         if (cmd == DMA_SLAVE_CONFIG) {
1419                 return dma_set_runtime_config(chan,
1420                                               (struct dma_slave_config *)arg);
1421         }
1422
1423         /*
1424          * Anything succeeds on channels with no physical allocation and
1425          * no queued transfers.
1426          */
1427         spin_lock_irqsave(&plchan->lock, flags);
1428         if (!plchan->phychan && !plchan->at) {
1429                 spin_unlock_irqrestore(&plchan->lock, flags);
1430                 return 0;
1431         }
1432
1433         switch (cmd) {
1434         case DMA_TERMINATE_ALL:
1435                 plchan->state = PL08X_CHAN_IDLE;
1436
1437                 if (plchan->phychan) {
1438                         pl08x_terminate_phy_chan(pl08x, plchan->phychan);
1439
1440                         /*
1441                          * Mark physical channel as free and free any slave
1442                          * signal
1443                          */
1444                         release_phy_channel(plchan);
1445                 }
1446                 /* Free the in-progress transfer and its LLIs */
1447                 if (plchan->at) {
1448                         pl08x_free_txd(pl08x, plchan->at);
1449                         plchan->at = NULL;
1450                 }
1451                 /* Dequeue jobs not yet fired as well */
1452                 pl08x_free_txd_list(pl08x, plchan);
1453                 break;
1454         case DMA_PAUSE:
1455                 pl08x_pause_phy_chan(plchan->phychan);
1456                 plchan->state = PL08X_CHAN_PAUSED;
1457                 break;
1458         case DMA_RESUME:
1459                 pl08x_resume_phy_chan(plchan->phychan);
1460                 plchan->state = PL08X_CHAN_RUNNING;
1461                 break;
1462         default:
1463                 /* Unknown command */
1464                 ret = -ENXIO;
1465                 break;
1466         }
1467
1468         spin_unlock_irqrestore(&plchan->lock, flags);
1469
1470         return ret;
1471 }
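
     /*
      * A hedged sketch of how a client drives the control hook above via
      * the dmaengine wrappers; the FIFO address and burst size below are
      * made-up example values, not taken from this driver:
      *
      *	struct dma_slave_config cfg = {
      *		.direction = DMA_MEM_TO_DEV,
      *		.dst_addr = fifo_phys_addr,
      *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
      *		.dst_maxburst = 4,
      *	};
      *
      *	dmaengine_slave_config(chan, &cfg);	maps to DMA_SLAVE_CONFIG
      *	dmaengine_pause(chan);			maps to DMA_PAUSE
      *	dmaengine_resume(chan);			maps to DMA_RESUME
      *	dmaengine_terminate_all(chan);		maps to DMA_TERMINATE_ALL
      */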
1472
1473 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1474 {
1475         struct pl08x_dma_chan *plchan;
1476         char *name = chan_id;
1477
1478         /* Reject channels for devices not bound to this driver */
1479         if (chan->device->dev->driver != &pl08x_amba_driver.drv)
1480                 return false;
1481
1482         plchan = to_pl08x_chan(chan);
1483
1484         /* Match the channel by its assigned name */
1485         if (!strcmp(plchan->name, name))
1486                 return true;
1487
1488         return false;
1489 }
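
     /*
      * A minimal sketch of requesting a channel by name through the filter
      * above ("uart0_tx" is a hypothetical bus_id from the platform data):
      *
      *	dma_cap_mask_t mask;
      *	struct dma_chan *chan;
      *
      *	dma_cap_zero(mask);
      *	dma_cap_set(DMA_SLAVE, mask);
      *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
      */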
1490
1491 /*
1492  * Just check that the device is there and active
1493  * TODO: turn this bit on/off depending on the number of physical channels
1494  * actually in use; if none are, shut it off to save some power, and cut
1495  * the clock at the same time.
1496  */
1497 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1498 {
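             /*
              * Set the E(nable) bit in the configuration register; leaving
              * the remaining bits zero keeps the default little-endian AHB
              * setup.
              */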
1499         writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1500 }
1501
1502 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1503 {
1504         struct device *dev = txd->tx.chan->device->dev;
1505         struct pl08x_sg *dsg;
1506
1507         if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1508                 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1509                         list_for_each_entry(dsg, &txd->dsg_list, node)
1510                                 dma_unmap_single(dev, dsg->src_addr, dsg->len,
1511                                                 DMA_TO_DEVICE);
1512                 else
1513                         list_for_each_entry(dsg, &txd->dsg_list, node)
1514                                 dma_unmap_page(dev, dsg->src_addr, dsg->len,
1515                                                 DMA_TO_DEVICE);
1517         }
1518         if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1519                 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1520                         list_for_each_entry(dsg, &txd->dsg_list, node)
1521                                 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
1522                                                 DMA_FROM_DEVICE);
1523                 else
1524                         list_for_each_entry(dsg, &txd->dsg_list, node)
1525                                 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
1526                                                 DMA_FROM_DEVICE);
1527         }
1528 }
1529
1530 static void pl08x_tasklet(unsigned long data)
1531 {
1532         struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1533         struct pl08x_driver_data *pl08x = plchan->host;
1534         struct pl08x_txd *txd;
1535         unsigned long flags;
1536
1537         spin_lock_irqsave(&plchan->lock, flags);
1538
1539         txd = plchan->at;
1540         plchan->at = NULL;
1541
1542         if (txd) {
1543                 /* Update last completed */
1544                 plchan->lc = txd->tx.cookie;
1545         }
1546
1547         /* If a new descriptor is queued, set it up: plchan->at is NULL here */
1548         if (!list_empty(&plchan->pend_list)) {
1549                 struct pl08x_txd *next;
1550
1551                 next = list_first_entry(&plchan->pend_list,
1552                                         struct pl08x_txd,
1553                                         node);
1554                 list_del(&next->node);
1555
1556                 pl08x_start_txd(plchan, next);
1557         } else if (plchan->phychan_hold) {
1558                 /*
1559                  * This channel is still in use - we have a new txd being
1560                  * prepared and will soon be queued.  Don't give up the
1561                  * physical channel.
1562                  */
1563         } else {
1564                 struct pl08x_dma_chan *waiting = NULL;
1565
1566                 /*
1567                  * No more jobs, so free up the physical channel
1568                  * Free any allocated signal on slave transfers too
1569                  */
1570                 release_phy_channel(plchan);
1571                 plchan->state = PL08X_CHAN_IDLE;
1572
1573                 /*
1574          * And NOW, before anyone else can grab that freed-up
1575          * physical channel, see if any memcpy transfer is pending
1576          * that urgently needs to start because it was stacked
1577          * up while we were saturating the physical channels with data.
1578                  */
1579                 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1580                                     chan.device_node) {
1581                         if (waiting->state == PL08X_CHAN_WAITING &&
1582                                 waiting->waiting != NULL) {
1583                                 int ret;
1584
1585                                 /* This should REALLY not fail now */
1586                                 ret = prep_phy_channel(waiting,
1587                                                        waiting->waiting);
1588                                 BUG_ON(ret);
1589                                 waiting->phychan_hold--;
1590                                 waiting->state = PL08X_CHAN_RUNNING;
1591                                 waiting->waiting = NULL;
1592                                 pl08x_issue_pending(&waiting->chan);
1593                                 break;
1594                         }
1595                 }
1596         }
1597
1598         spin_unlock_irqrestore(&plchan->lock, flags);
1599
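     /*
      * Run the completion callback outside plchan->lock so that it can
      * safely prepare and submit new descriptors without deadlocking.
      */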
1600         if (txd) {
1601                 dma_async_tx_callback callback = txd->tx.callback;
1602                 void *callback_param = txd->tx.callback_param;
1603
1604                 /* Don't try to unmap buffers on slave channels */
1605                 if (!plchan->slave)
1606                         pl08x_unmap_buffers(txd);
1607
1608                 /* Free the descriptor */
1609                 spin_lock_irqsave(&plchan->lock, flags);
1610                 pl08x_free_txd(pl08x, txd);
1611                 spin_unlock_irqrestore(&plchan->lock, flags);
1612
1613                 /* Callback to signal completion */
1614                 if (callback)
1615                         callback(callback_param);
1616         }
1617 }
1618
1619 static irqreturn_t pl08x_irq(int irq, void *dev)
1620 {
1621         struct pl08x_driver_data *pl08x = dev;
1622         u32 mask = 0, err, tc, i;
1623
1624         /* check & clear - ERR & TC interrupts */
1625         err = readl(pl08x->base + PL080_ERR_STATUS);
1626         if (err) {
1627                 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
1628                         __func__, err);
1629                 writel(err, pl08x->base + PL080_ERR_CLEAR);
1630         }
1631         tc = readl(pl08x->base + PL080_INT_STATUS);
1632         if (tc)
1633                 writel(tc, pl08x->base + PL080_TC_CLEAR);
1634
1635         if (!err && !tc)
1636                 return IRQ_NONE;
1637
1638         for (i = 0; i < pl08x->vd->channels; i++) {
1639                 if (((1 << i) & err) || ((1 << i) & tc)) {
1640                         /* Locate physical channel */
1641                         struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1642                         struct pl08x_dma_chan *plchan = phychan->serving;
1643
1644                         if (!plchan) {
1645                                 dev_err(&pl08x->adev->dev,
1646                                         "%s Error TC interrupt on unused channel: 0x%08x\n",
1647                                         __func__, i);
1648                                 continue;
1649                         }
1650
1651                         /* Schedule tasklet on this channel */
1652                         tasklet_schedule(&plchan->tasklet);
1653                         mask |= (1 << i);
1654                 }
1655         }
1656
1657         return mask ? IRQ_HANDLED : IRQ_NONE;
1658 }
1659
1660 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1661 {
1662         u32 cctl = pl08x_cctl(chan->cd->cctl);
1663
1664         chan->slave = true;
1665         chan->name = chan->cd->bus_id;
1666         chan->src_addr = chan->cd->addr;
1667         chan->dst_addr = chan->cd->addr;
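             /*
              * Note the apparent inversion: src_cctl is used for DEV_TO_MEM,
              * where memory is the transfer *destination* and must increment,
              * while dst_cctl is used for MEM_TO_DEV, where memory is the
              * *source*.  The peripheral side never increments.
              */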
1668         chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
1669                 pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
1670         chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
1671                 pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
1672 }
1673
1674 /*
1675  * Initialise the DMAC memcpy/slave channels.
1676  * Make a local wrapper to hold required data
1677  */
1678 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1679                 struct dma_device *dmadev, unsigned int channels, bool slave)
1680 {
1681         struct pl08x_dma_chan *chan;
1682         int i;
1683
1684         INIT_LIST_HEAD(&dmadev->channels);
1685
1686         /*
1687          * Register as many memcpy channels as we have physical channels;
1688          * we won't always be able to use all of them, but the code will
1689          * have to cope with that situation.
1690          */
1691         for (i = 0; i < channels; i++) {
1692                 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1693                 if (!chan) {
1694                         dev_err(&pl08x->adev->dev,
1695                                 "%s no memory for channel\n", __func__);
1696                         return -ENOMEM;
1697                 }
1698
1699                 chan->host = pl08x;
1700                 chan->state = PL08X_CHAN_IDLE;
1701
1702                 if (slave) {
1703                         chan->cd = &pl08x->pd->slave_channels[i];
1704                         pl08x_dma_slave_init(chan);
1705                 } else {
1706                         chan->cd = &pl08x->pd->memcpy_channel;
1707                         chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1708                         if (!chan->name) {
1709                                 kfree(chan);
1710                                 return -ENOMEM;
1711                         }
1712                 }
1713                 if (chan->cd->circular_buffer) {
1714                         dev_err(&pl08x->adev->dev,
1715                                 "channel %s: circular buffers not supported\n",
1716                                 chan->name);
1717                         kfree(chan);
1718                         continue;
1719                 }
1720                 dev_dbg(&pl08x->adev->dev,
1721                          "initialize virtual channel \"%s\"\n",
1722                          chan->name);
1723
1724                 chan->chan.device = dmadev;
1725                 chan->chan.cookie = 0;
1726                 chan->lc = 0;
1727
1728                 spin_lock_init(&chan->lock);
1729                 INIT_LIST_HEAD(&chan->pend_list);
1730                 tasklet_init(&chan->tasklet, pl08x_tasklet,
1731                              (unsigned long) chan);
1732
1733                 list_add_tail(&chan->chan.device_node, &dmadev->channels);
1734         }
1735         dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1736                  i, slave ? "slave" : "memcpy");
1737         return i;
1738 }
1739
1740 static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1741 {
1742         struct pl08x_dma_chan *chan = NULL;
1743         struct pl08x_dma_chan *next;
1744
1745         list_for_each_entry_safe(chan,
1746                                  next, &dmadev->channels, chan.device_node) {
1747                 list_del(&chan->chan.device_node);
1748                 kfree(chan);
1749         }
1750 }
1751
1752 #ifdef CONFIG_DEBUG_FS
1753 static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
1754 {
1755         switch (state) {
1756         case PL08X_CHAN_IDLE:
1757                 return "idle";
1758         case PL08X_CHAN_RUNNING:
1759                 return "running";
1760         case PL08X_CHAN_PAUSED:
1761                 return "paused";
1762         case PL08X_CHAN_WAITING:
1763                 return "waiting";
1764         default:
1765                 break;
1766         }
1767         return "UNKNOWN STATE";
1768 }
1769
1770 static int pl08x_debugfs_show(struct seq_file *s, void *data)
1771 {
1772         struct pl08x_driver_data *pl08x = s->private;
1773         struct pl08x_dma_chan *chan;
1774         struct pl08x_phy_chan *ch;
1775         unsigned long flags;
1776         int i;
1777
1778         seq_printf(s, "PL08x physical channels:\n");
1779         seq_printf(s, "CHANNEL:\tUSER:\n");
1780         seq_printf(s, "--------\t-----\n");
1781         for (i = 0; i < pl08x->vd->channels; i++) {
1782                 struct pl08x_dma_chan *virt_chan;
1783
1784                 ch = &pl08x->phy_chans[i];
1785
1786                 spin_lock_irqsave(&ch->lock, flags);
1787                 virt_chan = ch->serving;
1788
1789                 seq_printf(s, "%d\t\t%s\n",
1790                            ch->id, virt_chan ? virt_chan->name : "(none)");
1791
1792                 spin_unlock_irqrestore(&ch->lock, flags);
1793         }
1794
1795         seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1796         seq_printf(s, "CHANNEL:\tSTATE:\n");
1797         seq_printf(s, "--------\t------\n");
1798         list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1799                 seq_printf(s, "%s\t\t%s\n", chan->name,
1800                            pl08x_state_str(chan->state));
1801         }
1802
1803         seq_printf(s, "\nPL08x virtual slave channels:\n");
1804         seq_printf(s, "CHANNEL:\tSTATE:\n");
1805         seq_printf(s, "--------\t------\n");
1806         list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1807                 seq_printf(s, "%s\t\t%s\n", chan->name,
1808                            pl08x_state_str(chan->state));
1809         }
1810
1811         return 0;
1812 }
1813
1814 static int pl08x_debugfs_open(struct inode *inode, struct file *file)
1815 {
1816         return single_open(file, pl08x_debugfs_show, inode->i_private);
1817 }
1818
1819 static const struct file_operations pl08x_debugfs_operations = {
1820         .open           = pl08x_debugfs_open,
1821         .read           = seq_read,
1822         .llseek         = seq_lseek,
1823         .release        = single_release,
1824 };
1825
1826 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1827 {
1828         /* Expose a simple debugfs interface to view all channels */
1829         (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
1830                         S_IFREG | S_IRUGO, NULL, pl08x,
1831                         &pl08x_debugfs_operations);
1832 }
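
     /*
      * A hedged usage note: with debugfs mounted, the file created above
      * appears in the debugfs root under the AMBA device name, so channel
      * state can be inspected with e.g. (the device name below is only an
      * example):
      *
      *	cat /sys/kernel/debug/fc000000.dma
      */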
1833
1834 #else
1835 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1836 {
1837 }
1838 #endif
1839
1840 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1841 {
1842         struct pl08x_driver_data *pl08x;
1843         const struct vendor_data *vd = id->data;
1844         int ret = 0;
1845         int i;
1846
1847         ret = amba_request_regions(adev, NULL);
1848         if (ret)
1849                 return ret;
1850
1851         /* Create the driver state holder */
1852         pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
1853         if (!pl08x) {
1854                 ret = -ENOMEM;
1855                 goto out_no_pl08x;
1856         }
1857
1858         pm_runtime_set_active(&adev->dev);
1859         pm_runtime_enable(&adev->dev);
1860
1861         /* Initialize memcpy engine */
1862         dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1863         pl08x->memcpy.dev = &adev->dev;
1864         pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1865         pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
1866         pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
1867         pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1868         pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
1869         pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
1870         pl08x->memcpy.device_control = pl08x_control;
1871
1872         /* Initialize slave engine */
1873         dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
1874         pl08x->slave.dev = &adev->dev;
1875         pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1876         pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
1877         pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1878         pl08x->slave.device_tx_status = pl08x_dma_tx_status;
1879         pl08x->slave.device_issue_pending = pl08x_issue_pending;
1880         pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
1881         pl08x->slave.device_control = pl08x_control;
1882
1883         /* Get the platform data */
1884         pl08x->pd = dev_get_platdata(&adev->dev);
1885         if (!pl08x->pd) {
1886                 dev_err(&adev->dev, "no platform data supplied\n");
                     ret = -EINVAL;
1887                 goto out_no_platdata;
1888         }
1889
1890         /* Assign useful pointers to the driver state */
1891         pl08x->adev = adev;
1892         pl08x->vd = vd;
1893
1894         /* By default, AHB1 only.  If dualmaster, from platform */
1895         pl08x->lli_buses = PL08X_AHB1;
1896         pl08x->mem_buses = PL08X_AHB1;
1897         if (pl08x->vd->dualmaster) {
1898                 pl08x->lli_buses = pl08x->pd->lli_buses;
1899                 pl08x->mem_buses = pl08x->pd->mem_buses;
1900         }
1901
1902         /* A DMA memory pool for LLIs, aligned to a PL08X_ALIGN boundary */
1903         pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
1904                         PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
1905         if (!pl08x->pool) {
1906                 ret = -ENOMEM;
1907                 goto out_no_lli_pool;
1908         }
1909
1910         spin_lock_init(&pl08x->lock);
1911
1912         pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
1913         if (!pl08x->base) {
1914                 ret = -ENOMEM;
1915                 goto out_no_ioremap;
1916         }
1917
1918         /* Turn on the PL08x */
1919         pl08x_ensure_on(pl08x);
1920
1921         /* Clear any pending interrupts before attaching the handler */
1922         writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
1923         writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
1924
1925         ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
1926                           DRIVER_NAME, pl08x);
1927         if (ret) {
1928                 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
1929                         __func__, adev->irq[0]);
1930                 goto out_no_irq;
1931         }
1932
1933         /* Initialize physical channels */
1934         pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
1935                         GFP_KERNEL);
1936         if (!pl08x->phy_chans) {
1937                 dev_err(&adev->dev, "%s failed to allocate "
1938                         "physical channel holders\n",
1939                         __func__);
1940                 goto out_no_phychans;
1941         }
1942
1943         for (i = 0; i < vd->channels; i++) {
1944                 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
1945
1946                 ch->id = i;
1947                 ch->base = pl08x->base + PL080_Cx_BASE(i);
1948                 spin_lock_init(&ch->lock);
1949                 ch->serving = NULL;
1950                 ch->signal = -1;
1951                 dev_dbg(&adev->dev, "physical channel %d is %s\n",
1952                         i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
1953         }
1954
1955         /* Register as many memcpy channels as there are physical channels */
1956         ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
1957                                               pl08x->vd->channels, false);
1958         if (ret <= 0) {
1959                 dev_warn(&pl08x->adev->dev,
1960                          "%s failed to enumerate memcpy channels - %d\n",
1961                          __func__, ret);
1962                 goto out_no_memcpy;
1963         }
1964         pl08x->memcpy.chancnt = ret;
1965
1966         /* Register slave channels */
1967         ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
1968                         pl08x->pd->num_slave_channels, true);
1969         if (ret <= 0) {
1970                 dev_warn(&pl08x->adev->dev,
1971                         "%s failed to enumerate slave channels - %d\n",
1972                                 __func__, ret);
1973                 goto out_no_slave;
1974         }
1975         pl08x->slave.chancnt = ret;
1976
1977         ret = dma_async_device_register(&pl08x->memcpy);
1978         if (ret) {
1979                 dev_warn(&pl08x->adev->dev,
1980                         "%s failed to register memcpy as an async device - %d\n",
1981                         __func__, ret);
1982                 goto out_no_memcpy_reg;
1983         }
1984
1985         ret = dma_async_device_register(&pl08x->slave);
1986         if (ret) {
1987                 dev_warn(&pl08x->adev->dev,
1988                         "%s failed to register slave as an async device - %d\n",
1989                         __func__, ret);
1990                 goto out_no_slave_reg;
1991         }
1992
1993         amba_set_drvdata(adev, pl08x);
1994         init_pl08x_debugfs(pl08x);
1995         dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
1996                  amba_part(adev), amba_rev(adev),
1997                  (unsigned long long)adev->res.start, adev->irq[0]);
1998
1999         pm_runtime_put(&adev->dev);
2000         return 0;
2001
2002 out_no_slave_reg:
2003         dma_async_device_unregister(&pl08x->memcpy);
2004 out_no_memcpy_reg:
2005         pl08x_free_virtual_channels(&pl08x->slave);
2006 out_no_slave:
2007         pl08x_free_virtual_channels(&pl08x->memcpy);
2008 out_no_memcpy:
2009         kfree(pl08x->phy_chans);
2010 out_no_phychans:
2011         free_irq(adev->irq[0], pl08x);
2012 out_no_irq:
2013         iounmap(pl08x->base);
2014 out_no_ioremap:
2015         dma_pool_destroy(pl08x->pool);
2016 out_no_lli_pool:
2017 out_no_platdata:
2018         pm_runtime_put(&adev->dev);
2019         pm_runtime_disable(&adev->dev);
2020
2021         kfree(pl08x);
2022 out_no_pl08x:
2023         amba_release_regions(adev);
2024         return ret;
2025 }
2026
2027 /* The PL080 has 8 channels and the PL081 has just 2 */
2028 static struct vendor_data vendor_pl080 = {
2029         .channels = 8,
2030         .dualmaster = true,
2031 };
2032
2033 static struct vendor_data vendor_pl081 = {
2034         .channels = 2,
2035         .dualmaster = false,
2036 };
2037
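     /*
      * AMBA match table: an entry matches when the primecell's peripheral
      * ID registers, masked with .mask, equal .id.
      */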
2038 static struct amba_id pl08x_ids[] = {
2039         /* PL080 */
2040         {
2041                 .id     = 0x00041080,
2042                 .mask   = 0x000fffff,
2043                 .data   = &vendor_pl080,
2044         },
2045         /* PL081 */
2046         {
2047                 .id     = 0x00041081,
2048                 .mask   = 0x000fffff,
2049                 .data   = &vendor_pl081,
2050         },
2051         /* Nomadik 8815 PL080 variant */
2052         {
2053                 .id     = 0x00280880,
2054                 .mask   = 0x00ffffff,
2055                 .data   = &vendor_pl080,
2056         },
2057         { 0, 0 },
2058 };
2059
2060 MODULE_DEVICE_TABLE(amba, pl08x_ids);
2061
2062 static struct amba_driver pl08x_amba_driver = {
2063         .drv.name       = DRIVER_NAME,
2064         .id_table       = pl08x_ids,
2065         .probe          = pl08x_probe,
2066 };
2067
2068 static int __init pl08x_init(void)
2069 {
2070         int retval;
2071         retval = amba_driver_register(&pl08x_amba_driver);
2072         if (retval)
2073                 printk(KERN_WARNING DRIVER_NAME
2074                        ": failed to register as an AMBA driver (%d)\n",
2075                        retval);
2076         return retval;
2077 }
2078 subsys_initcall(pl08x_init);