/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the
 * file called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
 * any channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * Since there are fewer channels than signals, it is usually not possible
 * to service all physical signals simultaneously, so a multiplexing scheme
 * with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *      Get data from memory to DMAC
 *      Until no data left
 *              On burst request from peripheral
 *                      Destination burst from DMAC to peripheral
 *                      Clear burst request
 *      Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to the documentation),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Only the former works sanely with scatter lists, so we only implement
 * the DMAC flow control method.  However, peripherals which use the LBREQ
 * and LSREQ signals (e.g. MMCI) are unable to use this mode; these
 * hardware restrictions prevent them from using scatter DMA.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/hardware/pl080.h>

#define DRIVER_NAME     "pl08xdmac"

/**
 * struct vendor_data - vendor-specific config parameters
 * for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters
 * or not.
 */
struct vendor_data {
        u8 channels;
        bool dualmaster;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
        u32 src;
        u32 dst;
        u32 lli;
        u32 cctl;
};
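
/*
 * Illustrative sketch (editorial, not used by the driver): a two-entry
 * chain.  Bit 0 of the next pointer selects which AHB master fetches
 * the next LLI, so OR in PL080_LLI_LM_AHB2 when it should be fetched
 * over AHB2, and terminate the chain with a zero pointer:
 *
 *	struct pl08x_lli *lli;		// CPU view of the pool memory
 *	dma_addr_t lli_bus;		// bus address of the same memory
 *
 *	lli[0].lli = (lli_bus + sizeof(struct pl08x_lli))
 *			| PL080_LLI_LM_AHB2;	// fetch lli[1] on AHB2
 *	lli[1].lli = 0;				// end of chain
 */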

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
        struct dma_device slave;
        struct dma_device memcpy;
        void __iomem *base;
        struct amba_device *adev;
        const struct vendor_data *vd;
        struct pl08x_platform_data *pd;
        struct pl08x_phy_chan *phy_chans;
        struct dma_pool *pool;
        int pool_ctr;
        spinlock_t lock;
};

/*
 * PL08X specific defines
 */

/*
 * Memory boundaries: the manual for PL08x says that the controller
 * cannot read past a 1KiB boundary, so these defines are used to
 * create transfer LLIs that do not cross such boundaries.
 */
#define PL08X_BOUNDARY_SHIFT            (10)    /* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE             (1 << PL08X_BOUNDARY_SHIFT)

/* Minimum period between work queue runs */
#define PL08X_WQ_PERIODMIN      20

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE     0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define PL08X_MAX_ALLOCS        0x40
#define MAX_NUM_TSFR_LLIS       (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN             8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
        return container_of(chan, struct pl08x_dma_chan, chan);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
        unsigned int val;

        val = readl(ch->base + PL080_CH_CONFIG);
        return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values, i.e. those for the first LLI.
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
        struct pl08x_txd *txd)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_phy_chan *phychan = plchan->phychan;
        struct pl08x_lli *lli = &txd->llis_va[0];
        u32 val, ccfg;

        plchan->at = txd;

        /* Assign the signal to the proper control registers */
        ccfg = plchan->cd->ccfg;
        ccfg &= ~(PL080_CONFIG_SRC_SEL_MASK | PL080_CONFIG_DST_SEL_MASK);

        /* If it wasn't set from AMBA, ignore it */
        if (txd->direction == DMA_TO_DEVICE)
                /* Select signal as destination */
                ccfg |= phychan->signal << PL080_CONFIG_DST_SEL_SHIFT;
        else if (txd->direction == DMA_FROM_DEVICE)
                /* Select signal as source */
                ccfg |= phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

        /* Always enable error and terminal interrupts */
        ccfg |= PL080_CONFIG_ERR_IRQ_MASK | PL080_CONFIG_TC_IRQ_MASK;

        /* Wait for channel inactive */
        while (pl08x_phy_channel_busy(phychan))
                cpu_relax();

        dev_vdbg(&pl08x->adev->dev,
                "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
                "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
                phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
                ccfg);

        writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
        writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
        writel(lli->lli, phychan->base + PL080_CH_LLI);
        writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
        writel(ccfg, phychan->base + PL080_CH_CONFIG);

        /* Enable the DMA channel */
        /* Do not access config register until channel shows as disabled */
        while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
                cpu_relax();

        /* Do not access config register until channel shows as inactive */
        val = readl(phychan->base + PL080_CH_CONFIG);
        while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
                val = readl(phychan->base + PL080_CH_CONFIG);

        writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Overall DMAC remains enabled always.
 *
 * Disabling individual channels could lose data.
 *
 * Disable the peripheral DMA after disabling the DMAC in order to allow
 * the DMAC FIFO to drain, and hence allow the channel to show inactive.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;

        /* Set the HALT bit and wait for the FIFO to drain */
        val = readl(ch->base + PL080_CH_CONFIG);
        val |= PL080_CONFIG_HALT;
        writel(val, ch->base + PL080_CH_CONFIG);

        /* Wait for channel inactive */
        while (pl08x_phy_channel_busy(ch))
                cpu_relax();
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;

        /* Clear the HALT bit */
        val = readl(ch->base + PL080_CH_CONFIG);
        val &= ~PL080_CONFIG_HALT;
        writel(val, ch->base + PL080_CH_CONFIG);
}


/* Stops the channel */
static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;

        pl08x_pause_phy_chan(ch);

        /* Disable channel */
        val = readl(ch->base + PL080_CH_CONFIG);
        val &= ~PL080_CONFIG_ENABLE;
        val &= ~PL080_CONFIG_ERR_IRQ_MASK;
        val &= ~PL080_CONFIG_TC_IRQ_MASK;
        writel(val, ch->base + PL080_CH_CONFIG);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
        /* The source width defines the number of bytes */
        u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

        cctl &= PL080_CONTROL_SWIDTH_MASK;

        switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
        case PL080_WIDTH_8BIT:
                break;
        case PL080_WIDTH_16BIT:
                bytes *= 2;
                break;
        case PL080_WIDTH_32BIT:
                bytes *= 4;
                break;
        }
        return bytes;
}
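
/*
 * Worked example (editorial): with SWIDTH = PL080_WIDTH_16BIT and a
 * transfer size field of 64, the LLI describes 64 transfers of 2 bytes
 * each, so get_bytes_in_cctl() returns 128.
 */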

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
        struct pl08x_phy_chan *ch;
        struct pl08x_txd *txd;
        unsigned long flags;
        size_t bytes = 0;

        spin_lock_irqsave(&plchan->lock, flags);
        ch = plchan->phychan;
        txd = plchan->at;

        /*
         * Follow the LLIs to get the number of remaining
         * bytes in the currently active transaction.
         */
        if (ch && txd) {
                u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

                /* First get the remaining bytes in the active transfer */
                bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

                if (clli) {
                        struct pl08x_lli *llis_va = txd->llis_va;
                        dma_addr_t llis_bus = txd->llis_bus;
                        int index;

                        BUG_ON(clli < llis_bus || clli >= llis_bus +
                                sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

                        /*
                         * Locate the next LLI - as this is an array,
                         * it's simple maths to find.
                         */
                        index = (clli - llis_bus) / sizeof(struct pl08x_lli);

                        for (; index < MAX_NUM_TSFR_LLIS; index++) {
                                bytes += get_bytes_in_cctl(llis_va[index].cctl);

                                /*
                                 * An LLI pointer of 0 terminates the LLI list
                                 */
                                if (!llis_va[index].lli)
                                        break;
                        }
                }
        }

        /* Sum up all queued transactions */
        if (!list_empty(&plchan->desc_list)) {
                struct pl08x_txd *txdi;
                list_for_each_entry(txdi, &plchan->desc_list, node) {
                        bytes += txdi->len;
                }
        }

        spin_unlock_irqrestore(&plchan->lock, flags);

        return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
                      struct pl08x_dma_chan *virt_chan)
{
        struct pl08x_phy_chan *ch = NULL;
        unsigned long flags;
        int i;

        /*
         * Try to locate a physical channel to be used for
         * this transfer. If all are taken return NULL and
         * the requester will have to cope by using some fallback
         * PIO mode or retrying later.
         */
        for (i = 0; i < pl08x->vd->channels; i++) {
                ch = &pl08x->phy_chans[i];

                spin_lock_irqsave(&ch->lock, flags);

                if (!ch->serving) {
                        ch->serving = virt_chan;
                        ch->signal = -1;
                        spin_unlock_irqrestore(&ch->lock, flags);
                        break;
                }

                spin_unlock_irqrestore(&ch->lock, flags);
        }

        if (i == pl08x->vd->channels) {
                /* No physical channel available, cope with it */
                return NULL;
        }

        return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
                                         struct pl08x_phy_chan *ch)
{
        unsigned long flags;

        /* Stop the channel and clear its interrupts */
        pl08x_stop_phy_chan(ch);
        writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
        writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);

        /* Mark it as free */
        spin_lock_irqsave(&ch->lock, flags);
        ch->serving = NULL;
        spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
        switch (coded) {
        case PL080_WIDTH_8BIT:
                return 1;
        case PL080_WIDTH_16BIT:
                return 2;
        case PL080_WIDTH_32BIT:
                return 4;
        default:
                break;
        }
        BUG();
        return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
                                  size_t tsize)
{
        u32 retbits = cctl;

        /* Remove all src, dst and transfer size bits */
        retbits &= ~PL080_CONTROL_DWIDTH_MASK;
        retbits &= ~PL080_CONTROL_SWIDTH_MASK;
        retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

        /* Then set the bits according to the parameters */
        switch (srcwidth) {
        case 1:
                retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        case 2:
                retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        case 4:
                retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        default:
                BUG();
                break;
        }

        switch (dstwidth) {
        case 1:
                retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        case 2:
                retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        case 4:
                retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        default:
                BUG();
                break;
        }

        retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
        return retbits;
}
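
/*
 * Example (editorial sketch): re-encode a control word for 4-byte
 * source and destination widths and 8 transfers, keeping all the other
 * bits of the original cctl:
 *
 *	cctl = pl08x_cctl_bits(cctl, 4, 4, 8);
 *
 * With 4-byte widths on both sides this LLI moves 8 * 4 = 32 bytes
 * (cf. get_bytes_in_cctl() above).
 */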

/*
 * Autoselect a master bus to use for the transfer.
 * This prefers the destination bus if both are available; if one bus
 * has a fixed (non-incrementing) address, the other bus is chosen.
 */
static void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
        struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
        struct pl08x_bus_data **sbus, u32 cctl)
{
        if (!(cctl & PL080_CONTROL_DST_INCR)) {
                *mbus = src_bus;
                *sbus = dst_bus;
        } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
                *mbus = dst_bus;
                *sbus = src_bus;
        } else {
                if (dst_bus->buswidth == 4) {
                        *mbus = dst_bus;
                        *sbus = src_bus;
                } else if (src_bus->buswidth == 4) {
                        *mbus = src_bus;
                        *sbus = dst_bus;
                } else if (dst_bus->buswidth == 2) {
                        *mbus = dst_bus;
                        *sbus = src_bus;
                } else if (src_bus->buswidth == 2) {
                        *mbus = src_bus;
                        *sbus = dst_bus;
                } else {
                        /* src_bus->buswidth == 1 */
                        *mbus = dst_bus;
                        *sbus = src_bus;
                }
        }
}
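
/*
 * Example (editorial): for a memory-to-peripheral transfer where the
 * destination address is fixed (PL080_CONTROL_DST_INCR clear), the
 * incrementing source side is selected as master bus:
 *
 *	struct pl08x_bus_data *mbus, *sbus;
 *
 *	pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
 *				&mbus, &sbus, cctl);
 *	// now mbus == &txd->srcbus and sbus == &txd->dstbus
 */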

/*
 * Fill in one LLI for a certain transfer descriptor
 * and advance the counter
 */
static int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
                            struct pl08x_txd *txd, int num_llis, int len,
                            u32 cctl, size_t *remainder)
{
        struct pl08x_lli *llis_va = txd->llis_va;
        dma_addr_t llis_bus = txd->llis_bus;

        BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

        llis_va[num_llis].cctl          = cctl;
        llis_va[num_llis].src           = txd->srcbus.addr;
        llis_va[num_llis].dst           = txd->dstbus.addr;

        /*
         * On versions with dual masters, you can optionally OR in
         * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
         * in new LLIs with that controller, but we always try to
         * choose AHB1 to point into memory. The idea is to have AHB2
         * fixed on the peripheral and AHB1 messing around in the
         * memory. So we don't manipulate this bit currently.
         */

        llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);

        if (cctl & PL080_CONTROL_SRC_INCR)
                txd->srcbus.addr += len;
        if (cctl & PL080_CONTROL_DST_INCR)
                txd->dstbus.addr += len;

        BUG_ON(*remainder < len);

        *remainder -= len;

        return num_llis + 1;
}

/*
 * Return number of bytes to fill to boundary, or len
 */
static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
        u32 boundary;

        boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
                << PL08X_BOUNDARY_SHIFT;

        if (boundary < addr + len)
                return boundary - addr;
        else
                return len;
}
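
/*
 * Worked example (editorial): for addr = 0x3f8 and len = 0x20 the next
 * 1KiB boundary is 0x400, which falls inside the transfer, so 8 bytes
 * (0x400 - 0x3f8) are returned and the remaining 0x18 bytes must go in
 * a following LLI.  For addr = 0x400 the computed boundary is 0x800,
 * beyond the transfer, so len itself (0x20) is returned.
 */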

/*
 * This fills in the table of LLIs for the transfer descriptor.
 * Note that we assume we never have to change the burst sizes.
 * Returns 0 on error.
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                              struct pl08x_txd *txd)
{
        struct pl08x_channel_data *cd = txd->cd;
        struct pl08x_bus_data *mbus, *sbus;
        size_t remainder;
        int num_llis = 0;
        u32 cctl;
        size_t max_bytes_per_lli;
        size_t total_bytes = 0;
        struct pl08x_lli *llis_va;

        txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
                                      &txd->llis_bus);
        if (!txd->llis_va) {
                dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
                return 0;
        }

        pl08x->pool_ctr++;

        /*
         * Initialize bus values for this transfer
         * from the passed optimal values
         */
        if (!cd) {
                dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
                return 0;
        }

        /* Get the default CCTL from the platform data */
        cctl = cd->cctl;

        /*
         * On the PL080 we have two bus masters and we
         * should select one for source and one for
         * destination. We try to use AHB2 for the
         * bus which does not increment (typically the
         * peripheral) else we just choose something.
         */
        cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
        if (pl08x->vd->dualmaster) {
                if (cctl & PL080_CONTROL_SRC_INCR)
                        /* Source increments, use AHB2 for destination */
                        cctl |= PL080_CONTROL_DST_AHB2;
                else if (cctl & PL080_CONTROL_DST_INCR)
                        /* Destination increments, use AHB2 for source */
                        cctl |= PL080_CONTROL_SRC_AHB2;
                else
                        /* Just pick something, source AHB1 dest AHB2 */
                        cctl |= PL080_CONTROL_DST_AHB2;
        }

        /* Find maximum width of the source bus */
        txd->srcbus.maxwidth =
                pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
                                       PL080_CONTROL_SWIDTH_SHIFT);

        /* Find maximum width of the destination bus */
        txd->dstbus.maxwidth =
                pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
                                       PL080_CONTROL_DWIDTH_SHIFT);

        /* Set up the bus widths to the maximum */
        txd->srcbus.buswidth = txd->srcbus.maxwidth;
        txd->dstbus.buswidth = txd->dstbus.maxwidth;
        dev_vdbg(&pl08x->adev->dev,
                 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
                 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);


        /*
         * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
         */
        max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
                PL080_CONTROL_TRANSFER_SIZE_MASK;
        dev_vdbg(&pl08x->adev->dev,
                 "%s max bytes per lli = %zu\n",
                 __func__, max_bytes_per_lli);

        /* We need to count this down to zero */
        remainder = txd->len;
        dev_vdbg(&pl08x->adev->dev,
                 "%s remainder = %zu\n",
                 __func__, remainder);

        /*
         * Choose bus to align to
         * - prefers destination bus if both available
         * - if fixed address on one bus chooses other
         * - modifies cctl to choose an appropriate master
         */
        pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
                                &mbus, &sbus, cctl);


        /*
         * The lowest bit of the LLI register
         * is also used to indicate which master to
         * use for reading the LLIs.
         */

        if (txd->len < mbus->buswidth) {
                /*
                 * Less than a bus width available
                 * - send as single bytes
                 */
                while (remainder) {
                        dev_vdbg(&pl08x->adev->dev,
                                 "%s single byte LLIs for a transfer of "
                                 "less than a bus width (remain 0x%08zx)\n",
                                 __func__, remainder);
                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
                        num_llis =
                                pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
                                        cctl, &remainder);
                        total_bytes++;
                }
        } else {
                /*
                 *  Make one byte LLIs until master bus is aligned
                 *  - slave will then be aligned also
                 */
                while ((mbus->addr) % (mbus->buswidth)) {
                        dev_vdbg(&pl08x->adev->dev,
                                "%s adjustment lli for less than bus width "
                                 "(remain 0x%08zx)\n",
                                 __func__, remainder);
                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
                        num_llis = pl08x_fill_lli_for_desc
                                (pl08x, txd, num_llis, 1, cctl, &remainder);
                        total_bytes++;
                }

                /*
                 *  Master now aligned
                 * - if slave is not then we must set its width down
                 */
                if (sbus->addr % sbus->buswidth) {
                        dev_dbg(&pl08x->adev->dev,
                                "%s set down bus width to one byte\n",
                                 __func__);

                        sbus->buswidth = 1;
                }

                /*
                 * Make largest possible LLIs until less than one bus
                 * width left
                 */
                while (remainder > (mbus->buswidth - 1)) {
                        size_t lli_len, target_len, tsize, odd_bytes;

                        /*
                         * If enough left try to send max possible,
                         * otherwise try to send the remainder
                         */
                        target_len = remainder;
                        if (remainder > max_bytes_per_lli)
                                target_len = max_bytes_per_lli;

                        /*
                         * Set bus lengths for incrementing buses
                         * to number of bytes which fill to next memory
                         * boundary
                         */
                        if (cctl & PL080_CONTROL_SRC_INCR)
                                txd->srcbus.fill_bytes =
                                        pl08x_pre_boundary(
                                                txd->srcbus.addr,
                                                remainder);
                        else
                                txd->srcbus.fill_bytes =
                                        max_bytes_per_lli;

                        if (cctl & PL080_CONTROL_DST_INCR)
                                txd->dstbus.fill_bytes =
                                        pl08x_pre_boundary(
                                                txd->dstbus.addr,
                                                remainder);
                        else
                                txd->dstbus.fill_bytes =
                                                max_bytes_per_lli;

                        /*
                         *  Find the nearest boundary
                         */
                        lli_len = min(txd->srcbus.fill_bytes,
                                txd->dstbus.fill_bytes);

                        BUG_ON(lli_len > remainder);

                        if (lli_len <= 0) {
                                dev_err(&pl08x->adev->dev,
                                        "%s lli_len is %zu, <= 0\n",
                                                __func__, lli_len);
                                return 0;
                        }

                        if (lli_len == target_len) {
                                /*
                                 * Can send what we wanted.
                                 * Maintain alignment.
                                 */
                                lli_len = (lli_len/mbus->buswidth) *
                                                        mbus->buswidth;
                                odd_bytes = 0;
                        } else {
                                /*
                                 * So now we know how many bytes to transfer
                                 * to get to the nearest boundary.  The next
                                 * LLI will go past the boundary - however we
                                 * may be working to a boundary on the slave
                                 * bus.  We need to ensure the master stays
                                 * aligned...
                                 */
                                odd_bytes = lli_len % mbus->buswidth;
                                /*
                                 * ...and that we are working in multiples
                                 * of the bus widths.
                                 */
                                lli_len -= odd_bytes;

                        }

                        if (lli_len) {
                                /*
                                 * Check against minimum bus alignment:
                                 * calculate the actual transfer size in
                                 * relation to bus width and get a maximum
                                 * remainder of the smallest bus width - 1
                                 */
                                /* FIXME: use round_down()? */
                                tsize = lli_len / min(mbus->buswidth,
                                                      sbus->buswidth);
                                lli_len = tsize * min(mbus->buswidth,
                                                      sbus->buswidth);

                                if (target_len != lli_len) {
                                        dev_vdbg(&pl08x->adev->dev,
                                        "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
                                        __func__, target_len, lli_len, txd->len);
                                }

                                cctl = pl08x_cctl_bits(cctl,
                                                       txd->srcbus.buswidth,
                                                       txd->dstbus.buswidth,
                                                       tsize);

                                dev_vdbg(&pl08x->adev->dev,
                                        "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
                                        __func__, lli_len, remainder);
                                num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
                                                num_llis, lli_len, cctl,
                                                &remainder);
                                total_bytes += lli_len;
                        }


                        if (odd_bytes) {
                                /*
                                 * Creep past the boundary,
                                 * maintaining master alignment
                                 */
                                int j;
                                for (j = 0; (j < mbus->buswidth)
                                                && (remainder); j++) {
                                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
                                        dev_vdbg(&pl08x->adev->dev,
                                                "%s align with boundary, single byte (remain 0x%08zx)\n",
                                                __func__, remainder);
                                        num_llis =
                                                pl08x_fill_lli_for_desc(pl08x,
                                                        txd, num_llis, 1,
                                                        cctl, &remainder);
                                        total_bytes++;
                                }
                        }
                }

                /*
                 * Send any odd bytes
                 */
                while (remainder) {
                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
                        dev_vdbg(&pl08x->adev->dev,
                                "%s align with boundary, single odd byte (remain %zu)\n",
                                __func__, remainder);
                        num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
                                        1, cctl, &remainder);
                        total_bytes++;
                }
        }
        if (total_bytes != txd->len) {
                dev_err(&pl08x->adev->dev,
                        "%s size of encoded LLIs don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
                        __func__, total_bytes, txd->len);
                return 0;
        }

        if (num_llis >= MAX_NUM_TSFR_LLIS) {
                dev_err(&pl08x->adev->dev,
                        "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
                        __func__, (u32) MAX_NUM_TSFR_LLIS);
                return 0;
        }

        llis_va = txd->llis_va;
        /*
         * The final LLI terminates the LLI list.
         */
        llis_va[num_llis - 1].lli = 0;
        /*
         * The final LLI element shall also fire an interrupt
         */
        llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
        {
                int i;

                for (i = 0; i < num_llis; i++) {
                        dev_vdbg(&pl08x->adev->dev,
                                 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
                                 i,
                                 &llis_va[i],
                                 llis_va[i].src,
                                 llis_va[i].dst,
                                 llis_va[i].cctl,
                                 llis_va[i].lli
                                );
                }
        }
#endif

        return num_llis;
}

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
                           struct pl08x_txd *txd)
{
        /* Free the LLI */
        dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

        pl08x->pool_ctr--;

        kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
                                struct pl08x_dma_chan *plchan)
{
        struct pl08x_txd *txdi = NULL;
        struct pl08x_txd *next;

        if (!list_empty(&plchan->desc_list)) {
                list_for_each_entry_safe(txdi,
                                         next, &plchan->desc_list, node) {
                        list_del(&txdi->node);
                        pl08x_free_txd(pl08x, txdi);
                }

        }
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
        return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
                            struct pl08x_txd *txd)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_phy_chan *ch;
        int ret;

        /* Check if we already have a channel */
        if (plchan->phychan)
                return 0;

        ch = pl08x_get_phy_channel(pl08x, plchan);
        if (!ch) {
                /* No physical channel available, cope with it */
                dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
                return -EBUSY;
        }

        /*
         * OK we have a physical channel: for memcpy() this is all we
         * need, but for slaves the physical signals may be muxed!
         * Can the platform allow us to use this channel?
         */
        if (plchan->slave &&
            ch->signal < 0 &&
            pl08x->pd->get_signal) {
                ret = pl08x->pd->get_signal(plchan);
                if (ret < 0) {
                        dev_dbg(&pl08x->adev->dev,
                                "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
                                ch->id, plchan->name);
                        /* Release physical channel & return */
                        pl08x_put_phy_channel(pl08x, ch);
                        return -EBUSY;
                }
                ch->signal = ret;
        }

        dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
                 ch->id,
                 ch->signal,
                 plchan->name);

        plchan->phychan = ch;

        return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
        struct pl08x_driver_data *pl08x = plchan->host;

        if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
                pl08x->pd->put_signal(plchan);
                plchan->phychan->signal = -1;
        }
        pl08x_put_phy_channel(pl08x, plchan->phychan);
        plchan->phychan = NULL;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);

        plchan->chan.cookie += 1;
        if (plchan->chan.cookie < 0)
                plchan->chan.cookie = 1;
        tx->cookie = plchan->chan.cookie;
        /* This unlock follows the lock in the prep() function */
        spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);

        return tx->cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
                struct dma_chan *chan, unsigned long flags)
{
        struct dma_async_tx_descriptor *retval = NULL;

        return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give
 * problems - we could schedule where indicated below.  If slaves are
 * relying on interrupts to signal completion, this function must not
 * be called with interrupts disabled.
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
                    dma_cookie_t cookie,
                    struct dma_tx_state *txstate)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;
        u32 bytesleft = 0;

        last_used = plchan->chan.cookie;
        last_complete = plchan->lc;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS) {
                dma_set_tx_state(txstate, last_complete, last_used, 0);
                return ret;
        }

        /*
         * schedule(); could be inserted here
         */

        /*
         * This cookie not complete yet
         */
        last_used = plchan->chan.cookie;
        last_complete = plchan->lc;

        /* Get number of bytes left in the active transactions and queue */
        bytesleft = pl08x_getbytes_chan(plchan);

        dma_set_tx_state(txstate, last_complete, last_used,
                         bytesleft);

        if (plchan->state == PL08X_CHAN_PAUSED)
                return DMA_PAUSED;

        /* Whether waiting or running, we're in progress */
        return DMA_IN_PROGRESS;
}
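
/*
 * Example client usage (editorial sketch): polling a cookie through the
 * generic dmaengine API ends up in pl08x_dma_tx_status() above:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = chan->device->device_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_debug("%u bytes left\n", state.residue);
 */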

/* PrimeCell DMA extension */
struct burst_table {
        int burstwords;
        u32 reg;
};

static const struct burst_table burst_sizes[] = {
        {
                .burstwords = 256,
                .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 128,
                .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 64,
                .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 32,
                .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 16,
                .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 8,
                .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 4,
                .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 1,
                .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
};
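
/*
 * Example (editorial): dma_set_runtime_config() below walks this table
 * from the top and stops at the first entry not exceeding the client's
 * maxburst, so maxburst = 20 selects the 16-word encoding and
 * maxburst = 300 selects the 256-word encoding.
 */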

static void dma_set_runtime_config(struct dma_chan *chan,
                               struct dma_slave_config *config)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_channel_data *cd = plchan->cd;
        enum dma_slave_buswidth addr_width;
        u32 maxburst;
        u32 cctl = 0;
        /* Mask out all except src and dst channel */
        u32 ccfg = cd->ccfg & 0x000003DEU;
        int i;

        /* Transfer direction */
        plchan->runtime_direction = config->direction;
        if (config->direction == DMA_TO_DEVICE) {
                plchan->runtime_addr = config->dst_addr;
                cctl |= PL080_CONTROL_SRC_INCR;
                ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
                addr_width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
        } else if (config->direction == DMA_FROM_DEVICE) {
                plchan->runtime_addr = config->src_addr;
                cctl |= PL080_CONTROL_DST_INCR;
                ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
                addr_width = config->src_addr_width;
                maxburst = config->src_maxburst;
        } else {
                dev_err(&pl08x->adev->dev,
                        "bad runtime_config: alien transfer direction\n");
                return;
        }

        switch (addr_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
                        (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
                        (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
                        (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
                break;
        default:
                dev_err(&pl08x->adev->dev,
                        "bad runtime_config: alien address width\n");
                return;
        }

        /*
         * Now decide on a maxburst:
         * If this channel will only request single transfers, set this
         * down to ONE element.  Also select one element if no maxburst
         * is specified.
         */
        if (plchan->cd->single || maxburst == 0) {
                cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
        } else {
                for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
                        if (burst_sizes[i].burstwords <= maxburst)
                                break;
                cctl |= burst_sizes[i].reg;
        }

        /* Access the cell in privileged mode, non-bufferable, non-cacheable */
        cctl &= ~PL080_CONTROL_PROT_MASK;
        cctl |= PL080_CONTROL_PROT_SYS;

        /* Modify the default channel data to fit PrimeCell request */
        cd->cctl = cctl;
        cd->ccfg = ccfg;

        dev_dbg(&pl08x->adev->dev,
                "configured channel %s (%s) for %s, data width %d, "
                "maxburst %d words, LE, CCTL=0x%08x, CCFG=0x%08x\n",
                dma_chan_name(chan), plchan->name,
                (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
                addr_width,
                maxburst,
                cctl, ccfg);
}
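
/*
 * Example client configuration (editorial sketch; the FIFO address is
 * hypothetical): setting up a TX channel before issuing slave
 * transfers, routed here via the DMA_SLAVE_CONFIG case in
 * pl08x_control() below:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_TO_DEVICE,
 *		.dst_addr = fifo_phys_addr,	// peripheral FIFO (hypothetical)
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long)&cfg);
 */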

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&plchan->lock, flags);
        /* Something is already active, or we're waiting for a channel... */
        if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
                spin_unlock_irqrestore(&plchan->lock, flags);
                return;
        }

        /* Take the first element in the queue and execute it */
        if (!list_empty(&plchan->desc_list)) {
                struct pl08x_txd *next;

                next = list_first_entry(&plchan->desc_list,
                                        struct pl08x_txd,
                                        node);
                list_del(&next->node);
                plchan->state = PL08X_CHAN_RUNNING;

                pl08x_start_txd(plchan, next);
        }

        spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
                                        struct pl08x_txd *txd)
{
        int num_llis;
        struct pl08x_driver_data *pl08x = plchan->host;
        int ret;

        num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
        if (!num_llis) {
                kfree(txd);
                return -EINVAL;
        }

        spin_lock_irqsave(&plchan->lock, plchan->lockflags);

        list_add_tail(&txd->node, &plchan->desc_list);

        /*
         * See if we already have a physical channel allocated,
         * else this is the time to try to get one.
         */
        ret = prep_phy_channel(plchan, txd);
        if (ret) {
                /*
                 * No physical channel was available.
                 *
                 * memcpy transfers will be stacked up until a channel
                 * is available to handle them.
                 *
                 * Slave transfers may have been denied due to platform
                 * channel muxing restrictions.  Since there is no
                 * guarantee that this will ever be resolved, and since
                 * the signal must be acquired AFTER acquiring the
                 * physical channel, we will let them be NACK:ed with
                 * -EBUSY here.  The drivers can always retry the prep()
                 * call if they are eager on doing this using DMA.
                 */
                if (plchan->slave) {
                        pl08x_free_txd_list(pl08x, plchan);
                        spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
                        return -EBUSY;
                }
                /* Do this memcpy whenever there is a channel ready */
                plchan->state = PL08X_CHAN_WAITING;
                plchan->waiting = txd;
        } else
                /*
                 * Else we're all set, paused and ready to roll,
                 * status will switch to PL08X_CHAN_RUNNING when
                 * we call issue_pending(). If there is something
                 * running on the channel already we don't change
                 * its state.
                 */
                if (plchan->state == PL08X_CHAN_IDLE)
                        plchan->state = PL08X_CHAN_PAUSED;

        /*
         * Notice that we leave plchan->lock locked on purpose:
         * it will be unlocked in the subsequent tx_submit()
         * call. This is a consequence of the current API.
         */

        return 0;
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
        struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);

        if (txd) {
                dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
                txd->tx.tx_submit = pl08x_tx_submit;
                INIT_LIST_HEAD(&txd->node);
        }
        return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
                struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_txd *txd;
        int ret;

        txd = pl08x_get_txd(plchan);
        if (!txd) {
                dev_err(&pl08x->adev->dev,
                        "%s no memory for descriptor\n", __func__);
                return NULL;
        }

        txd->direction = DMA_NONE;
        txd->srcbus.addr = src;
        txd->dstbus.addr = dest;

        /* Set platform data for m2m */
        txd->cd = &pl08x->pd->memcpy_channel;
        /* Both to be incremented or the code will break */
        txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
        txd->len = len;

        ret = pl08x_prep_channel_resources(plchan, txd);
        if (ret)
                return NULL;
        /*
         * NB: the channel lock is held at this point so tx_submit()
         * must be called in direct succession.
         */

        return &txd->tx;
}
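
/*
 * Example client usage (editorial sketch): a memcpy prepared through
 * the dmaengine API lands in pl08x_prep_dma_memcpy() above.  As noted
 * there, the channel lock is held on return, so tx_submit() must
 * follow in direct succession:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_phys,
 *						  src_phys, len, 0);
 *	if (tx)
 *		cookie = tx->tx_submit(tx);
 */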
1386
1387 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1388                 struct dma_chan *chan, struct scatterlist *sgl,
1389                 unsigned int sg_len, enum dma_data_direction direction,
1390                 unsigned long flags)
1391 {
1392         struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1393         struct pl08x_driver_data *pl08x = plchan->host;
1394         struct pl08x_txd *txd;
1395         int ret;
1396
1397         /*
1398          * Current implementation ASSUMES only one sg
1399          */
1400         if (sg_len != 1) {
1401                 dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
1402                         __func__);
1403                 BUG();
1404         }
1405
1406         dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1407                 __func__, sgl->length, plchan->name);
1408
1409         txd = pl08x_get_txd(plchan);
1410         if (!txd) {
1411                 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1412                 return NULL;
1413         }
1414
1415         if (direction != plchan->runtime_direction)
1416                 dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
1417                         "the direction configured for the PrimeCell\n",
1418                         __func__);
1419
1420         /*
1421          * Set up addresses: an address configured on the PrimeCell
1422          * at runtime (via the slave config) takes precedence, since
1423          * it may retarget the channel dynamically.
1424          */
1425         txd->direction = direction;
1426         if (direction == DMA_TO_DEVICE) {
1427                 txd->srcbus.addr = sgl->dma_address;
1428                 if (plchan->runtime_addr)
1429                         txd->dstbus.addr = plchan->runtime_addr;
1430                 else
1431                         txd->dstbus.addr = plchan->cd->addr;
1432         } else if (direction == DMA_FROM_DEVICE) {
1433                 if (plchan->runtime_addr)
1434                         txd->srcbus.addr = plchan->runtime_addr;
1435                 else
1436                         txd->srcbus.addr = plchan->cd->addr;
1437                 txd->dstbus.addr = sgl->dma_address;
1438         } else {
1439                 dev_err(&pl08x->adev->dev,
1440                         "%s direction unsupported\n", __func__);
1441                 return NULL;
1442         }
1443         txd->cd = plchan->cd;
1444         txd->len = sgl->length;
1445
1446         ret = pl08x_prep_channel_resources(plchan, txd);
1447         if (ret)
1448                 return NULL;
1449         /*
1450          * NB: the channel lock is held at this point so tx_submit()
1451          * must be called in direct succession.
1452          */
1453
1454         return &txd->tx;
1455 }
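
/*
 * A client-side sketch (hypothetical, not in this driver) of a slave
 * transmit using the hook above. Note the single-entry scatterlist:
 * pl08x_prep_slave_sg() BUG()s on anything longer. The mapping device
 * and buffer are assumptions of the example, and cleanup on the error
 * paths is abbreviated.
 */
static int pl08x_example_slave_tx(struct dma_chan *chan,
                                  struct device *dev, void *buf, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct scatterlist sg;

        sg_init_one(&sg, buf, len);
        if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) != 1)
                return -ENOMEM;

        tx = chan->device->device_prep_slave_sg(chan, &sg, 1,
                                                DMA_TO_DEVICE, 0);
        if (!tx)
                return -EINVAL;

        tx->tx_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}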
1456
1457 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1458                          unsigned long arg)
1459 {
1460         struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1461         struct pl08x_driver_data *pl08x = plchan->host;
1462         unsigned long flags;
1463         int ret = 0;
1464
1465         /* Controls applicable to inactive channels */
1466         if (cmd == DMA_SLAVE_CONFIG) {
1467                 dma_set_runtime_config(chan,
1468                                        (struct dma_slave_config *)
1469                                        arg);
1470                 return 0;
1471         }
1472
1473         /*
1474          * Anything succeeds on channels with no physical allocation and
1475          * no queued transfers.
1476          */
1477         spin_lock_irqsave(&plchan->lock, flags);
1478         if (!plchan->phychan && !plchan->at) {
1479                 spin_unlock_irqrestore(&plchan->lock, flags);
1480                 return 0;
1481         }
1482
1483         switch (cmd) {
1484         case DMA_TERMINATE_ALL:
1485                 plchan->state = PL08X_CHAN_IDLE;
1486
1487                 if (plchan->phychan) {
1488                         pl08x_stop_phy_chan(plchan->phychan);
1489
1490                         /*
1491                          * Mark physical channel as free and free any slave
1492                          * signal
1493                          */
1494                         release_phy_channel(plchan);
1495                 }
1496                 /* Dequeue jobs and free LLIs */
1497                 if (plchan->at) {
1498                         pl08x_free_txd(pl08x, plchan->at);
1499                         plchan->at = NULL;
1500                 }
1501                 /* Dequeue jobs not yet fired as well */
1502                 pl08x_free_txd_list(pl08x, plchan);
1503                 break;
1504         case DMA_PAUSE:
1505                 pl08x_pause_phy_chan(plchan->phychan);
1506                 plchan->state = PL08X_CHAN_PAUSED;
1507                 break;
1508         case DMA_RESUME:
1509                 pl08x_resume_phy_chan(plchan->phychan);
1510                 plchan->state = PL08X_CHAN_RUNNING;
1511                 break;
1512         default:
1513                 /* Unknown command */
1514                 ret = -ENXIO;
1515                 break;
1516         }
1517
1518         spin_unlock_irqrestore(&plchan->lock, flags);
1519
1520         return ret;
1521 }
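
/*
 * Sketches of how a client reaches the control hook above. The FIFO
 * address and burst/width values are hypothetical; DMA_SLAVE_CONFIG is
 * the only command that carries an argument.
 */
static int pl08x_example_pause(struct dma_chan *chan)
{
        return chan->device->device_control(chan, DMA_PAUSE, 0);
}

static int pl08x_example_config(struct dma_chan *chan, dma_addr_t fifo)
{
        struct dma_slave_config config = {
                .direction = DMA_TO_DEVICE,
                .dst_addr = fifo,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst = 4,
        };

        return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
                                            (unsigned long) &config);
}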
1522
1523 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1524 {
1525         struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1526         char *name = chan_id;
1527
1528         /* Match the channel by the name assigned in the platform data */
1529         if (!strcmp(plchan->name, name))
1530                 return true;
1531
1532         return false;
1533 }
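
/*
 * Sketch of how a client claims a channel through the filter above;
 * "uart0_tx" stands in for a bus_id from the platform data and is an
 * assumption of the example.
 */
static struct dma_chan *pl08x_example_request(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, pl08x_filter_id,
                                   (void *) "uart0_tx");
}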
1534
1535 /*
1536  * Ensure the controller is enabled and ready for use.
1537  * TODO: toggle the enable bit depending on the number of
1538  * physical channels actually in use; if that number drops
1539  * to zero, disable the controller and cut its clock to
1540  * save some power.
1541  */
1542 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1543 {
1544         u32 val;
1545
1546         val = readl(pl08x->base + PL080_CONFIG);
1547         val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
1548         /* Clearing the M1/M2 endianness bits selects little-endian mode */
1549         val |= PL080_CONFIG_ENABLE;
1550         writel(val, pl08x->base + PL080_CONFIG);
1551 }
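
/*
 * A sketch of the complementary helper hinted at in the TODO above
 * (hypothetical, not in the driver): clear only the enable bit,
 * leaving the endianness configuration untouched.
 */
static void pl08x_ensure_off(struct pl08x_driver_data *pl08x)
{
        u32 val = readl(pl08x->base + PL080_CONFIG);

        val &= ~PL080_CONFIG_ENABLE;
        writel(val, pl08x->base + PL080_CONFIG);
}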
1552
1553 static void pl08x_tasklet(unsigned long data)
1554 {
1555         struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1556         struct pl08x_driver_data *pl08x = plchan->host;
1557         unsigned long flags;
1558
1559         spin_lock_irqsave(&plchan->lock, flags);
1560
1561         if (plchan->at) {
1562                 dma_async_tx_callback callback =
1563                         plchan->at->tx.callback;
1564                 void *callback_param =
1565                         plchan->at->tx.callback_param;
1566
1567                 /*
1568                  * Update last completed
1569                  */
1570                 plchan->lc = plchan->at->tx.cookie;
1571
1572                 /*
1573                  * Callback to signal completion
1574                  */
1575                 if (callback)
1576                         callback(callback_param);
1577
1578                 /*
1579                  * Free the descriptor
1580                  */
1581                 pl08x_free_txd(pl08x, plchan->at);
1582                 plchan->at = NULL;
1583         }
1584         /*
1585          * If a new descriptor is queued, set it up;
1586          * plchan->at is NULL at this point.
1587          */
1588         if (!list_empty(&plchan->desc_list)) {
1589                 struct pl08x_txd *next;
1590
1591                 next = list_first_entry(&plchan->desc_list,
1592                                         struct pl08x_txd,
1593                                         node);
1594                 list_del(&next->node);
1595
1596                 pl08x_start_txd(plchan, next);
1597         } else {
1598                 struct pl08x_dma_chan *waiting = NULL;
1599
1600                 /*
1601                  * No more jobs, so free up the physical channel
1602                  * Free any allocated signal on slave transfers too
1603                  */
1604                 release_phy_channel(plchan);
1605                 plchan->state = PL08X_CHAN_IDLE;
1606
1607                 /*
1608                  * Before anyone else can grab the freed-up physical
1609                  * channel, check whether any memcpy transfer is
1610                  * waiting to start: such transfers may have stacked
1611                  * up while all of the physical channels were busy
1612                  * moving data.
1613                  */
1614                 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1615                                     chan.device_node) {
1616                         if (waiting->state == PL08X_CHAN_WAITING &&
1617                             waiting->waiting != NULL) {
1618                                 int ret;
1619
1620                                 /* This should REALLY not fail now */
1621                                 ret = prep_phy_channel(waiting,
1622                                                        waiting->waiting);
1623                                 BUG_ON(ret);
1624                                 waiting->state = PL08X_CHAN_RUNNING;
1625                                 waiting->waiting = NULL;
1626                                 pl08x_issue_pending(&waiting->chan);
1627                                 break;
1628                         }
1629                 }
1630         }
1631
1632         spin_unlock_irqrestore(&plchan->lock, flags);
1633 }
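
/*
 * The tasklet above runs the descriptor callback in softirq context
 * with the channel lock held, so a client callback must not sleep.
 * Completing a struct completion is the usual pattern; the names here
 * are hypothetical.
 */
static void pl08x_example_callback(void *param)
{
        struct completion *done = param;

        complete(done);         /* safe in atomic context */
}

/*
 * Attached by the client before submitting:
 *      tx->callback = pl08x_example_callback;
 *      tx->callback_param = &done;
 */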
1634
1635 static irqreturn_t pl08x_irq(int irq, void *dev)
1636 {
1637         struct pl08x_driver_data *pl08x = dev;
1638         u32 mask = 0;
1639         u32 val;
1640         int i;
1641
1642         val = readl(pl08x->base + PL080_ERR_STATUS);
1643         if (val) {
1644                 /*
1645                  * An error interrupt (on one or more channels)
1646                  */
1647                 dev_err(&pl08x->adev->dev,
1648                         "%s error interrupt, register value 0x%08x\n",
1649                                 __func__, val);
1650                 /*
1651                  * Simply clear ALL PL08X error interrupts,
1652                  * regardless of channel and cause
1653                  * FIXME: should be 0x00000003 on PL081 really.
1654                  */
1655                 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
1656         }
1657         val = readl(pl08x->base + PL080_INT_STATUS);
1658         for (i = 0; i < pl08x->vd->channels; i++) {
1659                 if ((1 << i) & val) {
1660                         /* Locate physical channel */
1661                         struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1662                         struct pl08x_dma_chan *plchan = phychan->serving;
1663
1664                         /* Schedule tasklet on this channel, if in use */
1665                         if (plchan)
1666                                 tasklet_schedule(&plchan->tasklet);
1667                         mask |= (1 << i);
1668                 }
1669         }
1670         /*
1671          * Clear only the terminal interrupts on channels we processed
1672          */
1673         writel(mask, pl08x->base + PL080_TC_CLEAR);
1674
1675         return mask ? IRQ_HANDLED : IRQ_NONE;
1676 }
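
/*
 * A sketch of the per-variant error-clear mask suggested by the FIXME
 * in the handler above: derive the mask from the channel count rather
 * than hard-coding 0xFF, so a PL081 would write 0x03. This helper is
 * an assumption, not part of the driver.
 */
static u32 pl08x_err_clear_mask(const struct vendor_data *vd)
{
        return (1 << vd->channels) - 1;
}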
1677
1678 /*
1679  * Initialise the DMAC memcpy/slave channels.
1680  * Make a local wrapper to hold required data
1681  */
1682 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1683                                            struct dma_device *dmadev,
1684                                            unsigned int channels,
1685                                            bool slave)
1686 {
1687         struct pl08x_dma_chan *chan;
1688         int i;
1689
1690         INIT_LIST_HEAD(&dmadev->channels);
1691         /*
1692          * Register as many virtual channels as we have physical
1693          * channels; we won't always be able to use them all, but
1694          * the code will have to cope with that situation.
1695          */
1696         for (i = 0; i < channels; i++) {
1697                 chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
1698                 if (!chan) {
1699                         dev_err(&pl08x->adev->dev,
1700                                 "%s no memory for channel\n", __func__);
1701                         return -ENOMEM;
1702                 }
1703
1704                 chan->host = pl08x;
1705                 chan->state = PL08X_CHAN_IDLE;
1706
1707                 if (slave) {
1708                         chan->slave = true;
1709                         chan->name = pl08x->pd->slave_channels[i].bus_id;
1710                         chan->cd = &pl08x->pd->slave_channels[i];
1711                 } else {
1712                         chan->cd = &pl08x->pd->memcpy_channel;
1713                         chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1714                         if (!chan->name) {
1715                                 kfree(chan);
1716                                 return -ENOMEM;
1717                         }
1718                 }
1719                 if (chan->cd->circular_buffer) {
1720                         dev_err(&pl08x->adev->dev,
1721                                 "channel %s: circular buffers not supported\n",
1722                                 chan->name);
1723                         kfree(chan);
1724                         continue;
1725                 }
1726                 dev_info(&pl08x->adev->dev,
1727                          "initialize virtual channel \"%s\"\n",
1728                          chan->name);
1729
1730                 chan->chan.device = dmadev;
1731                 chan->chan.cookie = 0;
1732                 chan->lc = 0;
1733
1734                 spin_lock_init(&chan->lock);
1735                 INIT_LIST_HEAD(&chan->desc_list);
1736                 tasklet_init(&chan->tasklet, pl08x_tasklet,
1737                              (unsigned long) chan);
1738
1739                 list_add_tail(&chan->chan.device_node, &dmadev->channels);
1740         }
1741         dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1742                  i, slave ? "slave" : "memcpy");
1743         return i;
1744 }
1745
1746 static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1747 {
1748         struct pl08x_dma_chan *chan = NULL;
1749         struct pl08x_dma_chan *next;
1750
1751         list_for_each_entry_safe(chan,
1752                                  next, &dmadev->channels, chan.device_node) {
1753                 list_del(&chan->chan.device_node);
1754                 kfree(chan);
1755         }
1756 }
1757
1758 #ifdef CONFIG_DEBUG_FS
1759 static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
1760 {
1761         switch (state) {
1762         case PL08X_CHAN_IDLE:
1763                 return "idle";
1764         case PL08X_CHAN_RUNNING:
1765                 return "running";
1766         case PL08X_CHAN_PAUSED:
1767                 return "paused";
1768         case PL08X_CHAN_WAITING:
1769                 return "waiting";
1770         default:
1771                 break;
1772         }
1773         return "UNKNOWN STATE";
1774 }
1775
1776 static int pl08x_debugfs_show(struct seq_file *s, void *data)
1777 {
1778         struct pl08x_driver_data *pl08x = s->private;
1779         struct pl08x_dma_chan *chan;
1780         struct pl08x_phy_chan *ch;
1781         unsigned long flags;
1782         int i;
1783
1784         seq_printf(s, "PL08x physical channels:\n");
1785         seq_printf(s, "CHANNEL:\tUSER:\n");
1786         seq_printf(s, "--------\t-----\n");
1787         for (i = 0; i < pl08x->vd->channels; i++) {
1788                 struct pl08x_dma_chan *virt_chan;
1789
1790                 ch = &pl08x->phy_chans[i];
1791
1792                 spin_lock_irqsave(&ch->lock, flags);
1793                 virt_chan = ch->serving;
1794
1795                 seq_printf(s, "%d\t\t%s\n",
1796                            ch->id, virt_chan ? virt_chan->name : "(none)");
1797
1798                 spin_unlock_irqrestore(&ch->lock, flags);
1799         }
1800
1801         seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1802         seq_printf(s, "CHANNEL:\tSTATE:\n");
1803         seq_printf(s, "--------\t------\n");
1804         list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1805                 seq_printf(s, "%s\t\t%s\n", chan->name,
1806                            pl08x_state_str(chan->state));
1807         }
1808
1809         seq_printf(s, "\nPL08x virtual slave channels:\n");
1810         seq_printf(s, "CHANNEL:\tSTATE:\n");
1811         seq_printf(s, "--------\t------\n");
1812         list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1813                 seq_printf(s, "%s\t\t%s\n", chan->name,
1814                            pl08x_state_str(chan->state));
1815         }
1816
1817         return 0;
1818 }
1819
1820 static int pl08x_debugfs_open(struct inode *inode, struct file *file)
1821 {
1822         return single_open(file, pl08x_debugfs_show, inode->i_private);
1823 }
1824
1825 static const struct file_operations pl08x_debugfs_operations = {
1826         .open           = pl08x_debugfs_open,
1827         .read           = seq_read,
1828         .llseek         = seq_lseek,
1829         .release        = single_release,
1830 };
1831
1832 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1833 {
1834         /* Expose a simple debugfs interface to view channel state */
1835         (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
1836                                    NULL, pl08x,
1837                                    &pl08x_debugfs_operations);
1838 }
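
/*
 * For reference, the debugfs file created above renders roughly as
 * follows; the channel names and states shown are hypothetical:
 *
 *      PL08x physical channels:
 *      CHANNEL:        USER:
 *      --------        -----
 *      0               memcpy0
 *      1               (none)
 *
 *      PL08x virtual memcpy channels:
 *      CHANNEL:        STATE:
 *      --------        ------
 *      memcpy0         running
 *      memcpy1         idle
 */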
1839
1840 #else
1841 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1842 {
1843 }
1844 #endif
1845
1846 static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
1847 {
1848         struct pl08x_driver_data *pl08x;
1849         const struct vendor_data *vd = id->data;
1850         int ret = 0;
1851         int i;
1852
1853         ret = amba_request_regions(adev, NULL);
1854         if (ret)
1855                 return ret;
1856
1857         /* Create the driver state holder */
1858         pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
1859         if (!pl08x) {
1860                 ret = -ENOMEM;
1861                 goto out_no_pl08x;
1862         }
1863
1864         /* Initialize memcpy engine */
1865         dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1866         pl08x->memcpy.dev = &adev->dev;
1867         pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1868         pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
1869         pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
1870         pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1871         pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
1872         pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
1873         pl08x->memcpy.device_control = pl08x_control;
1874
1875         /* Initialize slave engine */
1876         dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
1877         pl08x->slave.dev = &adev->dev;
1878         pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1879         pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
1880         pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1881         pl08x->slave.device_tx_status = pl08x_dma_tx_status;
1882         pl08x->slave.device_issue_pending = pl08x_issue_pending;
1883         pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
1884         pl08x->slave.device_control = pl08x_control;
1885
1886         /* Get the platform data */
1887         pl08x->pd = dev_get_platdata(&adev->dev);
1888         if (!pl08x->pd) {
1889                 dev_err(&adev->dev, "no platform data supplied\n");
1890                 ret = -EINVAL;
1891                 goto out_no_platdata;
1892         }
1893         /* Assign useful pointers to the driver state */
1894         pl08x->adev = adev;
1895         pl08x->vd = vd;
1896
1897         /* A DMA memory pool for LLIs, aligned to PL08X_ALIGN bytes */
1898         pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
1899                         PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
1900         if (!pl08x->pool) {
1901                 ret = -ENOMEM;
1902                 goto out_no_lli_pool;
1903         }
1904
1905         spin_lock_init(&pl08x->lock);
1906
1907         pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
1908         if (!pl08x->base) {
1909                 ret = -ENOMEM;
1910                 goto out_no_ioremap;
1911         }
1912
1913         /* Turn on the PL08x */
1914         pl08x_ensure_on(pl08x);
1915
1916         /*
1917          * Attach the interrupt handler
1918          */
1919         writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
1920         writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
1921
1922         ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
1923                           DRIVER_NAME, pl08x);
1924         if (ret) {
1925                 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
1926                         __func__, adev->irq[0]);
1927                 goto out_no_irq;
1928         }
1929
1930         /* Initialize physical channels */
1931         pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
1932                         GFP_KERNEL);
1933         if (!pl08x->phy_chans) {
1934                 dev_err(&adev->dev, "%s failed to allocate "
1935                         "physical channel holders\n", __func__);
1936                 ret = -ENOMEM;
1937                 goto out_no_phychans;
1938         }
1939
1940         for (i = 0; i < vd->channels; i++) {
1941                 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
1942
1943                 ch->id = i;
1944                 ch->base = pl08x->base + PL080_Cx_BASE(i);
1945                 spin_lock_init(&ch->lock);
1946                 ch->serving = NULL;
1947                 ch->signal = -1;
1948                 dev_info(&adev->dev,
1949                          "physical channel %d is %s\n", i,
1950                          pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
1951         }
1952
1953         /* Register as many memcpy channels as there are physical channels */
1954         ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
1955                                               pl08x->vd->channels, false);
1956         if (ret <= 0) {
1957                 dev_warn(&pl08x->adev->dev,
1958                          "%s failed to enumerate memcpy channels - %d\n",
1959                          __func__, ret);
1960                 goto out_no_memcpy;
1961         }
1962         pl08x->memcpy.chancnt = ret;
1963
1964         /* Register slave channels */
1965         ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
1966                                               pl08x->pd->num_slave_channels,
1967                                               true);
1968         if (ret <= 0) {
1969                 dev_warn(&pl08x->adev->dev,
1970                         "%s failed to enumerate slave channels - %d\n",
1971                                 __func__, ret);
1972                 goto out_no_slave;
1973         }
1974         pl08x->slave.chancnt = ret;
1975
1976         ret = dma_async_device_register(&pl08x->memcpy);
1977         if (ret) {
1978                 dev_warn(&pl08x->adev->dev,
1979                         "%s failed to register memcpy as an async device - %d\n",
1980                         __func__, ret);
1981                 goto out_no_memcpy_reg;
1982         }
1983
1984         ret = dma_async_device_register(&pl08x->slave);
1985         if (ret) {
1986                 dev_warn(&pl08x->adev->dev,
1987                         "%s failed to register slave as an async device - %d\n",
1988                         __func__, ret);
1989                 goto out_no_slave_reg;
1990         }
1991
1992         amba_set_drvdata(adev, pl08x);
1993         init_pl08x_debugfs(pl08x);
1994         dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
1995                  amba_part(adev), amba_rev(adev),
1996                  (unsigned long long)adev->res.start, adev->irq[0]);
1997         return 0;
1998
1999 out_no_slave_reg:
2000         dma_async_device_unregister(&pl08x->memcpy);
2001 out_no_memcpy_reg:
2002         pl08x_free_virtual_channels(&pl08x->slave);
2003 out_no_slave:
2004         pl08x_free_virtual_channels(&pl08x->memcpy);
2005 out_no_memcpy:
2006         kfree(pl08x->phy_chans);
2007 out_no_phychans:
2008         free_irq(adev->irq[0], pl08x);
2009 out_no_irq:
2010         iounmap(pl08x->base);
2011 out_no_ioremap:
2012         dma_pool_destroy(pl08x->pool);
2013 out_no_lli_pool:
2014 out_no_platdata:
2015         kfree(pl08x);
2016 out_no_pl08x:
2017         amba_release_regions(adev);
2018         return ret;
2019 }
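
/*
 * A sketch of the platform data a board file might hand to the probe
 * routine above. The bus_id and FIFO address are hypothetical; only
 * fields this file actually dereferences (bus_id, addr, slave_channels,
 * num_slave_channels, memcpy_channel) are shown.
 */
static struct pl08x_channel_data pl08x_example_slaves[] = {
        {
                .bus_id = "uart0_tx",
                .addr = 0x80120000,     /* hypothetical peripheral FIFO */
        },
};

static struct pl08x_platform_data pl08x_example_pd = {
        .slave_channels = pl08x_example_slaves,
        .num_slave_channels = ARRAY_SIZE(pl08x_example_slaves),
        /* .memcpy_channel carries the default cctl/addr for m2m use */
};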
2020
2021 /* PL080 has 8 channels and the PL081 has just 2 */
2022 static struct vendor_data vendor_pl080 = {
2023         .channels = 8,
2024         .dualmaster = true,
2025 };
2026
2027 static struct vendor_data vendor_pl081 = {
2028         .channels = 2,
2029         .dualmaster = false,
2030 };
2031
2032 static struct amba_id pl08x_ids[] = {
2033         /* PL080 */
2034         {
2035                 .id     = 0x00041080,
2036                 .mask   = 0x000fffff,
2037                 .data   = &vendor_pl080,
2038         },
2039         /* PL081 */
2040         {
2041                 .id     = 0x00041081,
2042                 .mask   = 0x000fffff,
2043                 .data   = &vendor_pl081,
2044         },
2045         /* Nomadik 8815 PL080 variant */
2046         {
2047                 .id     = 0x00280880,
2048                 .mask   = 0x00ffffff,
2049                 .data   = &vendor_pl080,
2050         },
2051         { 0, 0 },
2052 };
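
/*
 * AMBA matching works as (periphid & mask) == id: for example a
 * Nomadik part reading 0x00280880 from its peripheral ID registers
 * matches the third entry above, while 0x00041080 and 0x00041081
 * select the stock PL080 and PL081 vendor data respectively.
 */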
2053
2054 static struct amba_driver pl08x_amba_driver = {
2055         .drv.name       = DRIVER_NAME,
2056         .id_table       = pl08x_ids,
2057         .probe          = pl08x_probe,
2058 };
2059
2060 static int __init pl08x_init(void)
2061 {
2062         int retval;
2063         retval = amba_driver_register(&pl08x_amba_driver);
2064         if (retval)
2065                 printk(KERN_WARNING DRIVER_NAME
2066                        ": failed to register as an AMBA device (%d)\n",
2067                        retval);
2068         return retval;
2069 }
2070 subsys_initcall(pl08x_init);