/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems).
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller"
 * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more).  See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(private) ({				\
		struct dw_dma_slave *__slave = (private);	\
		int dms = __slave ? __slave->dst_master : 0;	\
		int sms = __slave ? __slave->src_master : 1;	\
		u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
		u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
								\
		(DWC_CTLL_DST_MSIZE(dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(dms)				\
		 | DWC_CTLL_SMS(sms));				\
	})
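
/*
 * Note: with a NULL `private' (the memcpy case) the macro above picks
 * destination master 0, source master 1 and DW_DMA_MSIZE_16 bursts on
 * both sides, with LLP updates enabled in both directions.
 */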

/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool.  These
 * descriptors -- and associated data -- are cacheable.  We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc	*child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

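	/* Cookies stay positive: skip zero and wrap back to 1 on overflow. */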
	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

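	/*
	 * Only the first LLI address needs to be programmed here; with
	 * LLP_D_EN/LLP_S_EN set, the controller fetches the rest of the
	 * channel state from the linked LLIs themselves.
	 */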
	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback		callback = NULL;
	void				*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->completed = txd->cookie;
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check this descriptor's own address */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check this descriptor's LLP */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
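
/*
 * Illustrative sketch only: a cyclic client that prepared an RX buffer
 * at buf_addr (a caller-side name, not part of this API) can estimate
 * how far the controller has written with
 *
 *	size_t done = dw_dma_get_dst_addr(chan) - buf_addr;
 *
 * (and dw_dma_get_src_addr() likewise for DMA_TO_DEVICE). The result is
 * only a snapshot; the channel keeps running while it is read.
 */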

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan->private)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
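
/*
 * Illustrative only: a dmaengine client would exercise the memcpy path
 * roughly as follows, with dst/src being DMA addresses the caller has
 * already mapped:
 *
 *	desc = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_PREP_INTERRUPT);
 *	if (desc)
 *		cookie = desc->tx_submit(desc);
 */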

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC(dws->fc));
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_FROM_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC(dws->fc));

		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
						"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	u32			cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else
		return -ENXIO;

	return 0;
}
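
/*
 * Illustrative only: clients reach the hooks above through the generic
 * dmaengine control entry point, e.g.
 *
 *	chan->device->device_control(chan, DMA_PAUSE, 0);
 *	...
 *	chan->device->device_control(chan, DMA_RESUME, 0);
 *
 * DMA_TERMINATE_ALL also flushes everything that was submitted or queued.
 */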

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int			ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
				dwc_first_active(dwc)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_dma_slave	*dws;
	int			i;
	u32			cfghi;
	u32			cfglo;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	cfghi = DWC_CFGH_FIFO_MODE;
	cfglo = 0;

	dws = chan->private;
	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_data_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	struct dw_dma_slave		*dws = chan->private;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);
	reg_width = dws->reg_width;
	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_TO_DEVICE:
			desc->lli.dar = dws->tx_reg;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		case DMA_FROM_DEVICE:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = dws->rx_reg;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
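
/*
 * A minimal usage sketch of the cyclic API above (illustrative only;
 * my_period_done/my_data are hypothetical caller-side names):
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *			DMA_FROM_DEVICE);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_data;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */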

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}

static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->completed = 1;
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = 7 - i;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;
	struct resource		*io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
};

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");