/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"

#define DRV_NAME "pch-dma"

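/*
 * Control register layout (as encoded by the macros below): each channel
 * owns a 4-bit field in CTL0 (channels 0-7) or CTL3 (channels 8-11).
 * Bits [1:0] of that field select the mode (disable, scatter-gather or
 * one-shot) and bit [2] selects the transfer direction.
 */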
#define DMA_CTL0_DISABLE                0x0
#define DMA_CTL0_SG                     0x1
#define DMA_CTL0_ONESHOT                0x2
#define DMA_CTL0_MODE_MASK_BITS         0x3
#define DMA_CTL0_DIR_SHIFT_BITS         2
#define DMA_CTL0_BITS_PER_CH            4

#define DMA_CTL2_START_SHIFT_BITS       8
#define DMA_CTL2_IRQ_ENABLE_MASK        ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

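/*
 * Status register layout (as used by pdc_get_status0/2 and pd_irq below):
 * STS0 carries the interrupt flags for channels 0-7 in bits [7:0], the
 * corresponding error flags in bits [15:8], and a 2-bit channel state per
 * channel starting at bit 16.  STS2 holds the equivalent flags for
 * channels 8-11.
 */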
#define DMA_STATUS_IDLE                 0x0
#define DMA_STATUS_DESC_READ            0x1
#define DMA_STATUS_WAIT                 0x2
#define DMA_STATUS_ACCESS               0x3
#define DMA_STATUS_BITS_PER_CH          2
#define DMA_STATUS_MASK_BITS            0x3
#define DMA_STATUS_SHIFT_BITS           16
#define DMA_STATUS_IRQ(x)               (0x1 << (x))
#define DMA_STATUS0_ERR(x)              (0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)              (0x1 << (x))

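/*
 * Hardware descriptor encoding (see pd_prep_slave_sg): the transfer width
 * is encoded above the count in the size field (the count itself is
 * limited to 0x3FF for 1- and 2-byte accesses and 0x7FF for 4-byte
 * accesses), while the two low bits of the next field say whether another
 * descriptor follows and whether completion raises an interrupt; the
 * remaining bits of next hold the bus address of the following descriptor.
 */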
#define DMA_DESC_WIDTH_SHIFT_BITS       12
#define DMA_DESC_WIDTH_1_BYTE           (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES          (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES          (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE       0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES      0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES      0x7FF
#define DMA_DESC_END_WITHOUT_IRQ        0x0
#define DMA_DESC_END_WITH_IRQ           0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ     0x2
#define DMA_DESC_FOLLOW_WITH_IRQ        0x3

#define MAX_CHAN_NR                     12

#define DMA_MASK_CTL0_MODE      0x33333333
#define DMA_MASK_CTL2_MODE      0x00003333

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
        u32     dev_addr;
        u32     mem_addr;
        u32     size;
        u32     next;
};

struct pch_dma_regs {
        u32     dma_ctl0;
        u32     dma_ctl1;
        u32     dma_ctl2;
        u32     dma_ctl3;
        u32     dma_sts0;
        u32     dma_sts1;
        u32     dma_sts2;
        u32     reserved3;
        struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
        struct pch_dma_desc_regs regs;
        struct dma_async_tx_descriptor txd;
        struct list_head        desc_node;
        struct list_head        tx_list;
};

struct pch_dma_chan {
        struct dma_chan         chan;
        void __iomem *membase;
        enum dma_transfer_direction dir;
        struct tasklet_struct   tasklet;
        unsigned long           err_status;

        spinlock_t              lock;

        struct list_head        active_list;
        struct list_head        queue;
        struct list_head        free_list;
        unsigned int            descs_allocated;
};

#define PDC_DEV_ADDR    0x00
#define PDC_MEM_ADDR    0x04
#define PDC_SIZE        0x08
#define PDC_NEXT        0x0C

#define channel_readl(pdc, name) \
        readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
        writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
        struct dma_device       dma;
        void __iomem *membase;
        struct pci_pool         *pool;
        struct pch_dma_regs     regs;
        struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
        struct pch_dma_chan     channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0    0x00
#define PCH_DMA_CTL1    0x04
#define PCH_DMA_CTL2    0x08
#define PCH_DMA_CTL3    0x0C
#define PCH_DMA_STS0    0x10
#define PCH_DMA_STS1    0x14
#define PCH_DMA_STS2    0x18

#define dma_readl(pd, name) \
        readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
        writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
        return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
        return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
        return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
        return list_first_entry(&pd_chan->active_list,
                                struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
        return list_first_entry(&pd_chan->queue,
                                struct pch_dma_desc, desc_node);
}

static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;
        int pos;

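        /*
         * Interrupt-enable bits for channels 0-7 sit in the low byte of
         * CTL2; channels 8-11 use bits 16-19, hence the extra offset of 8
         * applied below.
         */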
        if (chan->chan_id < 8)
                pos = chan->chan_id;
        else
                pos = chan->chan_id + 8;

        val = dma_readl(pd, CTL2);

        if (enable)
                val |= 0x1 << pos;
        else
                val &= ~(0x1 << pos);

        dma_writel(pd, CTL2, val);

        dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
                chan->chan_id, val);
}

static void pdc_set_dir(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;
        u32 mask_mode;
        u32 mask_ctl;

        if (chan->chan_id < 8) {
                val = dma_readl(pd, CTL0);

                mask_mode = DMA_CTL0_MODE_MASK_BITS <<
                                        (DMA_CTL0_BITS_PER_CH * chan->chan_id);
                mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
                                       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
                val &= mask_mode;
                if (pd_chan->dir == DMA_MEM_TO_DEV)
                        val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                                       DMA_CTL0_DIR_SHIFT_BITS);
                else
                        val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                                         DMA_CTL0_DIR_SHIFT_BITS));

                val |= mask_ctl;
                dma_writel(pd, CTL0, val);
        } else {
                int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
                val = dma_readl(pd, CTL3);

                mask_mode = DMA_CTL0_MODE_MASK_BITS <<
                                                (DMA_CTL0_BITS_PER_CH * ch);
                mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
                                                 (DMA_CTL0_BITS_PER_CH * ch));
                val &= mask_mode;
                if (pd_chan->dir == DMA_MEM_TO_DEV)
                        val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
                                       DMA_CTL0_DIR_SHIFT_BITS);
                else
                        val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
                                         DMA_CTL0_DIR_SHIFT_BITS));
                val |= mask_ctl;
                dma_writel(pd, CTL3, val);
        }

        dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
                chan->chan_id, val);
}

static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;
        u32 mask_ctl;
        u32 mask_dir;

        if (chan->chan_id < 8) {
                mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
                           (DMA_CTL0_BITS_PER_CH * chan->chan_id));
                mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                                 DMA_CTL0_DIR_SHIFT_BITS);
                val = dma_readl(pd, CTL0);
                val &= mask_dir;
                val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
                val |= mask_ctl;
                dma_writel(pd, CTL0, val);
        } else {
                int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
                mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
                                                 (DMA_CTL0_BITS_PER_CH * ch));
                mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
                                 DMA_CTL0_DIR_SHIFT_BITS);
                val = dma_readl(pd, CTL3);
                val &= mask_dir;
                val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
                val |= mask_ctl;
                dma_writel(pd, CTL3, val);
        }

        dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
                chan->chan_id, val);
}

static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
        struct pch_dma *pd = to_pd(pd_chan->chan.device);
        u32 val;

        val = dma_readl(pd, STS0);
        return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
                        DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
        struct pch_dma *pd = to_pd(pd_chan->chan.device);
        u32 val;

        val = dma_readl(pd, STS2);
        return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
                        DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
        u32 sts;

        if (pd_chan->chan.chan_id < 8)
                sts = pdc_get_status0(pd_chan);
        else
                sts = pdc_get_status2(pd_chan);

        if (sts == DMA_STATUS_IDLE)
                return true;
        else
                return false;
}

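/*
 * Program the channel for the given descriptor.  A descriptor with an
 * empty tx_list is written straight into the four channel registers and
 * run in one-shot mode; a chained descriptor only has its bus address
 * written to NEXT and the channel is switched to scatter-gather mode so
 * the hardware fetches the rest of the chain itself.
 */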
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
        if (!pdc_is_idle(pd_chan)) {
                dev_err(chan2dev(&pd_chan->chan),
                        "BUG: Attempt to start non-idle channel\n");
                return;
        }

        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
                pd_chan->chan.chan_id, desc->regs.dev_addr);
        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
                pd_chan->chan.chan_id, desc->regs.mem_addr);
        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
                pd_chan->chan.chan_id, desc->regs.size);
        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
                pd_chan->chan.chan_id, desc->regs.next);

        if (list_empty(&desc->tx_list)) {
                channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
                channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
                channel_writel(pd_chan, SIZE, desc->regs.size);
                channel_writel(pd_chan, NEXT, desc->regs.next);
                pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
        } else {
                channel_writel(pd_chan, NEXT, desc->txd.phys);
                pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
        }
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
                               struct pch_dma_desc *desc)
{
        struct dma_async_tx_descriptor *txd = &desc->txd;
        dma_async_tx_callback callback = txd->callback;
        void *param = txd->callback_param;

        list_splice_init(&desc->tx_list, &pd_chan->free_list);
        list_move(&desc->desc_node, &pd_chan->free_list);

        if (callback)
                callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
        struct pch_dma_desc *desc, *_d;
        LIST_HEAD(list);

        BUG_ON(!pdc_is_idle(pd_chan));

        if (!list_empty(&pd_chan->queue))
                pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

        list_splice_init(&pd_chan->active_list, &list);
        list_splice_init(&pd_chan->queue, &pd_chan->active_list);

        list_for_each_entry_safe(desc, _d, &list, desc_node)
                pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
        struct pch_dma_desc *bad_desc;

        bad_desc = pdc_first_active(pd_chan);
        list_del(&bad_desc->desc_node);

        list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

        if (!list_empty(&pd_chan->active_list))
                pdc_dostart(pd_chan, pdc_first_active(pd_chan));

        dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
        dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
                 bad_desc->txd.cookie);

        pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
        if (list_empty(&pd_chan->active_list) ||
                list_is_singular(&pd_chan->active_list)) {
                pdc_complete_all(pd_chan);
        } else {
                pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
                pdc_dostart(pd_chan, pdc_first_active(pd_chan));
        }
}

static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
                                      struct pch_dma_desc *desc)
{
        dma_cookie_t cookie = pd_chan->chan.cookie;

        if (++cookie < 0)
                cookie = 1;

        pd_chan->chan.cookie = cookie;
        desc->txd.cookie = cookie;

        return cookie;
}

static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct pch_dma_desc *desc = to_pd_desc(txd);
        struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
        dma_cookie_t cookie;

        spin_lock(&pd_chan->lock);
        cookie = pdc_assign_cookie(pd_chan, desc);

        if (list_empty(&pd_chan->active_list)) {
                list_add_tail(&desc->desc_node, &pd_chan->active_list);
                pdc_dostart(pd_chan, desc);
        } else {
                list_add_tail(&desc->desc_node, &pd_chan->queue);
        }

        spin_unlock(&pd_chan->lock);
        return 0;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
        struct pch_dma_desc *desc = NULL;
        struct pch_dma *pd = to_pd(chan->device);
        dma_addr_t addr;

        desc = pci_pool_alloc(pd->pool, flags, &addr);
        if (desc) {
                memset(desc, 0, sizeof(struct pch_dma_desc));
                INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = pd_tx_submit;
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.phys = addr;
        }

        return desc;
}

static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
        struct pch_dma_desc *desc, *_d;
        struct pch_dma_desc *ret = NULL;
        int i = 0;

        spin_lock(&pd_chan->lock);
        list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
        }
        spin_unlock(&pd_chan->lock);
        dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

        if (!ret) {
                ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
                if (ret) {
                        spin_lock(&pd_chan->lock);
                        pd_chan->descs_allocated++;
                        spin_unlock(&pd_chan->lock);
                } else {
                        dev_err(chan2dev(&pd_chan->chan),
                                "failed to alloc desc\n");
                }
        }

        return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
                         struct pch_dma_desc *desc)
{
        if (desc) {
                spin_lock(&pd_chan->lock);
                list_splice_init(&desc->tx_list, &pd_chan->free_list);
                list_add(&desc->desc_node, &pd_chan->free_list);
                spin_unlock(&pd_chan->lock);
        }
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_desc *desc;
        LIST_HEAD(tmp_list);
        int i;

        if (!pdc_is_idle(pd_chan)) {
                dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
                return -EIO;
        }

        if (!list_empty(&pd_chan->free_list))
                return pd_chan->descs_allocated;

        for (i = 0; i < init_nr_desc_per_channel; i++) {
                desc = pdc_alloc_desc(chan, GFP_KERNEL);

                if (!desc) {
                        dev_warn(chan2dev(chan),
                                "Only allocated %d initial descriptors\n", i);
                        break;
                }

                list_add_tail(&desc->desc_node, &tmp_list);
        }

        spin_lock_irq(&pd_chan->lock);
        list_splice(&tmp_list, &pd_chan->free_list);
        pd_chan->descs_allocated = i;
        chan->completed_cookie = chan->cookie = 1;
        spin_unlock_irq(&pd_chan->lock);

        pdc_enable_irq(chan, 1);

        return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma *pd = to_pd(chan->device);
        struct pch_dma_desc *desc, *_d;
        LIST_HEAD(tmp_list);

        BUG_ON(!pdc_is_idle(pd_chan));
        BUG_ON(!list_empty(&pd_chan->active_list));
        BUG_ON(!list_empty(&pd_chan->queue));

        spin_lock_irq(&pd_chan->lock);
        list_splice_init(&pd_chan->free_list, &tmp_list);
        pd_chan->descs_allocated = 0;
        spin_unlock_irq(&pd_chan->lock);

        list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
                pci_pool_free(pd->pool, desc, desc->txd.phys);

        pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                    struct dma_tx_state *txstate)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_completed;
        int ret;

        spin_lock_irq(&pd_chan->lock);
        last_completed = chan->completed_cookie;
        last_used = chan->cookie;
        spin_unlock_irq(&pd_chan->lock);

        ret = dma_async_is_complete(cookie, last_completed, last_used);

        dma_set_tx_state(txstate, last_completed, last_used, 0);

        return ret;
}

static void pd_issue_pending(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);

        if (pdc_is_idle(pd_chan)) {
                spin_lock(&pd_chan->lock);
                pdc_advance_work(pd_chan);
                spin_unlock(&pd_chan->lock);
        }
}

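/*
 * Illustrative client-side sketch (not part of this driver): a peripheral
 * driver typically describes its FIFO registers to this engine with a
 * struct pch_dma_slave hung off chan->private before preparing a transfer,
 * for example
 *
 *      slave.tx_reg = <bus address of the device TX register>;
 *      slave.rx_reg = <bus address of the device RX register>;
 *      slave.width  = PCH_DMA_WIDTH_1_BYTE;
 *      chan->private = &slave;
 *      desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *                                     DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *
 * pd_prep_slave_sg() below then reads rx_reg/tx_reg and width from that
 * structure.
 */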
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
                        struct scatterlist *sgl, unsigned int sg_len,
                        enum dma_transfer_direction direction, unsigned long flags)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_slave *pd_slave = chan->private;
        struct pch_dma_desc *first = NULL;
        struct pch_dma_desc *prev = NULL;
        struct pch_dma_desc *desc = NULL;
        struct scatterlist *sg;
        dma_addr_t reg;
        int i;

        if (unlikely(!sg_len)) {
                dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
                return NULL;
        }

        if (direction == DMA_DEV_TO_MEM)
                reg = pd_slave->rx_reg;
        else if (direction == DMA_MEM_TO_DEV)
                reg = pd_slave->tx_reg;
        else
                return NULL;

        pd_chan->dir = direction;
        pdc_set_dir(chan);

        for_each_sg(sgl, sg, sg_len, i) {
                desc = pdc_desc_get(pd_chan);

                if (!desc)
                        goto err_desc_get;

                desc->regs.dev_addr = reg;
                desc->regs.mem_addr = sg_phys(sg);
                desc->regs.size = sg_dma_len(sg);
                desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

                switch (pd_slave->width) {
                case PCH_DMA_WIDTH_1_BYTE:
                        if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
                                goto err_desc_get;
                        desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
                        break;
                case PCH_DMA_WIDTH_2_BYTES:
                        if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
                                goto err_desc_get;
                        desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
                        break;
                case PCH_DMA_WIDTH_4_BYTES:
                        if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
                                goto err_desc_get;
                        desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
                        break;
                default:
                        goto err_desc_get;
                }

                if (!first) {
                        first = desc;
                } else {
                        prev->regs.next |= desc->txd.phys;
                        list_add_tail(&desc->desc_node, &first->tx_list);
                }

                prev = desc;
        }

        if (flags & DMA_PREP_INTERRUPT)
                desc->regs.next = DMA_DESC_END_WITH_IRQ;
        else
                desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

        first->txd.cookie = -EBUSY;
        desc->txd.flags = flags;

        return &first->txd;

err_desc_get:
        dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
        pdc_desc_put(pd_chan, first);
        return NULL;
}

static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                             unsigned long arg)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_desc *desc, *_d;
        LIST_HEAD(list);

        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        spin_lock_irq(&pd_chan->lock);

        pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

        list_splice_init(&pd_chan->active_list, &list);
        list_splice_init(&pd_chan->queue, &list);

        list_for_each_entry_safe(desc, _d, &list, desc_node)
                pdc_chain_complete(pd_chan, desc);

        spin_unlock_irq(&pd_chan->lock);

        return 0;
}

static void pdc_tasklet(unsigned long data)
{
        struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
        unsigned long flags;

        if (!pdc_is_idle(pd_chan)) {
                dev_err(chan2dev(&pd_chan->chan),
                        "BUG: handle non-idle channel in tasklet\n");
                return;
        }

        spin_lock_irqsave(&pd_chan->lock, flags);
        if (test_and_clear_bit(0, &pd_chan->err_status))
                pdc_handle_error(pd_chan);
        else
                pdc_advance_work(pd_chan);
        spin_unlock_irqrestore(&pd_chan->lock, flags);
}

static irqreturn_t pd_irq(int irq, void *devid)
{
        struct pch_dma *pd = (struct pch_dma *)devid;
        struct pch_dma_chan *pd_chan;
        u32 sts0;
        u32 sts2;
        int i;
        int ret0 = IRQ_NONE;
        int ret2 = IRQ_NONE;

        sts0 = dma_readl(pd, STS0);
        sts2 = dma_readl(pd, STS2);

        dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

        for (i = 0; i < pd->dma.chancnt; i++) {
                pd_chan = &pd->channels[i];

                if (i < 8) {
                        if (sts0 & DMA_STATUS_IRQ(i)) {
                                if (sts0 & DMA_STATUS0_ERR(i))
                                        set_bit(0, &pd_chan->err_status);

                                tasklet_schedule(&pd_chan->tasklet);
                                ret0 = IRQ_HANDLED;
                        }
                } else {
                        if (sts2 & DMA_STATUS_IRQ(i - 8)) {
                                if (sts2 & DMA_STATUS2_ERR(i))
                                        set_bit(0, &pd_chan->err_status);

                                tasklet_schedule(&pd_chan->tasklet);
                                ret2 = IRQ_HANDLED;
                        }
                }
        }

        /* clear interrupt bits in status register */
        if (ret0)
                dma_writel(pd, STS0, sts0);
        if (ret2)
                dma_writel(pd, STS2, sts2);

        return ret0 | ret2;
}

#ifdef CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
        struct pch_dma_chan *pd_chan;
        struct dma_chan *chan, *_c;
        int i = 0;

        pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
        pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
        pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
        pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

        list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
                pd_chan = to_pd_chan(chan);

                pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
                pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
                pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
                pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

                i++;
        }
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
        struct pch_dma_chan *pd_chan;
        struct dma_chan *chan, *_c;
        int i = 0;

        dma_writel(pd, CTL0, pd->regs.dma_ctl0);
        dma_writel(pd, CTL1, pd->regs.dma_ctl1);
        dma_writel(pd, CTL2, pd->regs.dma_ctl2);
        dma_writel(pd, CTL3, pd->regs.dma_ctl3);

        list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
                pd_chan = to_pd_chan(chan);

                channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
                channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
                channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
                channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

                i++;
        }
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct pch_dma *pd = pci_get_drvdata(pdev);

        if (pd)
                pch_dma_save_regs(pd);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
        struct pch_dma *pd = pci_get_drvdata(pdev);
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        err = pci_enable_device(pdev);
        if (err) {
                dev_dbg(&pdev->dev, "failed to enable device\n");
                return err;
        }

        if (pd)
                pch_dma_restore_regs(pd);

        return 0;
}
#endif

static int __devinit pch_dma_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
{
        struct pch_dma *pd;
        struct pch_dma_regs *regs;
        unsigned int nr_channels;
        int err;
        int i;

        nr_channels = id->driver_data;
        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return -ENOMEM;

        pci_set_drvdata(pdev, pd);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device\n");
                goto err_free_mem;
        }

        if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Cannot find proper base address\n");
                goto err_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
                goto err_disable_pdev;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "Cannot set proper DMA config\n");
                goto err_free_res;
        }

        regs = pd->membase = pci_iomap(pdev, 1, 0);
        if (!pd->membase) {
                dev_err(&pdev->dev, "Cannot map MMIO registers\n");
                err = -ENOMEM;
                goto err_free_res;
        }

        pci_set_master(pdev);

        err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
        if (err) {
                dev_err(&pdev->dev, "Failed to request IRQ\n");
                goto err_iounmap;
        }

        pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
                                   sizeof(struct pch_dma_desc), 4, 0);
        if (!pd->pool) {
                dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
                err = -ENOMEM;
                goto err_free_irq;
        }

        pd->dma.dev = &pdev->dev;

        INIT_LIST_HEAD(&pd->dma.channels);

        for (i = 0; i < nr_channels; i++) {
                struct pch_dma_chan *pd_chan = &pd->channels[i];

                pd_chan->chan.device = &pd->dma;
                pd_chan->chan.cookie = 1;

                pd_chan->membase = &regs->desc[i];

                spin_lock_init(&pd_chan->lock);

                INIT_LIST_HEAD(&pd_chan->active_list);
                INIT_LIST_HEAD(&pd_chan->queue);
                INIT_LIST_HEAD(&pd_chan->free_list);

                tasklet_init(&pd_chan->tasklet, pdc_tasklet,
                             (unsigned long)pd_chan);
                list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
        }

        dma_cap_zero(pd->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
        dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

        pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
        pd->dma.device_free_chan_resources = pd_free_chan_resources;
        pd->dma.device_tx_status = pd_tx_status;
        pd->dma.device_issue_pending = pd_issue_pending;
        pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
        pd->dma.device_control = pd_device_control;

        err = dma_async_device_register(&pd->dma);
        if (err) {
                dev_err(&pdev->dev, "Failed to register DMA device\n");
                goto err_free_pool;
        }

        return 0;

err_free_pool:
        pci_pool_destroy(pd->pool);
err_free_irq:
        free_irq(pdev->irq, pd);
err_iounmap:
        pci_iounmap(pdev, pd->membase);
err_free_res:
        pci_release_regions(pdev);
err_disable_pdev:
        pci_disable_device(pdev);
err_free_mem:
        return err;
}

static void __devexit pch_dma_remove(struct pci_dev *pdev)
{
        struct pch_dma *pd = pci_get_drvdata(pdev);
        struct pch_dma_chan *pd_chan;
        struct dma_chan *chan, *_c;

        if (pd) {
                dma_async_device_unregister(&pd->dma);

                list_for_each_entry_safe(chan, _c, &pd->dma.channels,
                                         device_node) {
                        pd_chan = to_pd_chan(chan);

                        tasklet_disable(&pd_chan->tasklet);
                        tasklet_kill(&pd_chan->tasklet);
                }

                pci_pool_destroy(pd->pool);
                free_irq(pdev->irq, pd);
                pci_iounmap(pdev, pd->membase);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                kfree(pd);
        }
}

/* PCI Device ID of DMA device */
#define PCI_VENDOR_ID_ROHM              0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH   0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH   0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH   0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH  0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH   0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH   0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH   0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH   0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH   0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH   0x8815

DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
        { 0, },
};

static struct pci_driver pch_dma_driver = {
        .name           = DRV_NAME,
        .id_table       = pch_dma_id_table,
        .probe          = pch_dma_probe,
        .remove         = __devexit_p(pch_dma_remove),
#ifdef CONFIG_PM
        .suspend        = pch_dma_suspend,
        .resume         = pch_dma_resume,
#endif
};

static int __init pch_dma_init(void)
{
        return pci_register_driver(&pch_dma_driver);
}

static void __exit pch_dma_exit(void)
{
        pci_unregister_driver(&pch_dma_driver);
}

module_init(pch_dma_init);
module_exit(pch_dma_exit);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
                   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");