/* linux/arch/arm/mach-s3c64xx/dma.c
 *
 * Copyright 2009 Openmoko, Inc.
 * Copyright 2009 Simtec Electronics
 *      Ben Dooks <ben@simtec.co.uk>
 *      http://armlinux.simtec.co.uk/
 *
 * S3C64XX DMA core
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/amba/pl080.h>

#include <mach/dma.h>
#include <mach/map.h>
#include <mach/irqs.h>

#include "regs-sys.h"

/* dma channel state information */

struct s3c64xx_dmac {
        struct device           dev;
        struct clk              *clk;
        void __iomem            *regs;
        struct s3c2410_dma_chan *channels;
        enum dma_ch              chanbase;
};

/* pool to provide LLI buffers */
static struct dma_pool *dma_pool;

/* Debug configuration and code */

static unsigned char debug_show_buffs;

static void dbg_showchan(struct s3c2410_dma_chan *chan)
{
        pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n",
                 chan->number,
                 readl(chan->regs + PL080_CH_SRC_ADDR),
                 readl(chan->regs + PL080_CH_DST_ADDR),
                 readl(chan->regs + PL080_CH_LLI),
                 readl(chan->regs + PL080_CH_CONTROL),
                 readl(chan->regs + PL080S_CH_CONTROL2),
                 readl(chan->regs + PL080S_CH_CONFIG));
}

static void show_lli(struct pl080s_lli *lli)
{
        pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n",
                 lli, lli->src_addr, lli->dst_addr, lli->next_lli,
                 lli->control0, lli->control1);
}

static void dbg_showbuffs(struct s3c2410_dma_chan *chan)
{
        struct s3c64xx_dma_buff *ptr;

        pr_debug("DMA%d: buffs next %p, curr %p, end %p\n",
                 chan->number, chan->next, chan->curr, chan->end);

        ptr = chan->next;

        if (debug_show_buffs) {
                for (; ptr != NULL; ptr = ptr->next) {
                        pr_debug("DMA%d: %08x ",
                                 chan->number, ptr->lli_dma);
                        show_lli(ptr->lli);
                }
        }
}

/* End of Debug */

static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel)
{
        struct s3c2410_dma_chan *chan;
        unsigned int start, offs;

        start = 0;

        if (channel >= DMACH_PCM1_TX)
                start = 8;

        for (offs = 0; offs < 8; offs++) {
                chan = &s3c2410_chans[start + offs];
                if (!chan->in_use)
                        goto found;
        }

        return NULL;

found:
        s3c_dma_chan_map[channel] = chan;
        return chan;
}

int s3c2410_dma_config(enum dma_ch channel, int xferunit)
{
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

        if (chan == NULL)
                return -EINVAL;

        switch (xferunit) {
        case 1:
                chan->hw_width = 0;
                break;
        case 2:
                chan->hw_width = 1;
                break;
        case 4:
                chan->hw_width = 2;
                break;
        default:
                printk(KERN_ERR "%s: illegal transfer unit %d\n", __func__, xferunit);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(s3c2410_dma_config);
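
/* Usage sketch (illustrative only, not part of this file): select 16-bit
 * transfer units on a channel handle 'ch' previously returned by
 * s3c2410_dma_request(). 'ch' is a placeholder name.
 *
 *      if (s3c2410_dma_config(ch, 2) < 0)
 *              pr_err("cannot set 2-byte transfer unit\n");
 */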

static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
                                 struct pl080s_lli *lli,
                                 dma_addr_t data, int size)
{
        dma_addr_t src, dst;
        u32 control0, control1;

        switch (chan->source) {
        case DMA_FROM_DEVICE:
                src = chan->dev_addr;
                dst = data;
                control0 = PL080_CONTROL_SRC_AHB2;
                control0 |= PL080_CONTROL_DST_INCR;
                break;

        case DMA_TO_DEVICE:
                src = data;
                dst = chan->dev_addr;
                control0 = PL080_CONTROL_DST_AHB2;
                control0 |= PL080_CONTROL_SRC_INCR;
                break;
        default:
                BUG();
        }

        /* note, we do not currently setup any of the burst controls */

        control1 = size >> chan->hw_width;      /* size in no of xfers */
        control0 |= PL080_CONTROL_PROT_SYS;     /* always in priv. mode */
        control0 |= PL080_CONTROL_TC_IRQ_EN;    /* always fire IRQ */
        control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT;
        control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT;

        lli->src_addr = src;
        lli->dst_addr = dst;
        lli->next_lli = 0;
        lli->control0 = control0;
        lli->control1 = control1;
}

static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan,
                                struct pl080s_lli *lli)
{
        void __iomem *regs = chan->regs;

        pr_debug("%s: LLI %p => regs\n", __func__, lli);
        show_lli(lli);

        writel(lli->src_addr, regs + PL080_CH_SRC_ADDR);
        writel(lli->dst_addr, regs + PL080_CH_DST_ADDR);
        writel(lli->next_lli, regs + PL080_CH_LLI);
        writel(lli->control0, regs + PL080_CH_CONTROL);
        writel(lli->control1, regs + PL080S_CH_CONTROL2);
}

static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan)
{
        struct s3c64xx_dmac *dmac = chan->dmac;
        u32 config;
        u32 bit = chan->bit;

        dbg_showchan(chan);

        pr_debug("%s: clearing interrupts\n", __func__);

        /* clear interrupts */
        writel(bit, dmac->regs + PL080_TC_CLEAR);
        writel(bit, dmac->regs + PL080_ERR_CLEAR);

        pr_debug("%s: starting channel\n", __func__);

        config = readl(chan->regs + PL080S_CH_CONFIG);
        config |= PL080_CONFIG_ENABLE;
        config &= ~PL080_CONFIG_HALT;

        pr_debug("%s: writing config %08x\n", __func__, config);
        writel(config, chan->regs + PL080S_CH_CONFIG);

        return 0;
}

static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan)
{
        u32 config;
        int timeout;

        pr_debug("%s: stopping channel\n", __func__);

        dbg_showchan(chan);

        config = readl(chan->regs + PL080S_CH_CONFIG);
        config |= PL080_CONFIG_HALT;
        writel(config, chan->regs + PL080S_CH_CONFIG);

        timeout = 1000;
        do {
                config = readl(chan->regs + PL080S_CH_CONFIG);
                pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
                if (config & PL080_CONFIG_ACTIVE)
                        udelay(10);
                else
                        break;
        } while (--timeout > 0);

        if (config & PL080_CONFIG_ACTIVE) {
                printk(KERN_ERR "%s: channel still active\n", __func__);
                return -EFAULT;
        }

        config = readl(chan->regs + PL080S_CH_CONFIG);
        config &= ~PL080_CONFIG_ENABLE;
        writel(config, chan->regs + PL080S_CH_CONFIG);

        return 0;
}

static inline void s3c64xx_dma_buffdone(struct s3c2410_dma_chan *chan,
                                        struct s3c64xx_dma_buff *buf,
                                        enum s3c2410_dma_buffresult result)
{
        if (chan->callback_fn != NULL)
                (chan->callback_fn)(chan, buf->pw, 0, result);
}

static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff)
{
        dma_pool_free(dma_pool, buff->lli, buff->lli_dma);
        kfree(buff);
}

static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
{
        struct s3c64xx_dma_buff *buff, *next;
        u32 config;

        dbg_showchan(chan);

        pr_debug("%s: flushing channel\n", __func__);

        config = readl(chan->regs + PL080S_CH_CONFIG);
        config &= ~PL080_CONFIG_ENABLE;
        writel(config, chan->regs + PL080S_CH_CONFIG);

        /* dump all the buffers associated with this channel */

        for (buff = chan->curr; buff != NULL; buff = next) {
                next = buff->next;
                pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next);

                s3c64xx_dma_buffdone(chan, buff, S3C2410_RES_ABORT);
                s3c64xx_dma_freebuff(buff);
        }

        chan->curr = chan->next = chan->end = NULL;

        return 0;
}

int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
{
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

        WARN_ON(!chan);
        if (!chan)
                return -EINVAL;

        switch (op) {
        case S3C2410_DMAOP_START:
                return s3c64xx_dma_start(chan);

        case S3C2410_DMAOP_STOP:
                return s3c64xx_dma_stop(chan);

        case S3C2410_DMAOP_FLUSH:
                return s3c64xx_dma_flush(chan);

        /* believe PAUSE/RESUME are no-ops */
        case S3C2410_DMAOP_PAUSE:
        case S3C2410_DMAOP_RESUME:
        case S3C2410_DMAOP_STARTED:
        case S3C2410_DMAOP_TIMEOUT:
                return 0;
        }

        return -ENOENT;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);
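
/* Usage sketch (illustrative only): the typical control sequence a client
 * driver issues on a channel handle 'ch' (placeholder) obtained from
 * s3c2410_dma_request().
 *
 *      s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
 *      ...
 *      s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
 *      s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH);
 */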

/* s3c2410_dma_enqueue
 *
 * queue the given buffer for transfer on the given channel
 */

int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
                        dma_addr_t data, int size)
{
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
        struct s3c64xx_dma_buff *next;
        struct s3c64xx_dma_buff *buff;
        struct pl080s_lli *lli;
        unsigned long flags;
        int ret;

        WARN_ON(!chan);
        if (!chan)
                return -EINVAL;

        buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC);
        if (!buff) {
                printk(KERN_ERR "%s: no memory for buffer\n", __func__);
                return -ENOMEM;
        }

        lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma);
        if (!lli) {
                printk(KERN_ERR "%s: no memory for lli\n", __func__);
                ret = -ENOMEM;
                goto err_buff;
        }

        pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
                 __func__, buff, data, lli, (u32)buff->lli_dma, size);

        buff->lli = lli;
        buff->pw = id;

        s3c64xx_dma_fill_lli(chan, lli, data, size);

        local_irq_save(flags);

        if ((next = chan->next) != NULL) {
                struct s3c64xx_dma_buff *end = chan->end;
                struct pl080s_lli *endlli = end->lli;

                pr_debug("enqueuing onto channel\n");

                end->next = buff;
                endlli->next_lli = buff->lli_dma;

                if (chan->flags & S3C2410_DMAF_CIRCULAR) {
                        struct s3c64xx_dma_buff *curr = chan->curr;
                        lli->next_lli = curr->lli_dma;
                }

                if (next == chan->curr) {
                        writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
                        chan->next = buff;
                }

                show_lli(endlli);
                chan->end = buff;
        } else {
                pr_debug("enqueuing onto empty channel\n");

                chan->curr = buff;
                chan->next = buff;
                chan->end = buff;

                s3c64xx_lli_to_regs(chan, lli);
        }

        local_irq_restore(flags);

        show_lli(lli);

        dbg_showchan(chan);
        dbg_showbuffs(chan);
        return 0;

err_buff:
        kfree(buff);
        return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);
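
/* Usage sketch (illustrative only): queue a buffer that has already been
 * DMA-mapped (e.g. with dma_map_single()). 'ch', 'my_cookie', 'buf_dma'
 * and 'buf_len' are placeholder names. 'my_cookie' is handed back to the
 * client's callback as the 'pw' argument when the buffer completes or is
 * aborted.
 *
 *      if (s3c2410_dma_enqueue(ch, my_cookie, buf_dma, buf_len) < 0)
 *              pr_err("failed to queue DMA buffer\n");
 */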

int s3c2410_dma_devconfig(enum dma_ch channel,
                          enum dma_data_direction source,
                          unsigned long devaddr)
{
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
        u32 peripheral;
        u32 config = 0;

        pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
                 __func__, channel, source, devaddr, chan);

        WARN_ON(!chan);
        if (!chan)
                return -EINVAL;

        peripheral = (chan->peripheral & 0xf);
        chan->source = source;
        chan->dev_addr = devaddr;

        pr_debug("%s: peripheral %d\n", __func__, peripheral);

        switch (source) {
        case DMA_FROM_DEVICE:
                config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
                config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
                break;
        case DMA_TO_DEVICE:
                config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
                config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
                break;
        default:
                printk(KERN_ERR "%s: bad source\n", __func__);
                return -EINVAL;
        }

        /* allow TC and ERR interrupts */
        config |= PL080_CONFIG_TC_IRQ_MASK;
        config |= PL080_CONFIG_ERR_IRQ_MASK;

        pr_debug("%s: config %08x\n", __func__, config);

        writel(config, chan->regs + PL080S_CH_CONFIG);

        return 0;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);
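
/* Usage sketch (illustrative only): point the channel at a peripheral
 * before queueing buffers. MY_DEV_FIFO_PHYS is a placeholder for the
 * physical address of the device's data register.
 *
 *      s3c2410_dma_devconfig(ch, DMA_TO_DEVICE, MY_DEV_FIFO_PHYS);
 */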

int s3c2410_dma_getposition(enum dma_ch channel,
                            dma_addr_t *src, dma_addr_t *dst)
{
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

        WARN_ON(!chan);
        if (!chan)
                return -EINVAL;

        if (src != NULL)
                *src = readl(chan->regs + PL080_CH_SRC_ADDR);

        if (dst != NULL)
                *dst = readl(chan->regs + PL080_CH_DST_ADDR);

        return 0;
}
EXPORT_SYMBOL(s3c2410_dma_getposition);

/* s3c2410_dma_request
 *
 * get control of a DMA channel
 */

int s3c2410_dma_request(enum dma_ch channel,
                        struct s3c2410_dma_client *client,
                        void *dev)
{
        struct s3c2410_dma_chan *chan;
        unsigned long flags;

        pr_debug("dma%d: s3c2410_dma_request: client=%s, dev=%p\n",
                 channel, client->name, dev);

        local_irq_save(flags);

        chan = s3c64xx_dma_map_channel(channel);
        if (chan == NULL) {
                local_irq_restore(flags);
                return -EBUSY;
        }

        dbg_showchan(chan);

        chan->client = client;
        chan->in_use = 1;
        chan->peripheral = channel;
        chan->flags = 0;

        local_irq_restore(flags);

        /* need to setup */

        pr_debug("%s: channel initialised, %p\n", __func__, chan);

        return chan->number | DMACH_LOW_LEVEL;
}
EXPORT_SYMBOL(s3c2410_dma_request);
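
/* Usage sketch (illustrative only): claim a channel. The client structure
 * and channel id are placeholders; a negative return means no channel in
 * the relevant bank was free.
 *
 *      static struct s3c2410_dma_client my_client = { .name = "my-dev" };
 *
 *      int ch = s3c2410_dma_request(DMACH_PCM1_TX, &my_client, NULL);
 *      if (ch < 0)
 *              return ch;
 */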

/* s3c2410_dma_free
 *
 * release the given channel back to the system, will stop and flush
 * any outstanding transfers, and ensure the channel is ready for the
 * next claimant.
 *
 * Note, although a warning is currently printed if the freeing client
 * info is not the same as the registrant's client info, the free is still
 * allowed to go through.
 */

int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
{
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
        unsigned long flags;

        if (chan == NULL)
                return -EINVAL;

        local_irq_save(flags);

        if (chan->client != client) {
                printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
                       channel, chan->client, client);
        }

        /* sort out stopping and freeing the channel */

        chan->client = NULL;
        chan->in_use = 0;

        if (!(channel & DMACH_LOW_LEVEL))
                s3c_dma_chan_map[channel] = NULL;

        local_irq_restore(flags);

        return 0;
}
EXPORT_SYMBOL(s3c2410_dma_free);
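
/* Usage sketch (illustrative only): release the channel claimed above,
 * passing the same client structure that was given to
 * s3c2410_dma_request().
 *
 *      s3c2410_dma_free(ch, &my_client);
 */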

static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
{
        struct s3c64xx_dmac *dmac = pw;
        struct s3c2410_dma_chan *chan;
        enum s3c2410_dma_buffresult res;
        u32 tcstat, errstat;
        u32 bit;
        int offs;

        tcstat = readl(dmac->regs + PL080_TC_STATUS);
        errstat = readl(dmac->regs + PL080_ERR_STATUS);

        for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) {
                struct s3c64xx_dma_buff *buff;

                if (!(errstat & bit) && !(tcstat & bit))
                        continue;

                chan = dmac->channels + offs;
                res = S3C2410_RES_ERR;

                if (tcstat & bit) {
                        writel(bit, dmac->regs + PL080_TC_CLEAR);
                        res = S3C2410_RES_OK;
                }

                if (errstat & bit)
                        writel(bit, dmac->regs + PL080_ERR_CLEAR);

                /* 'next' points to the buffer that is next to the
                 * currently active buffer.
                 * For CIRCULAR queues, 'next' will be same as 'curr'
                 * when 'end' is the active buffer.
                 */
                buff = chan->curr;
                while (buff && buff != chan->next
                                && buff->next != chan->next)
                        buff = buff->next;

                BUG_ON(!buff);

                if (buff == chan->next)
                        buff = chan->end;

                s3c64xx_dma_buffdone(chan, buff, res);

                /* Free the node and update curr, if non-circular queue */
                if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) {
                        chan->curr = buff->next;
                        s3c64xx_dma_freebuff(buff);
                }

                /* Update 'next' */
                buff = chan->next;
                if (chan->next == chan->end) {
                        chan->next = chan->curr;
                        if (!(chan->flags & S3C2410_DMAF_CIRCULAR))
                                chan->end = NULL;
                } else {
                        chan->next = buff->next;
                }
        }

        return IRQ_HANDLED;
}

static struct bus_type dma_subsys = {
        .name           = "s3c64xx-dma",
        .dev_name       = "s3c64xx-dma",
};

static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
                             int irq, unsigned int base)
{
        struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno];
        struct s3c64xx_dmac *dmac;
        char clkname[16];
        void __iomem *regs;
        void __iomem *regptr;
        int err, ch;

        dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL);
        if (!dmac) {
                printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
                return -ENOMEM;
        }

        dmac->dev.id = chno / 8;
        dmac->dev.bus = &dma_subsys;

        err = device_register(&dmac->dev);
        if (err) {
                printk(KERN_ERR "%s: failed to register device\n", __func__);
                goto err_alloc;
        }

        regs = ioremap(base, 0x200);
        if (!regs) {
                printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
                err = -ENXIO;
                goto err_dev;
        }

        snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id);

        dmac->clk = clk_get(NULL, clkname);
        if (IS_ERR(dmac->clk)) {
                printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname);
                err = PTR_ERR(dmac->clk);
                goto err_map;
        }

        clk_enable(dmac->clk);

        dmac->regs = regs;
        dmac->chanbase = chbase;
        dmac->channels = chptr;

        err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac);
        if (err < 0) {
                printk(KERN_ERR "%s: failed to get irq\n", __func__);
                goto err_clk;
        }

        regptr = regs + PL080_Cx_BASE(0);

        for (ch = 0; ch < 8; ch++, chptr++) {
                pr_debug("%s: registering DMA %d (%p)\n",
                         __func__, chno + ch, regptr);

                chptr->bit = 1 << ch;
                chptr->number = chno + ch;
                chptr->dmac = dmac;
                chptr->regs = regptr;
                regptr += PL080_Cx_STRIDE;
        }

        /* for the moment, permanently enable the controller */
        writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);

        printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
               irq, regs, chno, chno + 7);

        return 0;

err_clk:
        clk_disable(dmac->clk);
        clk_put(dmac->clk);
err_map:
        iounmap(regs);
err_dev:
        device_unregister(&dmac->dev);
err_alloc:
        kfree(dmac);
        return err;
}

static int __init s3c64xx_dma_init(void)
{
        int ret;

        printk(KERN_INFO "%s: Registering DMA channels\n", __func__);

        dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli),
                                   16, 0);
        if (!dma_pool) {
                printk(KERN_ERR "%s: failed to create pool\n", __func__);
                return -ENOMEM;
        }

        ret = subsys_system_register(&dma_subsys, NULL);
        if (ret) {
                printk(KERN_ERR "%s: failed to create subsys\n", __func__);
                dma_pool_destroy(dma_pool);
                return ret;
        }

        /* Set all DMA configuration to be DMA, not SDMA */
        writel(0xffffff, S3C64XX_SDMA_SEL);

        /* Register standard DMA controllers */
        s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
        s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);

        return 0;
}

arch_initcall(s3c64xx_dma_init);
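
/* Putting it together (illustrative only): the rough call sequence a
 * client driver would use to stream data to a peripheral. Everything
 * prefixed 'my_' and MY_DEV_FIFO_PHYS are placeholders; completion is
 * reported through the channel's callback_fn, which this file only
 * invokes - it is installed elsewhere in the platform DMA code.
 *
 *      int ch = s3c2410_dma_request(DMACH_PCM1_TX, &my_client, NULL);
 *
 *      s3c2410_dma_devconfig(ch, DMA_TO_DEVICE, MY_DEV_FIFO_PHYS);
 *      s3c2410_dma_config(ch, 4);
 *      s3c2410_dma_enqueue(ch, my_cookie, buf_dma, buf_len);
 *      s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
 *      ...
 *      s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
 *      s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH);
 *      s3c2410_dma_free(ch, &my_client);
 */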