1 /* linux/arch/arm/plat-samsung/s3c-pl330.c
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/interrupt.h>
16 #include <linux/slab.h>
17 #include <linux/platform_device.h>
19 #include <asm/hardware/pl330.h>
21 #include <plat/s3c-pl330-pdata.h>
/**
 * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
 * @busy_chan: Number of channels currently busy.
 * @peri: List of IDs of peripherals this DMAC can work with.
 * @node: To attach to the global list of DMACs.
 * @pi: PL330 configuration info for the DMAC.
 * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
 */
/* NOTE(review): the @busy_chan and @peri members fall on lines missing
 * from this excerpt; only the members below are visible here.
 */
struct s3c_pl330_dmac {
	struct list_head node;
	struct pl330_info *pi;
	struct kmem_cache *kmcache;
/**
 * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
 * @token: Xfer ID provided by the client.
 * @node: To attach to the list of xfers on a channel.
 * @px: Xfer for PL330 core.
 * @chan: Owner channel of this xfer.
 */
/* NOTE(review): the @token and @px members fall on lines missing from
 * this excerpt.
 */
struct s3c_pl330_xfer {
	struct list_head node;
	struct s3c_pl330_chan *chan;
/**
 * struct s3c_pl330_chan - Logical channel to communicate with
 * a Physical peripheral.
 * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
 * NULL if the channel is available to be acquired.
 * @id: ID of the peripheral that this channel can communicate with.
 * @options: Options specified by the client.
 * @sdaddr: Address provided via s3c2410_dma_devconfig.
 * @node: To attach to the global list of channels.
 * @lrq: Pointer to the last submitted pl330_req to PL330 core.
 * @xfer_list: To manage list of xfers enqueued.
 * @req: Two requests to communicate with the PL330 engine.
 * @callback_fn: Callback function to the client.
 * @rqcfg: Channel configuration for the xfers.
 * @xfer_head: Pointer to the xfer to be next executed.
 * @dmac: Pointer to the DMAC that manages this channel, NULL if the
 * channel is available to be acquired.
 * @client: Client of this channel. NULL if the
 * channel is available to be acquired.
 */
/* NOTE(review): @pl330_chan_id, @id, @options and @sdaddr members fall
 * on lines missing from this excerpt.
 */
struct s3c_pl330_chan {
	struct list_head node;
	struct pl330_req *lrq;
	struct list_head xfer_list;
	struct pl330_req req[2];
	s3c2410_dma_cbfn_t callback_fn;
	struct pl330_reqcfg rqcfg;
	struct s3c_pl330_xfer *xfer_head;
	struct s3c_pl330_dmac *dmac;
	struct s3c2410_dma_client *client;
/* All DMACs in the platform */
static LIST_HEAD(dmac_list);

/* All channels to peripherals in the platform */
static LIST_HEAD(chan_list);

/*
 * Since we add resources (DMACs and Channels) to the global pool,
 * we need to guard access to the resources using a global lock.
 */
static DEFINE_SPINLOCK(res_lock);
/* Returns the channel with ID 'id' in the chan_list */
static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
	struct s3c_pl330_chan *ch;

	/* Linear scan of the global channel list;
	 * presumably called under res_lock — verify against callers.
	 */
	list_for_each_entry(ch, &chan_list, node)
/* Allocate a new channel with ID 'id' and add to chan_list */
static void chan_add(const enum dma_ch id)
	struct s3c_pl330_chan *ch = id_to_chan(id);

	/* Return if the channel already exists */

	ch = kmalloc(sizeof(*ch), GFP_KERNEL);
	/* Return silently to work with other channels */

	list_add_tail(&ch->node, &chan_list);
/* If the channel is not yet acquired by any client */
static bool chan_free(struct s3c_pl330_chan *ch)
	/* Channel points to some DMAC only when it's acquired */
	return ch->dmac ? false : true;
/*
 * Returns 0 if peripheral i/f is invalid or not present on the dmac.
 * Index + 1, otherwise.
 */
static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
	enum dma_ch *id = dmac->peri;

	/* Discount invalid markers */
	if (ch_id == DMACH_MAX)

	/* Scan the DMAC's peripheral interface table for ch_id */
	for (i = 0; i < PL330_MAX_PERI; i++)
/* If all channel threads of the DMAC are busy */
static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
	struct pl330_info *pi = dmac->pi;

	/* busy_chan counts acquired threads; pcfg.num_chan is the hw total */
	return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
/*
 * Returns the number of free channels that
 * can be handled by this dmac only.
 */
static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	struct s3c_pl330_chan *ch;
	unsigned found, count = 0;

	for (i = 0; i < PL330_MAX_PERI; i++) {
		/* Skip invalid markers and channels already acquired */
		if (p == DMACH_MAX || !chan_free(ch))

		/* Check if any other DMAC can also drive this peripheral */
		list_for_each_entry(d, &dmac_list, node) {
			if (d != dmac && iface_of_dmac(d, ch->id)) {
/*
 * Measure of suitability of 'dmac' handling 'ch'
 *
 * 0 indicates 'dmac' can not handle 'ch' either
 * because it is not supported by the hardware or
 * because all dmac channels are currently busy.
 *
 * >0 value indicates 'dmac' has the capability.
 * The bigger the value the more suitable the dmac.
 */
#define MAX_SUIT	UINT_MAX

/* NOTE(review): function name carries an upstream typo ("suitablility");
 * kept as-is since callers use this spelling.
 */
static unsigned suitablility(struct s3c_pl330_dmac *dmac,
		struct s3c_pl330_chan *ch)
	struct pl330_info *pi = dmac->pi;
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;

	/* If all the DMAC channel threads are busy */

	/* Look up 'ch' in this DMAC's peripheral interface table */
	for (i = 0; i < PL330_MAX_PERI; i++)

	/* If the 'dmac' can't talk to 'ch' */
	if (i == PL330_MAX_PERI)

	list_for_each_entry(d, &dmac_list, node) {
		/*
		 * If some other dmac can talk to this
		 * peri and has some channel free.
		 */
		if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {

	/* Good if free chans are more, bad otherwise */
	s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);
/* More than one DMAC may have capability to transfer data with the
 * peripheral. This function assigns most suitable DMAC to manage the
 * channel and hence communicate with the peripheral.
 */
static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
	struct s3c_pl330_dmac *d, *dmac = NULL;
	unsigned sn, sl = MIN_SUIT;

	/* Pick the DMAC with the highest suitability score */
	list_for_each_entry(d, &dmac_list, node) {
		sn = suitablility(d, ch);
/* Acquire the channel for peripheral 'id' */
static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
	struct s3c_pl330_chan *ch = id_to_chan(id);
	struct s3c_pl330_dmac *dmac;

	/* If the channel doesn't exist or is already acquired */
	if (!ch || !chan_free(ch)) {

	/* Bind the channel to the most suitable DMAC */
	dmac = map_chan_to_dmac(ch);
	/* If couldn't map */
/* Delete xfer from the queue */
static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
	struct s3c_pl330_xfer *t;
	struct s3c_pl330_chan *ch;

	/* Make sure xfer is in the queue */
	list_for_each_entry(t, &ch->xfer_list, node)

	/* If xfer is last entry in the queue */
	if (xfer->node.next == &ch->xfer_list)
		/* Wrap the head pointer back to the first list entry */
		t = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
		t = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	/* If there was only one node left */
		ch->xfer_head = NULL;
	else if (ch->xfer_head == xfer)

	list_del(&xfer->node);
/* Provides pointer to the next xfer in the queue.
 * If CIRCULAR option is set, the list is left intact,
 * otherwise the xfer is removed from the list.
 * Forced delete 'pluck' can be set to override the CIRCULAR option.
 */
static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
	struct s3c_pl330_xfer *xfer = ch->xfer_head;

	/* If xfer is last entry in the queue */
	if (xfer->node.next == &ch->xfer_list)
		/* Advance the head, wrapping to the start of the list */
		ch->xfer_head = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
		ch->xfer_head = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	/* Non-circular (or forced) consumption removes the node */
	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
		del_from_queue(xfer);
/* Enqueue 'xfer' on the channel; 'front' requeues it at the head */
static inline void add_to_queue(struct s3c_pl330_chan *ch,
		struct s3c_pl330_xfer *xfer, int front)
	struct pl330_xfer *xt;

	/* First xfer on the channel becomes the head */
	if (ch->xfer_head == NULL)
		ch->xfer_head = xfer;

	xt = &ch->xfer_head->px;
	/* If the head already submitted (CIRCULAR head) */
	if (ch->options & S3C2410_DMAF_CIRCULAR &&
		(xt == ch->req[0].x || xt == ch->req[1].x))
		ch->xfer_head = xfer;

	/* If this is a resubmission, it should go at the head */
		ch->xfer_head = xfer;
		list_add(&xfer->node, &ch->xfer_list);
		list_add_tail(&xfer->node, &ch->xfer_list);
/* Run the client callback for a completed/aborted xfer and, unless the
 * channel is CIRCULAR (buffer reused), free the xfer back to the cache.
 */
static inline void _finish_off(struct s3c_pl330_xfer *xfer,
		enum s3c2410_dma_buffresult res, int ffree)
	struct s3c_pl330_chan *ch;

	ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);

	/* Force Free or if buffer is not needed anymore */
	if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
		kmem_cache_free(ch->dmac->kmcache, xfer);
/* Pull the next xfer off the channel queue and submit it on request 'r'
 * to the PL330 core. Picks a burst length for mem-to-mem transfers.
 */
static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
	struct s3c_pl330_xfer *xfer;

	/* If already submitted */

	xfer = get_from_queue(ch, 0);

	/* Use max bandwidth for M<->M xfers */
	if (r->rqtype == MEMTOMEM) {
		struct pl330_info *pi = xfer->chan->dmac->pi;
		int burst = 1 << ch->rqcfg.brst_size;
		u32 bytes = r->x->bytes;

		/* Bus-width bytes per beat, scaled by the data buffer depth */
		bl = pi->pcfg.data_bus_width / 8;
		bl *= pi->pcfg.data_buf_dep;

		/* src/dst_burst_len can't be more than 16 */

		/* Prefer a burst length that divides the xfer exactly */
		if (!(bytes % (bl * burst)))

		ch->rqcfg.brst_len = bl;
		ch->rqcfg.brst_len = 1;

	ret = pl330_submit_req(ch->pl330_chan_id, r);

	/* If submission was successful */
		ch->lrq = r; /* latest submitted req */

	/* If both of the PL330 ping-pong buffers filled */
	if (ret == -EAGAIN) {
		dev_err(ch->dmac->pi->dev, "%s:%d!\n",
		/* Queue back again */
		add_to_queue(ch, xfer, 1);
		dev_err(ch->dmac->pi->dev, "%s:%d!\n",
		/* Hard failure: report the xfer as errored to the client */
		_finish_off(xfer, S3C2410_RES_ERR, 0);
/* Common completion path for both ping-pong requests: refill the freed
 * request slot, map the PL330 result onto the S3C DMA API result codes,
 * and notify the client.
 */
static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
		struct pl330_req *r, enum pl330_op_err err)
	struct s3c_pl330_xfer *xfer;
	struct pl330_xfer *xl = r->x;
	enum s3c2410_dma_buffresult res;

	spin_lock_irqsave(&res_lock, flags);

	/* Try to submit the next queued xfer on the just-freed slot */
	s3c_pl330_submit(ch, r);

	spin_unlock_irqrestore(&res_lock, flags);

	/* Map result to S3C DMA API */
	if (err == PL330_ERR_NONE)
		res = S3C2410_RES_OK;
	else if (err == PL330_ERR_ABORT)
		res = S3C2410_RES_ABORT;
		res = S3C2410_RES_ERR;

	/* If last request had some xfer */
		xfer = container_of(xl, struct s3c_pl330_xfer, px);
		_finish_off(xfer, res, 0);
		dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
/* PL330 core callback for req[0]; recovers the owning channel */
static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
	struct pl330_req *r = token;
	struct s3c_pl330_chan *ch = container_of(r,
			struct s3c_pl330_chan, req[0]);
	s3c_pl330_rq(ch, r, err);
/* PL330 core callback for req[1]; recovers the owning channel */
static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
	struct pl330_req *r = token;
	struct s3c_pl330_chan *ch = container_of(r,
			struct s3c_pl330_chan, req[1]);
	s3c_pl330_rq(ch, r, err);
/* Release an acquired channel */
/* NOTE(review): the body of this function is almost entirely on lines
 * missing from this excerpt.
 */
static void chan_release(struct s3c_pl330_chan *ch)
	struct s3c_pl330_dmac *dmac;
/* S3C DMA API entry: perform a channel operation (start/stop/flush).
 * STOP aborts the active xfer; FLUSH additionally drains the queue,
 * calling back each aborted xfer with the lock temporarily dropped.
 */
int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
	struct s3c_pl330_xfer *xfer;
	enum pl330_chan_op pl330op;
	struct s3c_pl330_chan *ch;

	spin_lock_irqsave(&res_lock, flags);

	if (!ch || chan_free(ch)) {

	case S3C2410_DMAOP_START:
		/* Make sure both reqs are enqueued */
		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
		s3c_pl330_submit(ch, &ch->req[idx]);
		s3c_pl330_submit(ch, &ch->req[1 - idx]);
		pl330op = PL330_OP_START;

	case S3C2410_DMAOP_STOP:
		pl330op = PL330_OP_ABORT;

	case S3C2410_DMAOP_FLUSH:
		pl330op = PL330_OP_FLUSH;

	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_TIMEOUT:
	case S3C2410_DMAOP_STARTED:
		/* Unsupported operations: bail out */
		spin_unlock_irqrestore(&res_lock, flags);

	spin_unlock_irqrestore(&res_lock, flags);

	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);

	if (pl330op == PL330_OP_START) {
		spin_unlock_irqrestore(&res_lock, flags);

	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	/* Abort the current xfer */
	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		/* Drop xfer during FLUSH */
		if (pl330op == PL330_OP_FLUSH)
			del_from_queue(xfer);

		ch->req[idx].x = NULL;

		/* Client callback runs without the resource lock held */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT,
				pl330op == PL330_OP_FLUSH ? 1 : 0);
		spin_lock_irqsave(&res_lock, flags);

	/* Flush the whole queue */
	if (pl330op == PL330_OP_FLUSH) {

		if (ch->req[1 - idx].x) {
			xfer = container_of(ch->req[1 - idx].x,
					struct s3c_pl330_xfer, px);

			del_from_queue(xfer);

			ch->req[1 - idx].x = NULL;

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);

		/* Finish off the remaining in the queue */
		xfer = ch->xfer_head;

			del_from_queue(xfer);

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);

			xfer = ch->xfer_head;

	spin_unlock_irqrestore(&res_lock, flags);
EXPORT_SYMBOL(s3c2410_dma_ctrl);
/* S3C DMA API entry: queue a buffer of 'size' bytes at 'addr' on the
 * channel, then try to fill both PL330 ping-pong request slots.
 */
int s3c2410_dma_enqueue(enum dma_ch id, void *token,
		dma_addr_t addr, int size)
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;

	spin_lock_irqsave(&res_lock, flags);

	/* Error if invalid or free channel */
	if (!ch || chan_free(ch)) {

	/* Error if size is unaligned */
	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {

	/* GFP_ATOMIC: this path may be reached with the spinlock held */
	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);

	xfer->px.bytes = size;
	xfer->px.next = NULL; /* Single request */

	/* For S3C DMA API, direction is always fixed for all xfers */
	if (ch->req[0].rqtype == MEMTODEV) {
		xfer->px.src_addr = addr;
		xfer->px.dst_addr = ch->sdaddr;
		xfer->px.src_addr = ch->sdaddr;
		xfer->px.dst_addr = addr;

	add_to_queue(ch, xfer, 0);

	/* Try submitting on either request */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	s3c_pl330_submit(ch, &ch->req[idx]);
	s3c_pl330_submit(ch, &ch->req[1 - idx]);

	spin_unlock_irqrestore(&res_lock, flags);

	if (ch->options & S3C2410_DMAF_AUTOSTART)
		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);

	spin_unlock_irqrestore(&res_lock, flags);
EXPORT_SYMBOL(s3c2410_dma_enqueue);
/* S3C DMA API entry: acquire the channel for 'id', grab a hardware
 * channel thread from the owning DMAC and initialise the channel with
 * safe defaults (direction intentionally invalid until devconfig).
 */
int s3c2410_dma_request(enum dma_ch id,
		struct s3c2410_dma_client *client,
	struct s3c_pl330_dmac *dmac;
	struct s3c_pl330_chan *ch;

	spin_lock_irqsave(&res_lock, flags);

	ch = chan_acquire(id);

	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
	if (!ch->pl330_chan_id) {

	ch->options = 0; /* Clear any option */
	ch->callback_fn = NULL; /* Clear any callback */

	ch->rqcfg.brst_size = 2; /* Default word size */
	ch->rqcfg.swap = SWAP_NO;
	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.privileged = 0;
	ch->rqcfg.insnaccess = 0;

	/* Set invalid direction */
	ch->req[0].rqtype = DEVTODEV;
	ch->req[1].rqtype = ch->req[0].rqtype;

	/* Both ping-pong requests share the one channel configuration */
	ch->req[0].cfg = &ch->rqcfg;
	ch->req[1].cfg = ch->req[0].cfg;

	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
	ch->req[1].peri = ch->req[0].peri;

	ch->req[0].token = &ch->req[0];
	ch->req[0].xfer_cb = s3c_pl330_rq0;
	ch->req[1].token = &ch->req[1];
	ch->req[1].xfer_cb = s3c_pl330_rq1;

	/* Reset xfer list */
	INIT_LIST_HEAD(&ch->xfer_list);
	ch->xfer_head = NULL;

	spin_unlock_irqrestore(&res_lock, flags);
EXPORT_SYMBOL(s3c2410_dma_request);
/* S3C DMA API entry: release the channel back to the pool, aborting the
 * submitted requests and draining the queue with abort callbacks.
 */
int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;

	spin_lock_irqsave(&res_lock, flags);

	if (!ch || chan_free(ch))

	/* Refuse if someone else wanted to free the channel */
	if (ch->client != client) {

	/* Stop any active xfer, flush the queue and do callbacks */
	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);

	/* Abort the submitted requests */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[idx].x = NULL;
		del_from_queue(xfer);

		/* Client callback runs without the resource lock held */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);

	if (ch->req[1 - idx].x) {
		xfer = container_of(ch->req[1 - idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[1 - idx].x = NULL;
		del_from_queue(xfer);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);

	/* Pluck and Abort the queued requests in order */
		xfer = get_from_queue(ch, 1);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);

	pl330_release_channel(ch->pl330_chan_id);

	ch->pl330_chan_id = NULL;

	spin_unlock_irqrestore(&res_lock, flags);
EXPORT_SYMBOL(s3c2410_dma_free);
/* S3C DMA API entry: set the transfer unit (burst size) for the channel.
 * 'xferunit' must be a power of two no larger than the bus width.
 */
int s3c2410_dma_config(enum dma_ch id, int xferunit)
	struct s3c_pl330_chan *ch;
	struct pl330_info *pi;
	int i, dbwidth, ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	if (!ch || chan_free(ch)) {

	dbwidth = pi->pcfg.data_bus_width / 8;

	/* Max size of xfer can be pcfg.data_bus_width */
	if (xferunit > dbwidth) {

	/* Find i such that xferunit == 2^i */
	while (xferunit != (1 << i))

	if (xferunit == (1 << i))
		ch->rqcfg.brst_size = i;

	spin_unlock_irqrestore(&res_lock, flags);
EXPORT_SYMBOL(s3c2410_dma_config);
/* Options that are supported by this driver */
#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)

/* S3C DMA API entry: set channel option flags; rejects unknown bits */
int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
	struct s3c_pl330_chan *ch;

	spin_lock_irqsave(&res_lock, flags);

	if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))

	ch->options = options;

	spin_unlock_irqrestore(&res_lock, flags);
EXPORT_SYMBOL(s3c2410_dma_setflags);
/* S3C DMA API entry: register the client's buffer-done callback */
int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
	struct s3c_pl330_chan *ch;

	spin_lock_irqsave(&res_lock, flags);

	if (!ch || chan_free(ch))

	ch->callback_fn = rtn;

	spin_unlock_irqrestore(&res_lock, flags);
EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
/* S3C DMA API entry: fix the transfer direction and the device-side
 * address for all subsequent xfers on the channel.
 */
int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
		unsigned long address)
	struct s3c_pl330_chan *ch;

	spin_lock_irqsave(&res_lock, flags);

	if (!ch || chan_free(ch)) {

	case S3C2410_DMASRC_HW: /* P->M */
		ch->req[0].rqtype = DEVTOMEM;
		ch->req[1].rqtype = DEVTOMEM;
		ch->rqcfg.src_inc = 0;
		ch->rqcfg.dst_inc = 1;

	case S3C2410_DMASRC_MEM: /* M->P */
		ch->req[0].rqtype = MEMTODEV;
		ch->req[1].rqtype = MEMTODEV;
		ch->rqcfg.src_inc = 1;
		ch->rqcfg.dst_inc = 0;

	/* Remember the peripheral address used by future enqueues */
	ch->sdaddr = address;

	spin_unlock_irqrestore(&res_lock, flags);
EXPORT_SYMBOL(s3c2410_dma_devconfig);
/* S3C DMA API entry: report the current src/dst addresses of the
 * hardware channel via pl330_chan_status.
 */
int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
	struct s3c_pl330_chan *ch = id_to_chan(id);
	struct pl330_chanstatus status;

	if (!ch || chan_free(ch))

	ret = pl330_chan_status(ch->pl330_chan_id, &status);

	*src = status.src_addr;
	*dst = status.dst_addr;
EXPORT_SYMBOL(s3c2410_dma_getposition);
/* Shared-nothing IRQ handler: hand the event to the PL330 core */
static irqreturn_t pl330_irq_handler(int irq, void *data)
	if (pl330_update(data))
/* Platform probe: map the controller, register the IRQ, add the PL330
 * core instance, allocate the logical DMAC and create a channel for
 * every peripheral it serves.
 */
static int pl330_probe(struct platform_device *pdev)
	struct s3c_pl330_dmac *s3c_pl330_dmac;
	struct s3c_pl330_platdata *pl330pd;
	struct pl330_info *pl330_info;
	struct resource *res;

	pl330pd = pdev->dev.platform_data;

	/* Can't do without the list of _32_ peripherals */
	if (!pl330pd || !pl330pd->peri) {
		dev_err(&pdev->dev, "platform data missing!\n");

	pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);

	pl330_info->pl330_data = NULL;
	pl330_info->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	request_mem_region(res->start, resource_size(res), pdev->name);

	pl330_info->base = ioremap(res->start, resource_size(res));
	if (!pl330_info->base) {

	irq = platform_get_irq(pdev, 0);

	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&pdev->dev), pl330_info);

	ret = pl330_add(pl330_info);

	/* Allocate a new DMAC */
	s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
	if (!s3c_pl330_dmac) {

	s3c_pl330_dmac->pi = pl330_info;

	/* No busy channels */
	s3c_pl330_dmac->busy_chan = 0;

	/* Per-DMAC slab cache for xfer descriptors */
	s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
			sizeof(struct s3c_pl330_xfer), 0, 0, NULL);

	if (!s3c_pl330_dmac->kmcache) {

	/* Get the list of peripherals */
	s3c_pl330_dmac->peri = pl330pd->peri;

	/* Attach to the list of DMACs */
	list_add_tail(&s3c_pl330_dmac->node, &dmac_list);

	/* Create a channel for each peripheral in the DMAC
	 * that is, if it doesn't already exist.
	 */
	for (i = 0; i < PL330_MAX_PERI; i++)
		if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
			chan_add(s3c_pl330_dmac->peri[i]);

		"Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pl330_info->pcfg.data_buf_dep,
		pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
		pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);

	/* Error unwind path; the goto labels fall on lines missing from
	 * this excerpt — each step undoes one acquisition above.
	 */
	kfree(s3c_pl330_dmac);

	pl330_del(pl330_info);

	free_irq(irq, pl330_info);

	iounmap(pl330_info->base);

	release_mem_region(res->start, resource_size(res));
/* Platform remove: find this device's DMAC, free every channel that only
 * this DMAC can serve, then unhook the DMAC from the global list.
 */
static int pl330_remove(struct platform_device *pdev)
	struct s3c_pl330_dmac *dmac, *d;
	struct s3c_pl330_chan *ch;
	unsigned long flags;

	if (!pdev->dev.platform_data)

	spin_lock_irqsave(&res_lock, flags);

	/* Locate the DMAC instance belonging to this platform device */
	list_for_each_entry(d, &dmac_list, node)
		if (d->pi->dev == &pdev->dev) {

	spin_unlock_irqrestore(&res_lock, flags);

	/* Remove all Channels that are managed only by this DMAC */
	list_for_each_entry(ch, &chan_list, node) {

		/* Only channels that are handled by this DMAC */
		if (iface_of_dmac(dmac, ch->id))

		/* Don't remove if some other DMAC has it too */
		list_for_each_entry(d, &dmac_list, node)
			if (d != dmac && iface_of_dmac(d, ch->id)) {

		/* s3c2410_dma_free takes res_lock itself, so drop it here */
		spin_unlock_irqrestore(&res_lock, flags);
		s3c2410_dma_free(ch->id, ch->client);
		spin_lock_irqsave(&res_lock, flags);
		list_del(&ch->node);

	/* Remove the DMAC */
	list_del(&dmac->node);

	spin_unlock_irqrestore(&res_lock, flags);
/* NOTE(review): .owner/.name normally sit inside an embedded .driver
 * initializer whose braces fall on lines missing from this excerpt.
 */
static struct platform_driver pl330_driver = {
	.owner = THIS_MODULE,
	.name = "s3c-pl330",
	.probe = pl330_probe,
	.remove = pl330_remove,
/* Module entry: register the platform driver */
static int __init pl330_init(void)
	return platform_driver_register(&pl330_driver);
module_init(pl330_init);
/* Module exit: unregister the platform driver */
static void __exit pl330_exit(void)
	platform_driver_unregister(&pl330_driver);
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
MODULE_LICENSE("GPL");