Merge branch 'topic/api_caps' into for-linus
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 593827b3fdd4fd8556724da998934a7c191b8d49..a562d24d20bf55179436d16086ca90f63d1b1894 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -545,6 +545,8 @@ struct dma_pl330_chan {
 
        /* List of to be xfered descriptors */
        struct list_head work_list;
+       /* List of completed descriptors */
+       struct list_head completed_list;
 
        /* Pointer to the DMAC that manages this channel,
         * NULL if the channel is available to be acquired.
@@ -2198,66 +2200,6 @@ to_desc(struct dma_async_tx_descriptor *tx)
        return container_of(tx, struct dma_pl330_desc, txd);
 }
 
-static inline void free_desc_list(struct list_head *list)
-{
-       struct dma_pl330_dmac *pdmac;
-       struct dma_pl330_desc *desc;
-       struct dma_pl330_chan *pch = NULL;
-       unsigned long flags;
-
-       /* Finish off the work list */
-       list_for_each_entry(desc, list, node) {
-               dma_async_tx_callback callback;
-               void *param;
-
-               /* All desc in a list belong to same channel */
-               pch = desc->pchan;
-               callback = desc->txd.callback;
-               param = desc->txd.callback_param;
-
-               if (callback)
-                       callback(param);
-
-               desc->pchan = NULL;
-       }
-
-       /* pch will be unset if list was empty */
-       if (!pch)
-               return;
-
-       pdmac = pch->dmac;
-
-       spin_lock_irqsave(&pdmac->pool_lock, flags);
-       list_splice_tail_init(list, &pdmac->desc_pool);
-       spin_unlock_irqrestore(&pdmac->pool_lock, flags);
-}
-
-static inline void handle_cyclic_desc_list(struct list_head *list)
-{
-       struct dma_pl330_desc *desc;
-       struct dma_pl330_chan *pch = NULL;
-       unsigned long flags;
-
-       list_for_each_entry(desc, list, node) {
-               dma_async_tx_callback callback;
-
-               /* Change status to reload it */
-               desc->status = PREP;
-               pch = desc->pchan;
-               callback = desc->txd.callback;
-               if (callback)
-                       callback(desc->txd.callback_param);
-       }
-
-       /* pch will be unset if list was empty */
-       if (!pch)
-               return;
-
-       spin_lock_irqsave(&pch->lock, flags);
-       list_splice_tail_init(list, &pch->work_list);
-       spin_unlock_irqrestore(&pch->lock, flags);
-}
-
 static inline void fill_queue(struct dma_pl330_chan *pch)
 {
        struct dma_pl330_desc *desc;
@@ -2291,7 +2233,6 @@ static void pl330_tasklet(unsigned long data)
        struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
        struct dma_pl330_desc *desc, *_dt;
        unsigned long flags;
-       LIST_HEAD(list);
 
        spin_lock_irqsave(&pch->lock, flags);
 
@@ -2300,7 +2241,7 @@ static void pl330_tasklet(unsigned long data)
                if (desc->status == DONE) {
                        if (!pch->cyclic)
                                dma_cookie_complete(&desc->txd);
-                       list_move_tail(&desc->node, &list);
+                       list_move_tail(&desc->node, &pch->completed_list);
                }
 
        /* Try to submit a req imm. next to the last completed cookie */
@@ -2309,12 +2250,31 @@ static void pl330_tasklet(unsigned long data)
        /* Make sure the PL330 Channel thread is active */
        pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
 
-       spin_unlock_irqrestore(&pch->lock, flags);
+       while (!list_empty(&pch->completed_list)) {
+               dma_async_tx_callback callback;
+               void *callback_param;
 
-       if (pch->cyclic)
-               handle_cyclic_desc_list(&list);
-       else
-               free_desc_list(&list);
+               desc = list_first_entry(&pch->completed_list,
+                                       struct dma_pl330_desc, node);
+
+               callback = desc->txd.callback;
+               callback_param = desc->txd.callback_param;
+
+               if (pch->cyclic) {
+                       desc->status = PREP;
+                       list_move_tail(&desc->node, &pch->work_list);
+               } else {
+                       desc->status = FREE;
+                       list_move_tail(&desc->node, &pch->dmac->desc_pool);
+               }
+
+               if (callback) {
+                       spin_unlock_irqrestore(&pch->lock, flags);
+                       callback(callback_param);
+                       spin_lock_irqsave(&pch->lock, flags);
+               }
+       }
+       spin_unlock_irqrestore(&pch->lock, flags);
 }
 
 static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
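Note (not part of the patch): the reworked tasklet above drains pch->completed_list and deliberately drops pch->lock around each client callback, because dmaengine callbacks commonly queue the next transfer and so re-enter the driver, where pl330_tx_submit() takes the same channel lock. A minimal sketch of such a callback; struct my_xfer and my_dma_done are hypothetical names used only to illustrate the pattern:

/* Hypothetical client state, for illustration only. */
struct my_xfer {
	struct dma_chan *chan;
	dma_addr_t next_buf;
	size_t len;
};

/*
 * Completion callback that immediately queues the next buffer. This is
 * only safe because pl330_tasklet() releases pch->lock before invoking
 * it; with the lock held, dmaengine_submit() -> pl330_tx_submit() would
 * contend for the same spinlock.
 */
static void my_dma_done(void *param)
{
	struct my_xfer *xfer = param;
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(xfer->chan, xfer->next_buf,
					 xfer->len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return;

	tx->callback = my_dma_done;
	tx->callback_param = xfer;
	dmaengine_submit(tx);
	dma_async_issue_pending(xfer->chan);
}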
@@ -2409,7 +2369,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
 {
        struct dma_pl330_chan *pch = to_pchan(chan);
-       struct dma_pl330_desc *desc, *_dt;
+       struct dma_pl330_desc *desc;
        unsigned long flags;
        struct dma_pl330_dmac *pdmac = pch->dmac;
        struct dma_slave_config *slave_config;
@@ -2423,12 +2383,18 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
                pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
 
                /* Mark all desc done */
-               list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
-                       desc->status = DONE;
-                       list_move_tail(&desc->node, &list);
+               list_for_each_entry(desc, &pch->work_list , node) {
+                       desc->status = FREE;
+                       dma_cookie_complete(&desc->txd);
                }
 
-               list_splice_tail_init(&list, &pdmac->desc_pool);
+               list_for_each_entry(desc, &pch->completed_list , node) {
+                       desc->status = FREE;
+                       dma_cookie_complete(&desc->txd);
+               }
+
+               list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
+               list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
                spin_unlock_irqrestore(&pch->lock, flags);
                break;
        case DMA_SLAVE_CONFIG:
@@ -2505,6 +2471,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
        /* Assign cookies to all nodes */
        while (!list_empty(&last->node)) {
                desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+               if (pch->cyclic) {
+                       desc->txd.callback = last->txd.callback;
+                       desc->txd.callback_param = last->txd.callback_param;
+               }
 
                dma_cookie_assign(&desc->txd);
 
@@ -2688,45 +2658,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
 {
-       struct dma_pl330_desc *desc;
+       struct dma_pl330_desc *desc = NULL, *first = NULL;
        struct dma_pl330_chan *pch = to_pchan(chan);
+       struct dma_pl330_dmac *pdmac = pch->dmac;
+       unsigned int i;
        dma_addr_t dst;
        dma_addr_t src;
 
-       desc = pl330_get_desc(pch);
-       if (!desc) {
-               dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
-                       __func__, __LINE__);
+       if (len % period_len != 0)
                return NULL;
-       }
 
-       switch (direction) {
-       case DMA_MEM_TO_DEV:
-               desc->rqcfg.src_inc = 1;
-               desc->rqcfg.dst_inc = 0;
-               desc->req.rqtype = MEMTODEV;
-               src = dma_addr;
-               dst = pch->fifo_addr;
-               break;
-       case DMA_DEV_TO_MEM:
-               desc->rqcfg.src_inc = 0;
-               desc->rqcfg.dst_inc = 1;
-               desc->req.rqtype = DEVTOMEM;
-               src = pch->fifo_addr;
-               dst = dma_addr;
-               break;
-       default:
+       if (!is_slave_direction(direction)) {
                dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
                __func__, __LINE__);
                return NULL;
        }
 
-       desc->rqcfg.brst_size = pch->burst_sz;
-       desc->rqcfg.brst_len = 1;
+       for (i = 0; i < len / period_len; i++) {
+               desc = pl330_get_desc(pch);
+               if (!desc) {
+                       dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+                               __func__, __LINE__);
 
-       pch->cyclic = true;
+                       if (!first)
+                               return NULL;
 
-       fill_px(&desc->px, dst, src, period_len);
+                       spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+                       while (!list_empty(&first->node)) {
+                               desc = list_entry(first->node.next,
+                                               struct dma_pl330_desc, node);
+                               list_move_tail(&desc->node, &pdmac->desc_pool);
+                       }
+
+                       list_move_tail(&first->node, &pdmac->desc_pool);
+
+                       spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+                       return NULL;
+               }
+
+               switch (direction) {
+               case DMA_MEM_TO_DEV:
+                       desc->rqcfg.src_inc = 1;
+                       desc->rqcfg.dst_inc = 0;
+                       desc->req.rqtype = MEMTODEV;
+                       src = dma_addr;
+                       dst = pch->fifo_addr;
+                       break;
+               case DMA_DEV_TO_MEM:
+                       desc->rqcfg.src_inc = 0;
+                       desc->rqcfg.dst_inc = 1;
+                       desc->req.rqtype = DEVTOMEM;
+                       src = pch->fifo_addr;
+                       dst = dma_addr;
+                       break;
+               default:
+                       break;
+               }
+
+               desc->rqcfg.brst_size = pch->burst_sz;
+               desc->rqcfg.brst_len = 1;
+               fill_px(&desc->px, dst, src, period_len);
+
+               if (!first)
+                       first = desc;
+               else
+                       list_add_tail(&desc->node, &first->node);
+
+               dma_addr += period_len;
+       }
+
+       if (!desc)
+               return NULL;
+
+       pch->cyclic = true;
+       desc->txd.flags = flags;
 
        return &desc->txd;
 }
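Note (not part of the patch): after the rework above, a cyclic request is built as one descriptor per period, and the tx_submit change earlier in this diff copies the caller's callback to every node, so the callback fires once per period. A hedged client-side sketch, assuming the contemporary dmaengine_prep_dma_cyclic() wrapper; my_start_cyclic and my_period_elapsed are hypothetical names:

/* Hypothetical per-period completion hook. */
static void my_period_elapsed(void *param)
{
	/* e.g. advance a ring-buffer pointer or wake a reader */
}

/*
 * Start a cyclic (ring-buffer) transfer. buf_len must be a multiple of
 * period_len, which pl330_prep_dma_cyclic() above now checks up front.
 */
static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	tx->callback = my_period_elapsed;
	tx->callback_param = NULL;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}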
@@ -2773,6 +2780,28 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
        return &desc->txd;
 }
 
+static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
+                                 struct dma_pl330_desc *first)
+{
+       unsigned long flags;
+       struct dma_pl330_desc *desc;
+
+       if (!first)
+               return;
+
+       spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+       while (!list_empty(&first->node)) {
+               desc = list_entry(first->node.next,
+                               struct dma_pl330_desc, node);
+               list_move_tail(&desc->node, &pdmac->desc_pool);
+       }
+
+       list_move_tail(&first->node, &pdmac->desc_pool);
+
+       spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
 static struct dma_async_tx_descriptor *
 pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2781,7 +2810,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        struct dma_pl330_desc *first, *desc = NULL;
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct scatterlist *sg;
-       unsigned long flags;
        int i;
        dma_addr_t addr;
 
@@ -2801,20 +2829,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        dev_err(pch->dmac->pif.dev,
                                "%s:%d Unable to fetch desc\n",
                                __func__, __LINE__);
-                       if (!first)
-                               return NULL;
-
-                       spin_lock_irqsave(&pdmac->pool_lock, flags);
-
-                       while (!list_empty(&first->node)) {
-                               desc = list_entry(first->node.next,
-                                               struct dma_pl330_desc, node);
-                               list_move_tail(&desc->node, &pdmac->desc_pool);
-                       }
-
-                       list_move_tail(&first->node, &pdmac->desc_pool);
-
-                       spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+                       __pl330_giveback_desc(pdmac, first);
 
                        return NULL;
                }
@@ -2855,6 +2870,25 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
                return IRQ_NONE;
 }
 
+#define PL330_DMA_BUSWIDTHS \
+       BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
+       struct dma_slave_caps *caps)
+{
+       caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
+       caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
+       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       caps->cmd_pause = false;
+       caps->cmd_terminate = true;
+
+       return 0;
+}
+
 static int
 pl330_probe(struct amba_device *adev, const struct amba_id *id)
 {
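Note (not part of the patch): pl330_dma_device_slave_caps() above is the backend for the generic dma_get_slave_caps() helper added by the same topic/api_caps series. A hedged sketch of a client-side query; my_chan_does_word_reads is a hypothetical name, and the field names follow the dma_slave_caps layout used in this patch:

/* Hypothetical check: can this channel do 4-byte DEV_TO_MEM transfers? */
static bool my_chan_does_word_reads(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	return (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
	       (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
}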
@@ -2867,7 +2901,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        int i, ret, irq;
        int num_chan;
 
-       pdat = adev->dev.platform_data;
+       pdat = dev_get_platdata(&adev->dev);
 
        /* Allocate a new DMAC and its Channels */
        pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
@@ -2930,6 +2964,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
                        pch->chan.private = adev->dev.of_node;
 
                INIT_LIST_HEAD(&pch->work_list);
+               INIT_LIST_HEAD(&pch->completed_list);
                spin_lock_init(&pch->lock);
                pch->pl330_chid = NULL;
                pch->chan.device = pd;
@@ -2959,6 +2994,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        pd->device_prep_slave_sg = pl330_prep_slave_sg;
        pd->device_control = pl330_control;
        pd->device_issue_pending = pl330_issue_pending;
+       pd->device_slave_caps = pl330_dma_device_slave_caps;
 
        ret = dma_async_device_register(pd);
        if (ret) {
@@ -2974,6 +3010,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
                        "unable to register DMA to the generic DT DMA helpers\n");
                }
        }
+       /*
+        * This is the limit for transfers with a buswidth of 1, larger
+        * buswidths will have larger limits.
+        */
+       ret = dma_set_max_seg_size(&adev->dev, 1900800);
+       if (ret)
+               dev_err(&adev->dev, "unable to set the seg size\n");
+
 
        dev_info(&adev->dev,
                "Loaded driver for PL330 DMAC-%d\n", adev->periphid);