ENGR00294115 PXP: correct the pxp_dispatch thread exit logic

diff --git a/drivers/dma/pxp/pxp_dma_v2.c b/drivers/dma/pxp/pxp_dma_v2.c
index 279c033e3ea2ebb4d9d4f2d49328dafcf8350234..63d5d4e91b512f5416893606c6f665dff3880a42 100644
--- a/drivers/dma/pxp/pxp_dma_v2.c
+++ b/drivers/dma/pxp/pxp_dma_v2.c
@@ -47,6 +47,7 @@
 static LIST_HEAD(head);
 static int timeout_in_ms = 600;
 static unsigned int block_size;
+static struct kmem_cache *tx_desc_cache;
 
 struct pxp_dma {
        struct dma_device dma;
@@ -918,8 +919,13 @@ static void pxp_set_s0buf(struct pxps *pxp)
                U1 = U + offset;
                V = U + ((s0_params->width * s0_params->height) >> s);
                V1 = V + offset;
-               __raw_writel(U1, pxp->base + HW_PXP_PS_UBUF);
-               __raw_writel(V1, pxp->base + HW_PXP_PS_VBUF);
+               if (s0_params->pixel_fmt == PXP_PIX_FMT_YVU420P) {
+                       __raw_writel(V1, pxp->base + HW_PXP_PS_UBUF);
+                       __raw_writel(U1, pxp->base + HW_PXP_PS_VBUF);
+               } else {
+                       __raw_writel(U1, pxp->base + HW_PXP_PS_UBUF);
+                       __raw_writel(V1, pxp->base + HW_PXP_PS_VBUF);
+               }
        } else if ((s0_params->pixel_fmt == PXP_PIX_FMT_NV12) ||
                 (s0_params->pixel_fmt == PXP_PIX_FMT_NV21) ||
                 (s0_params->pixel_fmt == PXP_PIX_FMT_NV16) ||
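The swap above exists because PXP_PIX_FMT_YVU420P (YV12) stores its chroma planes V-first, while plain YUV420P (I420) stores them U-first; without it, Cr data would land in the PS U buffer. A minimal sketch of the plane-base arithmetic for a contiguous planar 4:2:0 frame (hypothetical helper, not driver code):

	/* Sketch: plane base addresses for a contiguous 4:2:0 frame.
	 * I420 (YUV420P) plane order: Y, Cb(U), Cr(V).
	 * YV12 (YVU420P) plane order: Y, Cr(V), Cb(U).
	 */
	static void yuv420_plane_bases(unsigned long y, unsigned int w,
				       unsigned int h, bool is_yv12,
				       unsigned long *u, unsigned long *v)
	{
		unsigned long c0 = y + w * h;		/* first chroma plane */
		unsigned long c1 = c0 + (w * h) / 4;	/* second chroma plane */

		*u = is_yv12 ? c1 : c0;
		*v = is_yv12 ? c0 : c1;
	}
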
@@ -1059,11 +1065,6 @@ static void pxp_clkoff_timer(unsigned long arg)
                          jiffies + msecs_to_jiffies(timeout_in_ms));
 }
 
-static struct pxp_tx_desc *pxpdma_first_active(struct pxp_channel *pxp_chan)
-{
-       return list_entry(pxp_chan->active_list.next, struct pxp_tx_desc, list);
-}
-
 static struct pxp_tx_desc *pxpdma_first_queued(struct pxp_channel *pxp_chan)
 {
        return list_entry(pxp_chan->queue.next, struct pxp_tx_desc, list);
@@ -1078,9 +1079,8 @@ static void __pxpdma_dostart(struct pxp_channel *pxp_chan)
        struct pxp_tx_desc *child;
        int i = 0;
 
-       /* so far we presume only one transaction on active_list */
        /* S0 */
-       desc = pxpdma_first_active(pxp_chan);
+       desc = list_first_entry(&head, struct pxp_tx_desc, list);
        memcpy(&pxp->pxp_conf_state.s0_param,
               &desc->layer_param.s0_param, sizeof(struct pxp_layer_param));
        memcpy(&pxp->pxp_conf_state.proc_data,
@@ -1113,25 +1113,15 @@ static void __pxpdma_dostart(struct pxp_channel *pxp_chan)
 static void pxpdma_dostart_work(struct pxps *pxp)
 {
        struct pxp_channel *pxp_chan = NULL;
-       unsigned long flags, flags1;
+       unsigned long flags;
+       struct pxp_tx_desc *desc = NULL;
 
        spin_lock_irqsave(&pxp->lock, flags);
-       if (list_empty(&head)) {
-               pxp->pxp_ongoing = 0;
-               spin_unlock_irqrestore(&pxp->lock, flags);
-               return;
-       }
 
-       pxp_chan = list_entry(head.next, struct pxp_channel, list);
+       desc = list_entry(head.next, struct pxp_tx_desc, list);
+       pxp_chan = to_pxp_channel(desc->txd.chan);
 
-       spin_lock_irqsave(&pxp_chan->lock, flags1);
-       if (!list_empty(&pxp_chan->active_list)) {
-               struct pxp_tx_desc *desc;
-               /* REVISIT */
-               desc = pxpdma_first_active(pxp_chan);
-               __pxpdma_dostart(pxp_chan);
-       }
-       spin_unlock_irqrestore(&pxp_chan->lock, flags1);
+       __pxpdma_dostart(pxp_chan);
 
        /* Configure PxP */
        pxp_config(pxp, pxp_chan);
@@ -1141,12 +1131,16 @@ static void pxpdma_dostart_work(struct pxps *pxp)
        spin_unlock_irqrestore(&pxp->lock, flags);
 }
 
-static void pxpdma_dequeue(struct pxp_channel *pxp_chan, struct list_head *list)
+static void pxpdma_dequeue(struct pxp_channel *pxp_chan, struct pxps *pxp)
 {
+       unsigned long flags;
        struct pxp_tx_desc *desc = NULL;
+
        do {
                desc = pxpdma_first_queued(pxp_chan);
-               list_move_tail(&desc->list, list);
+               spin_lock_irqsave(&pxp->lock, flags);
+               list_move_tail(&desc->list, &head);
+               spin_unlock_irqrestore(&pxp->lock, flags);
        } while (!list_empty(&pxp_chan->queue));
 }
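Note the nesting this establishes: pxpdma_dequeue() is entered with pxp_chan->lock held (see pxp_issue_pending() below) and takes pxp->lock inside, so the channel lock is always the outer of the two. Sketched as a comment, assuming only the call sites visible in this diff:

	/*
	 * Lock ordering implied by this patch (assumption):
	 *
	 *   pxp_issue_pending()
	 *     spin_lock(&pxp_chan->lock);                  outer, per channel
	 *       pxpdma_dequeue()
	 *         spin_lock_irqsave(&pxp->lock, flags);    inner, guards 'head'
	 *         list_move_tail(&desc->list, &head);
	 *         spin_unlock_irqrestore(&pxp->lock, flags);
	 *     spin_unlock(&pxp_chan->lock);
	 */
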
 
@@ -1155,11 +1149,11 @@ static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
        struct pxp_tx_desc *desc = to_tx_desc(tx);
        struct pxp_channel *pxp_chan = to_pxp_channel(tx->chan);
        dma_cookie_t cookie;
-       unsigned long flags;
 
        dev_dbg(&pxp_chan->dma_chan.dev->device, "received TX\n");
 
-       mutex_lock(&pxp_chan->chan_mutex);
+       /* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
+       spin_lock(&pxp_chan->lock);
 
        cookie = pxp_chan->dma_chan.cookie;
 
@@ -1170,50 +1164,16 @@ static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
        pxp_chan->dma_chan.cookie = cookie;
        tx->cookie = cookie;
 
-       /* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
-       spin_lock_irqsave(&pxp_chan->lock, flags);
-
        /* Here we add the tx descriptor to our PxP task queue. */
        list_add_tail(&desc->list, &pxp_chan->queue);
 
-       spin_unlock_irqrestore(&pxp_chan->lock, flags);
+       spin_unlock(&pxp_chan->lock);
 
        dev_dbg(&pxp_chan->dma_chan.dev->device, "done TX\n");
 
-       mutex_unlock(&pxp_chan->chan_mutex);
        return cookie;
 }
 
-/* Called with pxp_chan->chan_mutex held */
-static int pxp_desc_alloc(struct pxp_channel *pxp_chan, int n)
-{
-       struct pxp_tx_desc *desc = vmalloc(n * sizeof(struct pxp_tx_desc));
-
-       if (!desc)
-               return -ENOMEM;
-
-       pxp_chan->n_tx_desc = n;
-       pxp_chan->desc = desc;
-       INIT_LIST_HEAD(&pxp_chan->active_list);
-       INIT_LIST_HEAD(&pxp_chan->queue);
-       INIT_LIST_HEAD(&pxp_chan->free_list);
-
-       while (n--) {
-               struct dma_async_tx_descriptor *txd = &desc->txd;
-
-               memset(txd, 0, sizeof(*txd));
-               INIT_LIST_HEAD(&desc->tx_list);
-               dma_async_tx_descriptor_init(txd, &pxp_chan->dma_chan);
-               txd->tx_submit = pxp_tx_submit;
-
-               list_add(&desc->list, &pxp_chan->free_list);
-
-               desc++;
-       }
-
-       return 0;
-}
-
 /**
  * pxp_init_channel() - initialize a PXP channel.
  * @pxp_dma:   PXP DMA context.
@@ -1223,9 +1183,7 @@ static int pxp_desc_alloc(struct pxp_channel *pxp_chan, int n)
 static int pxp_init_channel(struct pxp_dma *pxp_dma,
                            struct pxp_channel *pxp_chan)
 {
-       unsigned long flags;
-       struct pxps *pxp = to_pxp(pxp_dma);
-       int ret = 0, n_desc = 0;
+       int ret = 0;
 
        /*
         * We are using _virtual_ channel here.
@@ -1234,34 +1192,7 @@ static int pxp_init_channel(struct pxp_dma *pxp_dma,
         * (i.e., pxp_tx_desc) here.
         */
 
-       spin_lock_irqsave(&pxp->lock, flags);
-
-       /* max desc nr: S0+OL+OUT = 1+8+1 */
-       n_desc = 16;
-
-       spin_unlock_irqrestore(&pxp->lock, flags);
-
-       if (n_desc && !pxp_chan->desc)
-               ret = pxp_desc_alloc(pxp_chan, n_desc);
-
-       return ret;
-}
-
-/**
- * pxp_uninit_channel() - uninitialize a PXP channel.
- * @pxp_dma:   PXP DMA context.
- * @pchan:  pointer to the channel object.
- * @return      0 on success or negative error code on failure.
- */
-static int pxp_uninit_channel(struct pxp_dma *pxp_dma,
-                             struct pxp_channel *pxp_chan)
-{
-       int ret = 0;
-
-       if (pxp_chan->desc)
-               vfree(pxp_chan->desc);
-
-       pxp_chan->desc = NULL;
+       INIT_LIST_HEAD(&pxp_chan->queue);
 
        return ret;
 }
@@ -1271,6 +1202,7 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
        struct pxps *pxp = dev_id;
        struct pxp_channel *pxp_chan;
        struct pxp_tx_desc *desc;
+       struct pxp_tx_desc *child, *_child;
        dma_async_tx_callback callback;
        void *callback_param;
        unsigned long flags;
@@ -1291,18 +1223,9 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
                return IRQ_NONE;
        }
 
-       pxp_chan = list_entry(head.next, struct pxp_channel, list);
-
-       if (list_empty(&pxp_chan->active_list)) {
-               pr_debug("PXP_IRQ pxp_chan->active_list empty. chan_id %d\n",
-                        pxp_chan->dma_chan.chan_id);
-               pxp->pxp_ongoing = 0;
-               spin_unlock_irqrestore(&pxp->lock, flags);
-               return IRQ_NONE;
-       }
-
        /* Get descriptor and call callback */
-       desc = pxpdma_first_active(pxp_chan);
+       desc = list_entry(head.next, struct pxp_tx_desc, list);
+       pxp_chan = to_pxp_channel(desc->txd.chan);
 
        pxp_chan->completed = desc->txd.cookie;
 
@@ -1317,11 +1240,12 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
 
        pxp_chan->status = PXP_CHANNEL_INITIALIZED;
 
-       list_splice_init(&desc->tx_list, &pxp_chan->free_list);
-       list_move(&desc->list, &pxp_chan->free_list);
-
-       if (list_empty(&pxp_chan->active_list))
-               list_del_init(&pxp_chan->list);
+       list_for_each_entry_safe(child, _child, &desc->tx_list, list) {
+               list_del_init(&child->list);
+               kmem_cache_free(tx_desc_cache, (void *)child);
+       }
+       list_del_init(&desc->list);
+       kmem_cache_free(tx_desc_cache, (void *)desc);
 
        complete(&pxp->complete);
        pxp->pxp_ongoing = 0;
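The _safe iterator matters here: each child is unlinked and handed back to the slab cache inside the loop, and a plain list_for_each_entry() would read the freed node to find its successor. The same pattern in isolation (hypothetical item type, not driver code):

	struct item {
		struct list_head list;
	};

	static void free_all(struct list_head *q, struct kmem_cache *cache)
	{
		struct item *it, *tmp;

		/* 'tmp' holds the next node before 'it' is freed */
		list_for_each_entry_safe(it, tmp, q, list) {
			list_del_init(&it->list);
			kmem_cache_free(cache, it);
		}
	}
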
@@ -1332,35 +1256,23 @@ static irqreturn_t pxp_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-/* called with pxp_chan->lock held */
-static struct pxp_tx_desc *pxpdma_desc_get(struct pxp_channel *pxp_chan)
+/* Allocate and free DMA tx descriptors dynamically. */
+static struct pxp_tx_desc *pxpdma_desc_alloc(struct pxp_channel *pxp_chan)
 {
-       struct pxp_tx_desc *desc, *_desc;
-       struct pxp_tx_desc *ret = NULL;
+       struct pxp_tx_desc *desc = NULL;
+       struct dma_async_tx_descriptor *txd = NULL;
 
-       list_for_each_entry_safe(desc, _desc, &pxp_chan->free_list, list) {
-               list_del_init(&desc->list);
-               ret = desc;
-               break;
-       }
+       desc = kmem_cache_alloc(tx_desc_cache, GFP_KERNEL | __GFP_ZERO);
+       if (desc == NULL)
+               return NULL;
 
-       return ret;
-}
+       INIT_LIST_HEAD(&desc->list);
+       INIT_LIST_HEAD(&desc->tx_list);
+       txd = &desc->txd;
+       dma_async_tx_descriptor_init(txd, &pxp_chan->dma_chan);
+       txd->tx_submit = pxp_tx_submit;
 
-/* called with pxp_chan->lock held */
-static void pxpdma_desc_put(struct pxp_channel *pxp_chan,
-                           struct pxp_tx_desc *desc)
-{
-       if (desc) {
-               struct device *dev = &pxp_chan->dma_chan.dev->device;
-               struct pxp_tx_desc *child;
-
-               list_for_each_entry(child, &desc->tx_list, list)
-                   dev_info(dev, "moving child desc %p to freelist\n", child);
-               list_splice_init(&desc->tx_list, &pxp_chan->free_list);
-               dev_info(dev, "moving desc %p to freelist\n", desc);
-               list_add(&desc->list, &pxp_chan->free_list);
-       }
+       return desc;
 }
 
 /* Allocate and initialise a transfer descriptor. */
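A side note on pxpdma_desc_alloc() above: GFP_KERNEL | __GFP_ZERO returns a zeroed object, which is what lets the old memset() of the txd disappear. kmem_cache_zalloc() spells the same allocation more idiomatically (equivalent call, not what the patch uses):

	/* equivalent to kmem_cache_alloc(tx_desc_cache,
	 * GFP_KERNEL | __GFP_ZERO) */
	desc = kmem_cache_zalloc(tx_desc_cache, GFP_KERNEL);
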
@@ -1380,7 +1292,6 @@ static struct dma_async_tx_descriptor *pxp_prep_slave_sg(struct dma_chan *chan,
        struct pxp_tx_desc *desc = NULL;
        struct pxp_tx_desc *first = NULL, *prev = NULL;
        struct scatterlist *sg;
-       unsigned long flags;
        dma_addr_t phys_addr;
        int i;
 
@@ -1393,13 +1304,10 @@ static struct dma_async_tx_descriptor *pxp_prep_slave_sg(struct dma_chan *chan,
        if (unlikely(sg_len < 2))
                return NULL;
 
-       spin_lock_irqsave(&pxp_chan->lock, flags);
        for_each_sg(sgl, sg, sg_len, i) {
-               desc = pxpdma_desc_get(pxp_chan);
+               desc = pxpdma_desc_alloc(pxp_chan);
                if (!desc) {
-                       pxpdma_desc_put(pxp_chan, first);
-                       dev_err(chan->device->dev, "Can't get DMA desc.\n");
-                       spin_unlock_irqrestore(&pxp_chan->lock, flags);
+                       dev_err(chan->device->dev, "not enough memory to allocate tx descriptor\n");
                        return NULL;
                }
 
@@ -1422,7 +1330,6 @@ static struct dma_async_tx_descriptor *pxp_prep_slave_sg(struct dma_chan *chan,
 
                prev = desc;
        }
-       spin_unlock_irqrestore(&pxp_chan->lock, flags);
 
        pxp->pxp_conf_state.layer_nr = sg_len;
        first->txd.flags = tx_flags;
@@ -1438,38 +1345,18 @@ static void pxp_issue_pending(struct dma_chan *chan)
        struct pxp_channel *pxp_chan = to_pxp_channel(chan);
        struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
        struct pxps *pxp = to_pxp(pxp_dma);
-       unsigned long flags0, flags;
-       struct list_head *iter;
-
-       spin_lock_irqsave(&pxp->lock, flags0);
-       spin_lock_irqsave(&pxp_chan->lock, flags);
-
-       if (!list_empty(&pxp_chan->queue)) {
-               pxpdma_dequeue(pxp_chan, &pxp_chan->active_list);
-               pxp_chan->status = PXP_CHANNEL_READY;
-               iter = head.next;
-               /* Avoid adding a pxp channel to head list which
-                * has been already listed in it. And this may
-                * cause the head list to be broken down.
-                */
-               if (list_empty(&head)) {
-                       list_add_tail(&pxp_chan->list, &head);
-               } else {
-                       while (iter != &head) {
-                               if (&pxp_chan->list == iter)
-                                       break;
-                               iter = iter->next;
-                       }
-                       if (iter == &head)
-                               list_add_tail(&pxp_chan->list, &head);
-               }
-       } else {
-               spin_unlock_irqrestore(&pxp_chan->lock, flags);
-               spin_unlock_irqrestore(&pxp->lock, flags0);
+
+       spin_lock(&pxp_chan->lock);
+
+       if (list_empty(&pxp_chan->queue)) {
+               spin_unlock(&pxp_chan->lock);
                return;
        }
-       spin_unlock_irqrestore(&pxp_chan->lock, flags);
-       spin_unlock_irqrestore(&pxp->lock, flags0);
+
+       pxpdma_dequeue(pxp_chan, pxp);
+       pxp_chan->status = PXP_CHANNEL_READY;
+
+       spin_unlock(&pxp_chan->lock);
 
        pxp_clk_enable(pxp);
        wake_up_interruptible(&pxp->thread_waitq);
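pxp_issue_pending() is now just the producer half of a handshake: it migrates the channel's queue onto the global head list and kicks the dispatch thread, which performs the actual hardware start. Condensed to its two sides (the consumer's wait condition is an assumption, sketched from the exit-logic hunk below):

	/* producer: pxp_issue_pending() */
	pxpdma_dequeue(pxp_chan, pxp);	/* channel queue -> global head */
	wake_up_interruptible(&pxp->thread_waitq);

	/* consumer: pxp_dispatch_thread(), sketch */
	wait_event_interruptible(pxp->thread_waitq,
				 !list_empty(&head) || kthread_should_stop());
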
@@ -1478,14 +1365,6 @@ static void pxp_issue_pending(struct dma_chan *chan)
 static void __pxp_terminate_all(struct dma_chan *chan)
 {
        struct pxp_channel *pxp_chan = to_pxp_channel(chan);
-       unsigned long flags;
-
-       /* pchan->queue is modified in ISR, have to spinlock */
-       spin_lock_irqsave(&pxp_chan->lock, flags);
-       list_splice_init(&pxp_chan->queue, &pxp_chan->free_list);
-       list_splice_init(&pxp_chan->active_list, &pxp_chan->free_list);
-
-       spin_unlock_irqrestore(&pxp_chan->lock, flags);
 
        pxp_chan->status = PXP_CHANNEL_INITIALIZED;
 }
@@ -1499,9 +1378,9 @@ static int pxp_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;
 
-       mutex_lock(&pxp_chan->chan_mutex);
+       spin_lock(&pxp_chan->lock);
        __pxp_terminate_all(chan);
-       mutex_unlock(&pxp_chan->chan_mutex);
+       spin_unlock(&pxp_chan->lock);
 
        return 0;
 }
@@ -1538,17 +1417,14 @@ err_chan:
 static void pxp_free_chan_resources(struct dma_chan *chan)
 {
        struct pxp_channel *pxp_chan = to_pxp_channel(chan);
-       struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
 
-       mutex_lock(&pxp_chan->chan_mutex);
+       spin_lock(&pxp_chan->lock);
 
        __pxp_terminate_all(chan);
 
        pxp_chan->status = PXP_CHANNEL_FREE;
 
-       pxp_uninit_channel(pxp_dma, pxp_chan);
-
-       mutex_unlock(&pxp_chan->chan_mutex);
+       spin_unlock(&pxp_chan->lock);
 }
 
 static enum dma_status pxp_tx_status(struct dma_chan *chan,
@@ -1706,7 +1582,6 @@ static int pxp_dma_init(struct pxps *pxp)
                struct dma_chan *dma_chan = &pxp_chan->dma_chan;
 
                spin_lock_init(&pxp_chan->lock);
-               mutex_init(&pxp_chan->chan_mutex);
 
                /* Only one EOF IRQ for PxP, shared by all channels */
                pxp_chan->eof_irq = pxp->irq;
@@ -1798,6 +1673,9 @@ static int pxp_dispatch_thread(void *argv)
                if (signal_pending(current))
                        continue;
 
+               if (kthread_should_stop())
+                       break;
+
                spin_lock_irqsave(&pxp->lock, flags);
                pxp->pxp_ongoing = 1;
                spin_unlock_irqrestore(&pxp->lock, flags);
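This hunk is the fix named in the commit subject: once the wait returns, the thread must notice a pending kthread_stop() before it dereferences head, which kthread_stop() leaves empty. A minimal sketch of the resulting loop shape (the wait condition and dispatch body are assumptions, not copied from this file):

	static int pxp_dispatch_thread(void *argv)
	{
		struct pxps *pxp = argv;

		while (!kthread_should_stop()) {
			if (wait_event_interruptible(pxp->thread_waitq,
						     !list_empty(&head) ||
						     kthread_should_stop()))
				continue;	/* woken by a signal */

			if (kthread_should_stop())
				break;		/* kthread_stop() was called */

			/* ... pop the first descriptor from 'head' and
			 * start the hardware on it ... */
		}

		return 0;
	}
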
@@ -1894,6 +1772,12 @@ static int pxp_probe(struct platform_device *pdev)
                goto exit;
        }
        init_waitqueue_head(&pxp->thread_waitq);
+       tx_desc_cache = kmem_cache_create("tx_desc", sizeof(struct pxp_tx_desc),
+                                         0, SLAB_HWCACHE_ALIGN, NULL);
+       if (!tx_desc_cache) {
+               err = -ENOMEM;
+               goto exit;
+       }
 
        register_pxp_device();
 
@@ -1908,6 +1792,7 @@ static int pxp_remove(struct platform_device *pdev)
        struct pxps *pxp = platform_get_drvdata(pdev);
 
        unregister_pxp_device();
+       kmem_cache_destroy(tx_desc_cache);
        kthread_stop(pxp->dispatch);
        cancel_work_sync(&pxp->work);
        del_timer_sync(&pxp->clk_timer);