git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/dma/pxp/pxp_dma_v2.c
ENGR00294115 PXP: correct the pxp_dispatch thread exit logic
[karo-tx-linux.git] / drivers / dma / pxp / pxp_dma_v2.c
index 30d6a14e20ce4e8c3595e9128ace4d094c774599..63d5d4e91b512f5416893606c6f665dff3880a42 100644 (file)
@@ -919,8 +919,13 @@ static void pxp_set_s0buf(struct pxps *pxp)
                U1 = U + offset;
                V = U + ((s0_params->width * s0_params->height) >> s);
                V1 = V + offset;
-               __raw_writel(U1, pxp->base + HW_PXP_PS_UBUF);
-               __raw_writel(V1, pxp->base + HW_PXP_PS_VBUF);
+               if (s0_params->pixel_fmt == PXP_PIX_FMT_YVU420P) {
+                       __raw_writel(V1, pxp->base + HW_PXP_PS_UBUF);
+                       __raw_writel(U1, pxp->base + HW_PXP_PS_VBUF);
+               } else {
+                       __raw_writel(U1, pxp->base + HW_PXP_PS_UBUF);
+                       __raw_writel(V1, pxp->base + HW_PXP_PS_VBUF);
+               }
        } else if ((s0_params->pixel_fmt == PXP_PIX_FMT_NV12) ||
                 (s0_params->pixel_fmt == PXP_PIX_FMT_NV21) ||
                 (s0_params->pixel_fmt == PXP_PIX_FMT_NV16) ||
@@ -1112,11 +1117,6 @@ static void pxpdma_dostart_work(struct pxps *pxp)
        struct pxp_tx_desc *desc = NULL;
 
        spin_lock_irqsave(&pxp->lock, flags);
-       if (list_empty(&head)) {
-               pxp->pxp_ongoing = 0;
-               spin_unlock_irqrestore(&pxp->lock, flags);
-               return;
-       }
 
        desc = list_entry(head.next, struct pxp_tx_desc, list);
        pxp_chan = to_pxp_channel(desc->txd.chan);
@@ -1131,12 +1131,16 @@ static void pxpdma_dostart_work(struct pxps *pxp)
        spin_unlock_irqrestore(&pxp->lock, flags);
 }
 
-static void pxpdma_dequeue(struct pxp_channel *pxp_chan, struct list_head *list)
+static void pxpdma_dequeue(struct pxp_channel *pxp_chan, struct pxps *pxp)
 {
+       unsigned long flags;
        struct pxp_tx_desc *desc = NULL;
+
        do {
                desc = pxpdma_first_queued(pxp_chan);
-               list_move_tail(&desc->list, list);
+               spin_lock_irqsave(&pxp->lock, flags);
+               list_move_tail(&desc->list, &head);
+               spin_unlock_irqrestore(&pxp->lock, flags);
        } while (!list_empty(&pxp_chan->queue));
 }
 
@@ -1145,11 +1149,11 @@ static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
        struct pxp_tx_desc *desc = to_tx_desc(tx);
        struct pxp_channel *pxp_chan = to_pxp_channel(tx->chan);
        dma_cookie_t cookie;
-       unsigned long flags;
 
        dev_dbg(&pxp_chan->dma_chan.dev->device, "received TX\n");
 
-       mutex_lock(&pxp_chan->chan_mutex);
+       /* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
+       spin_lock(&pxp_chan->lock);
 
        cookie = pxp_chan->dma_chan.cookie;
 
@@ -1160,17 +1164,13 @@ static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
        pxp_chan->dma_chan.cookie = cookie;
        tx->cookie = cookie;
 
-       /* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
-       spin_lock_irqsave(&pxp_chan->lock, flags);
-
        /* Here we add the tx descriptor to our PxP task queue. */
        list_add_tail(&desc->list, &pxp_chan->queue);
 
-       spin_unlock_irqrestore(&pxp_chan->lock, flags);
+       spin_unlock(&pxp_chan->lock);
 
        dev_dbg(&pxp_chan->dma_chan.dev->device, "done TX\n");
 
-       mutex_unlock(&pxp_chan->chan_mutex);
        return cookie;
 }
 
@@ -1292,7 +1292,6 @@ static struct dma_async_tx_descriptor *pxp_prep_slave_sg(struct dma_chan *chan,
        struct pxp_tx_desc *desc = NULL;
        struct pxp_tx_desc *first = NULL, *prev = NULL;
        struct scatterlist *sg;
-       unsigned long flags;
        dma_addr_t phys_addr;
        int i;
 
@@ -1305,11 +1304,9 @@ static struct dma_async_tx_descriptor *pxp_prep_slave_sg(struct dma_chan *chan,
        if (unlikely(sg_len < 2))
                return NULL;
 
-       spin_lock_irqsave(&pxp_chan->lock, flags);
        for_each_sg(sgl, sg, sg_len, i) {
                desc = pxpdma_desc_alloc(pxp_chan);
                if (!desc) {
-                       spin_unlock_irqrestore(&pxp_chan->lock, flags);
                        dev_err(chan->device->dev, "no enough memory to allocate tx descriptor\n");
                        return NULL;
                }
@@ -1333,7 +1330,6 @@ static struct dma_async_tx_descriptor *pxp_prep_slave_sg(struct dma_chan *chan,
 
                prev = desc;
        }
-       spin_unlock_irqrestore(&pxp_chan->lock, flags);
 
        pxp->pxp_conf_state.layer_nr = sg_len;
        first->txd.flags = tx_flags;
@@ -1349,21 +1345,18 @@ static void pxp_issue_pending(struct dma_chan *chan)
        struct pxp_channel *pxp_chan = to_pxp_channel(chan);
        struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
        struct pxps *pxp = to_pxp(pxp_dma);
-       unsigned long flags0, flags;
 
-       spin_lock_irqsave(&pxp->lock, flags0);
-       spin_lock_irqsave(&pxp_chan->lock, flags);
+       spin_lock(&pxp_chan->lock);
 
-       if (!list_empty(&pxp_chan->queue)) {
-               pxpdma_dequeue(pxp_chan, &head);
-               pxp_chan->status = PXP_CHANNEL_READY;
-       } else {
-               spin_unlock_irqrestore(&pxp_chan->lock, flags);
-               spin_unlock_irqrestore(&pxp->lock, flags0);
+       if (list_empty(&pxp_chan->queue)) {
+               spin_unlock(&pxp_chan->lock);
                return;
        }
-       spin_unlock_irqrestore(&pxp_chan->lock, flags);
-       spin_unlock_irqrestore(&pxp->lock, flags0);
+
+       pxpdma_dequeue(pxp_chan, pxp);
+       pxp_chan->status = PXP_CHANNEL_READY;
+
+       spin_unlock(&pxp_chan->lock);
 
        pxp_clk_enable(pxp);
        wake_up_interruptible(&pxp->thread_waitq);
@@ -1385,9 +1378,9 @@ static int pxp_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;
 
-       mutex_lock(&pxp_chan->chan_mutex);
+       spin_lock(&pxp_chan->lock);
        __pxp_terminate_all(chan);
-       mutex_unlock(&pxp_chan->chan_mutex);
+       spin_unlock(&pxp_chan->lock);
 
        return 0;
 }
@@ -1425,13 +1418,13 @@ static void pxp_free_chan_resources(struct dma_chan *chan)
 {
        struct pxp_channel *pxp_chan = to_pxp_channel(chan);
 
-       mutex_lock(&pxp_chan->chan_mutex);
+       spin_lock(&pxp_chan->lock);
 
        __pxp_terminate_all(chan);
 
        pxp_chan->status = PXP_CHANNEL_FREE;
 
-       mutex_unlock(&pxp_chan->chan_mutex);
+       spin_unlock(&pxp_chan->lock);
 }
 
 static enum dma_status pxp_tx_status(struct dma_chan *chan,
@@ -1589,7 +1582,6 @@ static int pxp_dma_init(struct pxps *pxp)
                struct dma_chan *dma_chan = &pxp_chan->dma_chan;
 
                spin_lock_init(&pxp_chan->lock);
-               mutex_init(&pxp_chan->chan_mutex);
 
                /* Only one EOF IRQ for PxP, shared by all channels */
                pxp_chan->eof_irq = pxp->irq;
@@ -1681,6 +1673,9 @@ static int pxp_dispatch_thread(void *argv)
                if (signal_pending(current))
                        continue;
 
+               if (kthread_should_stop())
+                       break;
+
                spin_lock_irqsave(&pxp->lock, flags);
                pxp->pxp_ongoing = 1;
                spin_unlock_irqrestore(&pxp->lock, flags);