dmaengine: xgene-dma: Fix the lock to allow clients to submit further requests
author      Rameshwar Prasad Sahu <rsahu@apm.com>
            Fri, 21 Aug 2015 09:03:34 +0000 (14:33 +0530)
committer   Vinod Koul <vinod.koul@intel.com>
            Fri, 21 Aug 2015 10:18:37 +0000 (15:48 +0530)
This patch fixes the cleanup routine so that a client can submit further
requests: the lock is now released before the client's callback function is
invoked.

Signed-off-by: Rameshwar Prasad Sahu <rsahu@apm.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
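
The change follows a common two-phase pattern: completed descriptors are
spliced onto a private list while the channel lock is held, then their
callbacks run after the lock is dropped, so a callback may submit new work
(which re-takes chan->lock) without self-deadlocking. Below is a minimal
userspace C sketch of that pattern, not the driver's actual code: a pthread
mutex stands in for the kernel spinlock, and every name in it (chan, desc,
submit, cleanup_descriptors) is hypothetical.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct desc {
            struct desc *next;
            void (*callback)(void *param);
            void *param;
    };

    struct chan {
            pthread_mutex_t lock;
            struct desc *completed; /* done in hardware, callback not yet run */
    };

    static struct chan ch = { PTHREAD_MUTEX_INITIALIZER, NULL };

    /* Queue a descriptor; takes the same lock the cleanup path takes. */
    static void submit(struct chan *chan, void (*cb)(void *), void *param)
    {
            struct desc *d = malloc(sizeof(*d));

            if (!d)
                    abort();
            d->callback = cb;
            d->param = param;
            pthread_mutex_lock(&chan->lock);
            d->next = chan->completed; /* LIFO for brevity; the driver keeps order */
            chan->completed = d;
            pthread_mutex_unlock(&chan->lock);
    }

    static void cleanup_descriptors(struct chan *chan)
    {
            struct desc *list, *d;

            /* Phase 1: detach the completed descriptors under the lock. */
            pthread_mutex_lock(&chan->lock);
            list = chan->completed;
            chan->completed = NULL;
            pthread_mutex_unlock(&chan->lock);

            /*
             * Phase 2: run callbacks with the lock dropped, so a callback
             * may call submit() (which re-takes the lock) without deadlock.
             */
            while ((d = list)) {
                    list = d->next;
                    if (d->callback)
                            d->callback(d->param);
                    free(d);
            }
    }

    static void noop_cb(void *param) { (void)param; }

    static void resubmit_cb(void *param)
    {
            (void)param;
            /*
             * Before this fix the cleanup routine still held the lock
             * here, so this submit() would self-deadlock.
             */
            submit(&ch, noop_cb, NULL);
            puts("callback resubmitted safely");
    }

    int main(void)
    {
            submit(&ch, resubmit_cb, NULL);
            cleanup_descriptors(&ch); /* runs resubmit_cb with lock dropped */
            cleanup_descriptors(&ch); /* drains the descriptor it queued */
            return 0;
    }

The sketch mirrors the shape of the diff below: the lock is taken around the
list manipulation (the kernel code uses a local ld_completed list and
list_move_tail()), and the callback loop runs only after spin_unlock_bh().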
drivers/dma/xgene-dma.c

index d1c8809a0810fb0c7811e6d8b8d10b4efe15a371..0b82bc00b83ab7e8f49ad02d7d43691ada886553 100644
@@ -763,12 +763,17 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
        struct xgene_dma_ring *ring = &chan->rx_ring;
        struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
        struct xgene_dma_desc_hw *desc_hw;
+       struct list_head ld_completed;
        u8 status;
 
+       INIT_LIST_HEAD(&ld_completed);
+
+       spin_lock_bh(&chan->lock);
+
        /* Clean already completed and acked descriptors */
        xgene_dma_clean_completed_descriptor(chan);
 
-       /* Run the callback for each descriptor, in order */
+       /* Move all completed descriptors to ld completed queue, in order */
        list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
                /* Get subsequent hw descriptor from DMA rx ring */
                desc_hw = &ring->desc_hw[ring->head];
@@ -811,15 +816,17 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
                /* Mark this hw descriptor as processed */
                desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
 
-               xgene_dma_run_tx_complete_actions(chan, desc_sw);
-
-               xgene_dma_clean_running_descriptor(chan, desc_sw);
-
                /*
                 * Decrement the pending transaction count
                 * as we have processed one
                 */
                chan->pending--;
+
+               /*
+                * Delete this node from ld running queue and append it to
+                * ld completed queue for further processing
+                */
+               list_move_tail(&desc_sw->node, &ld_completed);
        }
 
        /*
@@ -828,6 +835,14 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
         * ahead and free the descriptors below.
         */
        xgene_chan_xfer_ld_pending(chan);
+
+       spin_unlock_bh(&chan->lock);
+
+       /* Run the callback for each descriptor, in order */
+       list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
+               xgene_dma_run_tx_complete_actions(chan, desc_sw);
+               xgene_dma_clean_running_descriptor(chan, desc_sw);
+       }
 }
 
 static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
@@ -876,11 +891,11 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
        if (!chan->desc_pool)
                return;
 
-       spin_lock_bh(&chan->lock);
-
        /* Process all running descriptor */
        xgene_dma_cleanup_descriptors(chan);
 
+       spin_lock_bh(&chan->lock);
+
        /* Clean all link descriptor queues */
        xgene_dma_free_desc_list(chan, &chan->ld_pending);
        xgene_dma_free_desc_list(chan, &chan->ld_running);
@@ -1200,15 +1215,11 @@ static void xgene_dma_tasklet_cb(unsigned long data)
 {
        struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
 
-       spin_lock_bh(&chan->lock);
-
        /* Run all cleanup for descriptors which have been completed */
        xgene_dma_cleanup_descriptors(chan);
 
        /* Re-enable DMA channel IRQ */
        enable_irq(chan->rx_irq);
-
-       spin_unlock_bh(&chan->lock);
 }
 
 static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)