DMA: PL08x: fix infinite wait when terminating transfers
[mv-sheeva.git] / drivers/dma/amba-pl08x.c
index be7fa174d6c0460a3ce443bc7572c46469b45bc4..8321a3997c95772c7ae9588fa75feee960a3ebaf 100644
  * this program; if not, write to the Free Software Foundation, Inc., 59
  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  *
- * The full GNU General Public License is in this distribution in the
- * file called COPYING.
+ * The full GNU General Public License is in this distribution in the file
+ * called COPYING.
  *
  * Documentation: ARM DDI 0196G == PL080
- * Documentation: ARM DDI 0218E        == PL081
+ * Documentation: ARM DDI 0218E == PL081
  *
- * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
- * any channel.
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
+ * channel.
  *
  * The PL080 has 8 channels available for simultaneous use, and the PL081
  * has only two channels. So on these DMA controllers the number of channels
 #define DRIVER_NAME    "pl08xdmac"
 
 /**
- * struct vendor_data - vendor-specific config parameters
- * for PL08x derivatives
+ * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
- * @dualmaster: whether this version supports dual AHB masters
- * or not.
+ * @dualmaster: whether this version supports dual AHB masters or not.
  */
 struct vendor_data {
        u8 channels;
@@ -173,6 +171,11 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
        return container_of(chan, struct pl08x_dma_chan, chan);
 }
 
+static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
+{
+       return container_of(tx, struct pl08x_txd, tx);
+}
+
 /*
  * Physical channel handling
  */
@@ -236,10 +239,8 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
  *
  * Disabling individual channels could lose data.
  *
- * Disable the peripheral DMA after disabling the DMAC
- * in order to allow the DMAC FIFO to drain, and
- * hence allow the channel to show inactive
- *
+ * Disable the peripheral DMA after disabling the DMAC in order to allow
+ * the DMAC FIFO to drain, and hence allow the channel to show inactive
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
@@ -266,19 +267,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 }
 
 
-/* Stops the channel */
-static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+/*
+ * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
+ * clears any pending interrupt status.  This should not be used for
+ * an on-going transfer, but as a method of shutting down a channel
+ * (e.g. when it's no longer used) or terminating a transfer.
+ */
+static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
+       struct pl08x_phy_chan *ch)
 {
-       u32 val;
+       u32 val = readl(ch->base + PL080_CH_CONFIG);
 
-       pl08x_pause_phy_chan(ch);
+       val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
+                PL080_CONFIG_TC_IRQ_MASK);
 
-       /* Disable channel */
-       val = readl(ch->base + PL080_CH_CONFIG);
-       val &= ~PL080_CONFIG_ENABLE;
-       val &= ~PL080_CONFIG_ERR_IRQ_MASK;
-       val &= ~PL080_CONFIG_TC_IRQ_MASK;
        writel(val, ch->base + PL080_CH_CONFIG);
+
+       writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
+       writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
 }
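
For context on the fix itself: the old pl08x_stop_phy_chan() first went through the pause path, which halts the channel and then busy-waits for it to report inactive. If the peripheral never drains the FIFO, that wait never ends, which is the infinite wait this patch removes. Below is a minimal sketch of that pause-and-wait sequence, paraphrased from driver code outside this hunk (pl08x_phy_channel_busy() and PL080_CONFIG_HALT are assumed from the rest of the file; the function name here is made up for illustration):

	/* Halt the channel and spin until the DMAC reports it inactive */
	static void pl08x_halt_and_drain_sketch(struct pl08x_phy_chan *ch)
	{
		u32 val = readl(ch->base + PL080_CH_CONFIG);

		/* HALT lets the FIFO drain instead of cutting the transfer dead */
		val |= PL080_CONFIG_HALT;
		writel(val, ch->base + PL080_CH_CONFIG);

		/* Unbounded wait: never returns if the slave stops draining */
		while (pl08x_phy_channel_busy(ch))
			cpu_relax();
	}

pl08x_terminate_phy_chan() above avoids that window entirely: it clears ENABLE and both IRQ masks in a single write, then acks any latched error and terminal-count status, so it is safe to call on a channel that will never drain.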
 
 static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -348,9 +354,9 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
        }
 
        /* Sum up all queued transactions */
-       if (!list_empty(&plchan->desc_list)) {
+       if (!list_empty(&plchan->pend_list)) {
                struct pl08x_txd *txdi;
-               list_for_each_entry(txdi, &plchan->desc_list, node) {
+               list_for_each_entry(txdi, &plchan->pend_list, node) {
                        bytes += txdi->len;
                }
        }
@@ -362,6 +368,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 
 /*
  * Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
  */
 static struct pl08x_phy_chan *
 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
@@ -371,12 +381,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
        unsigned long flags;
        int i;
 
-       /*
-        * Try to locate a physical channel to be used for
-        * this transfer. If all are taken return NULL and
-        * the requester will have to cope by using some fallback
-        * PIO mode or retrying later.
-        */
        for (i = 0; i < pl08x->vd->channels; i++) {
                ch = &pl08x->phy_chans[i];
 
@@ -405,13 +409,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 {
        unsigned long flags;
 
+       spin_lock_irqsave(&ch->lock, flags);
+
        /* Stop the channel and clear its interrupts */
-       pl08x_stop_phy_chan(ch);
-       writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
-       writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+       pl08x_terminate_phy_chan(pl08x, ch);
 
        /* Mark it as free */
-       spin_lock_irqsave(&ch->lock, flags);
        ch->serving = NULL;
        spin_unlock_irqrestore(&ch->lock, flags);
 }
@@ -481,69 +484,75 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
        return retbits;
 }
 
+struct pl08x_lli_build_data {
+       struct pl08x_txd *txd;
+       struct pl08x_driver_data *pl08x;
+       struct pl08x_bus_data srcbus;
+       struct pl08x_bus_data dstbus;
+       size_t remainder;
+};
+
 /*
- * Autoselect a master bus to use for the transfer
- * this prefers the destination bus if both available
- * if fixed address on one bus the other will be chosen
+ * Autoselect a master bus to use for the transfer.  This prefers the
+ * destination bus if both are available; if one bus has a fixed
+ * address, the other will be chosen.
  */
-static void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
-       struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
-       struct pl08x_bus_data **sbus, u32 cctl)
+static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
+       struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
        if (!(cctl & PL080_CONTROL_DST_INCR)) {
-               *mbus = src_bus;
-               *sbus = dst_bus;
+               *mbus = &bd->srcbus;
+               *sbus = &bd->dstbus;
        } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
-               *mbus = dst_bus;
-               *sbus = src_bus;
+               *mbus = &bd->dstbus;
+               *sbus = &bd->srcbus;
        } else {
-               if (dst_bus->buswidth == 4) {
-                       *mbus = dst_bus;
-                       *sbus = src_bus;
-               } else if (src_bus->buswidth == 4) {
-                       *mbus = src_bus;
-                       *sbus = dst_bus;
-               } else if (dst_bus->buswidth == 2) {
-                       *mbus = dst_bus;
-                       *sbus = src_bus;
-               } else if (src_bus->buswidth == 2) {
-                       *mbus = src_bus;
-                       *sbus = dst_bus;
+               if (bd->dstbus.buswidth == 4) {
+                       *mbus = &bd->dstbus;
+                       *sbus = &bd->srcbus;
+               } else if (bd->srcbus.buswidth == 4) {
+                       *mbus = &bd->srcbus;
+                       *sbus = &bd->dstbus;
+               } else if (bd->dstbus.buswidth == 2) {
+                       *mbus = &bd->dstbus;
+                       *sbus = &bd->srcbus;
+               } else if (bd->srcbus.buswidth == 2) {
+                       *mbus = &bd->srcbus;
+                       *sbus = &bd->dstbus;
                } else {
-                       /* src_bus->buswidth == 1 */
-                       *mbus = dst_bus;
-                       *sbus = src_bus;
+                       /* bd->srcbus.buswidth == 1 */
+                       *mbus = &bd->dstbus;
+                       *sbus = &bd->srcbus;
                }
        }
 }
 
 /*
- * Fills in one LLI for a certain transfer descriptor
- * and advance the counter
+ * Fill in one LLI for a certain transfer descriptor and advance the counter
  */
-static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
-       struct pl08x_txd *txd, int num_llis, int len, u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
+       int num_llis, int len, u32 cctl)
 {
-       struct pl08x_lli *llis_va = txd->llis_va;
-       dma_addr_t llis_bus = txd->llis_bus;
+       struct pl08x_lli *llis_va = bd->txd->llis_va;
+       dma_addr_t llis_bus = bd->txd->llis_bus;
 
        BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
 
        llis_va[num_llis].cctl = cctl;
-       llis_va[num_llis].src = txd->srcbus.addr;
-       llis_va[num_llis].dst = txd->dstbus.addr;
+       llis_va[num_llis].src = bd->srcbus.addr;
+       llis_va[num_llis].dst = bd->dstbus.addr;
        llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
-       if (pl08x->lli_buses & PL08X_AHB2)
+       if (bd->pl08x->lli_buses & PL08X_AHB2)
                llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
 
        if (cctl & PL080_CONTROL_SRC_INCR)
-               txd->srcbus.addr += len;
+               bd->srcbus.addr += len;
        if (cctl & PL080_CONTROL_DST_INCR)
-               txd->dstbus.addr += len;
+               bd->dstbus.addr += len;
 
-       BUG_ON(*remainder < len);
+       BUG_ON(bd->remainder < len);
 
-       *remainder -= len;
+       bd->remainder -= len;
 }
 
 /*
@@ -567,7 +576,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                              struct pl08x_txd *txd)
 {
        struct pl08x_bus_data *mbus, *sbus;
-       size_t remainder;
+       struct pl08x_lli_build_data bd;
        int num_llis = 0;
        u32 cctl;
        size_t max_bytes_per_lli;
@@ -586,81 +595,76 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
        /* Get the default CCTL */
        cctl = txd->cctl;
 
+       bd.txd = txd;
+       bd.pl08x = pl08x;
+       bd.srcbus.addr = txd->src_addr;
+       bd.dstbus.addr = txd->dst_addr;
+
        /* Find maximum width of the source bus */
-       txd->srcbus.maxwidth =
+       bd.srcbus.maxwidth =
                pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
                                       PL080_CONTROL_SWIDTH_SHIFT);
 
        /* Find maximum width of the destination bus */
-       txd->dstbus.maxwidth =
+       bd.dstbus.maxwidth =
                pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
                                       PL080_CONTROL_DWIDTH_SHIFT);
 
        /* Set up the bus widths to the maximum */
-       txd->srcbus.buswidth = txd->srcbus.maxwidth;
-       txd->dstbus.buswidth = txd->dstbus.maxwidth;
+       bd.srcbus.buswidth = bd.srcbus.maxwidth;
+       bd.dstbus.buswidth = bd.dstbus.maxwidth;
        dev_vdbg(&pl08x->adev->dev,
                 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
-                __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+                __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
 
 
        /*
         * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
         */
-       max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+       max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
                PL080_CONTROL_TRANSFER_SIZE_MASK;
        dev_vdbg(&pl08x->adev->dev,
                 "%s max bytes per lli = %zu\n",
                 __func__, max_bytes_per_lli);
 
        /* We need to count this down to zero */
-       remainder = txd->len;
+       bd.remainder = txd->len;
        dev_vdbg(&pl08x->adev->dev,
                 "%s remainder = %zu\n",
-                __func__, remainder);
+                __func__, bd.remainder);
 
        /*
         * Choose bus to align to
         * - prefers destination bus if both available
         * - if fixed address on one bus chooses other
-        * - modifies cctl to choose an appropriate master
         */
-       pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
-                               &mbus, &sbus, cctl);
+       pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
        if (txd->len < mbus->buswidth) {
-               /*
-                * Less than a bus width available
-                * - send as single bytes
-                */
-               while (remainder) {
+               /* Less than a bus width available - send as single bytes */
+               while (bd.remainder) {
                        dev_vdbg(&pl08x->adev->dev,
                                 "%s single byte LLIs for a transfer of "
                                 "less than a bus width (remain 0x%08x)\n",
-                                __func__, remainder);
+                                __func__, bd.remainder);
                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-                       pl08x_fill_lli_for_desc(pl08x, txd, num_llis++, 1,
-                                       cctl, &remainder);
+                       pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
                        total_bytes++;
                }
        } else {
-               /*
-                *  Make one byte LLIs until master bus is aligned
-                *  - slave will then be aligned also
-                */
+               /* Make one byte LLIs until master bus is aligned */
                while ((mbus->addr) % (mbus->buswidth)) {
                        dev_vdbg(&pl08x->adev->dev,
                                "%s adjustment lli for less than bus width "
                                 "(remain 0x%08x)\n",
-                                __func__, remainder);
+                                __func__, bd.remainder);
                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-                       pl08x_fill_lli_for_desc(pl08x, txd, num_llis++, 1,
-                                       cctl, &remainder);
+                       pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
                        total_bytes++;
                }
 
                /*
-                *  Master now aligned
+                * Master now aligned
                 * - if slave is not then we must set its width down
                 */
                if (sbus->addr % sbus->buswidth) {
@@ -675,14 +679,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                 * Make largest possible LLIs until less than one bus
                 * width left
                 */
-               while (remainder > (mbus->buswidth - 1)) {
+               while (bd.remainder > (mbus->buswidth - 1)) {
                        size_t lli_len, target_len, tsize, odd_bytes;
 
                        /*
                         * If enough left try to send max possible,
                         * otherwise try to send the remainder
                         */
-                       target_len = min(remainder, max_bytes_per_lli);
+                       target_len = min(bd.remainder, max_bytes_per_lli);
 
                        /*
                         * Set bus lengths for incrementing buses to the
@@ -690,24 +694,24 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                         * limiting on the target length calculated above.
                         */
                        if (cctl & PL080_CONTROL_SRC_INCR)
-                               txd->srcbus.fill_bytes =
-                                       pl08x_pre_boundary(txd->srcbus.addr,
+                               bd.srcbus.fill_bytes =
+                                       pl08x_pre_boundary(bd.srcbus.addr,
                                                target_len);
                        else
-                               txd->srcbus.fill_bytes = target_len;
+                               bd.srcbus.fill_bytes = target_len;
 
                        if (cctl & PL080_CONTROL_DST_INCR)
-                               txd->dstbus.fill_bytes =
-                                       pl08x_pre_boundary(txd->dstbus.addr,
+                               bd.dstbus.fill_bytes =
+                                       pl08x_pre_boundary(bd.dstbus.addr,
                                                target_len);
                        else
-                               txd->dstbus.fill_bytes = target_len;
+                               bd.dstbus.fill_bytes = target_len;
 
                        /* Find the nearest */
-                       lli_len = min(txd->srcbus.fill_bytes,
-                               txd->dstbus.fill_bytes);
+                       lli_len = min(bd.srcbus.fill_bytes,
+                                     bd.dstbus.fill_bytes);
 
-                       BUG_ON(lli_len > remainder);
+                       BUG_ON(lli_len > bd.remainder);
 
                        if (lli_len <= 0) {
                                dev_err(&pl08x->adev->dev,
@@ -718,10 +722,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
                        if (lli_len == target_len) {
                                /*
-                                * Can send what we wanted
-                                */
-                               /*
-                                *  Maintain alignment
+                                * Can send what we wanted.
+                                * Maintain alignment
                                 */
                                lli_len = (lli_len/mbus->buswidth) *
                                                        mbus->buswidth;
@@ -729,17 +731,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                        } else {
                                /*
                                 * So now we know how many bytes to transfer
-                                * to get to the nearest boundary
-                                * The next LLI will past the boundary
-                                * - however we may be working to a boundary
-                                *   on the slave bus
-                                *   We need to ensure the master stays aligned
+                                * to get to the nearest boundary.  The next
+                                * LLI will go past the boundary.  However, we
+                                * may be working to a boundary on the slave
+                                * bus.  We need to ensure the master stays
+                                * aligned, and that we are working in
+                                * multiples of the bus widths.
                                 */
                                odd_bytes = lli_len % mbus->buswidth;
-                               /*
-                                * - and that we are working in multiples
-                                *   of the bus widths
-                                */
                                lli_len -= odd_bytes;
 
                        }
@@ -764,34 +763,33 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                                }
 
                                cctl = pl08x_cctl_bits(cctl,
-                                                      txd->srcbus.buswidth,
-                                                      txd->dstbus.buswidth,
+                                                      bd.srcbus.buswidth,
+                                                      bd.dstbus.buswidth,
                                                       tsize);
 
                                dev_vdbg(&pl08x->adev->dev,
                                        "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
-                                       __func__, lli_len, remainder);
-                               pl08x_fill_lli_for_desc(pl08x, txd, num_llis++,
-                                               lli_len, cctl, &remainder);
+                                       __func__, lli_len, bd.remainder);
+                               pl08x_fill_lli_for_desc(&bd, num_llis++,
+                                       lli_len, cctl);
                                total_bytes += lli_len;
                        }
 
 
                        if (odd_bytes) {
                                /*
-                                * Creep past the boundary,
-                                * maintaining master alignment
+                                * Creep past the boundary, maintaining
+                                * master alignment
                                 */
                                int j;
                                for (j = 0; (j < mbus->buswidth)
-                                               && (remainder); j++) {
+                                               && (bd.remainder); j++) {
                                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
                                        dev_vdbg(&pl08x->adev->dev,
                                                "%s align with boundary, single byte (remain 0x%08zx)\n",
-                                               __func__, remainder);
-                                       pl08x_fill_lli_for_desc(pl08x, txd,
-                                                       num_llis++, 1, cctl,
-                                                       &remainder);
+                                               __func__, bd.remainder);
+                                       pl08x_fill_lli_for_desc(&bd,
+                                               num_llis++, 1, cctl);
                                        total_bytes++;
                                }
                        }
@@ -800,13 +798,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                /*
                 * Send any odd bytes
                 */
-               while (remainder) {
+               while (bd.remainder) {
                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
                        dev_vdbg(&pl08x->adev->dev,
                                "%s align with boundary, single odd byte (remain %zu)\n",
-                               __func__, remainder);
-                       pl08x_fill_lli_for_desc(pl08x, txd, num_llis++, 1,
-                                       cctl, &remainder);
+                               __func__, bd.remainder);
+                       pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
                        total_bytes++;
                }
        }
@@ -825,13 +822,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
        }
 
        llis_va = txd->llis_va;
-       /*
-        * The final LLI terminates the LLI.
-        */
+       /* The final LLI terminates the LLI chain. */
        llis_va[num_llis - 1].lli = 0;
-       /*
-        * The final LLI element shall also fire an interrupt
-        */
+       /* The final LLI element shall also fire an interrupt. */
        llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
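
For readers following the list construction: each element the DMAC walks holds four words, and the lli field doubles as the "next" pointer, which is why zeroing it in the last element is what stops the engine and why that same element carries the terminal-count interrupt enable. A rough picture of the layout; the real definition lives in amba-pl08x.h, so treat the exact types as an assumption:

	#include <linux/types.h>

	/* One PL08x linked-list item as the DMA controller reads it */
	struct pl08x_lli_sketch {
		u32 src;	/* source address for this chunk */
		u32 dst;	/* destination address for this chunk */
		u32 lli;	/* bus address of the next item, or 0 for the last one */
		u32 cctl;	/* control word: widths, bursts, increments, TC IRQ */
	};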
 
 #ifdef VERBOSE_DEBUG
@@ -873,13 +866,12 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
        struct pl08x_txd *txdi = NULL;
        struct pl08x_txd *next;
 
-       if (!list_empty(&plchan->desc_list)) {
+       if (!list_empty(&plchan->pend_list)) {
                list_for_each_entry_safe(txdi,
-                                        next, &plchan->desc_list, node) {
+                                        next, &plchan->pend_list, node) {
                        list_del(&txdi->node);
                        pl08x_free_txd(pl08x, txdi);
                }
-
        }
 }
 
@@ -947,6 +939,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
                 ch->signal,
                 plchan->name);
 
+       plchan->phychan_hold++;
        plchan->phychan = ch;
 
        return 0;
@@ -967,13 +960,33 @@ static void release_phy_channel(struct pl08x_dma_chan *plchan)
 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 {
        struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+       struct pl08x_txd *txd = to_pl08x_txd(tx);
+       unsigned long flags;
+
+       spin_lock_irqsave(&plchan->lock, flags);
 
        plchan->chan.cookie += 1;
        if (plchan->chan.cookie < 0)
                plchan->chan.cookie = 1;
        tx->cookie = plchan->chan.cookie;
-       /* This unlock follows the lock in the prep() function */
-       spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+
+       /* Put this onto the pending list */
+       list_add_tail(&txd->node, &plchan->pend_list);
+
+       /*
+        * If there was no physical channel available for this memcpy,
+        * stack the request up and indicate that the channel is waiting
+        * for a free physical channel.
+        */
+       if (!plchan->slave && !plchan->phychan) {
+               /* Do this memcpy whenever there is a channel ready */
+               plchan->state = PL08X_CHAN_WAITING;
+               plchan->waiting = txd;
+       } else {
+               plchan->phychan_hold--;
+       }
+
+       spin_unlock_irqrestore(&plchan->lock, flags);
 
        return tx->cookie;
 }
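
With queueing moved into tx_submit(), the usual dmaengine three-step on the client side maps cleanly onto this driver: prep builds the descriptor, submit puts it on pend_list (or parks a memcpy as waiting), and issue_pending starts it. A minimal sketch of that flow, assuming a channel already obtained via dma_request_channel() and already DMA-mapped addresses; the function name and error handling are illustrative only:

	#include <linux/dmaengine.h>

	static int issue_one_memcpy(struct dma_chan *chan, dma_addr_t dst,
				    dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		/* For PL08x this lands in pl08x_prep_dma_memcpy() */
		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			return -EBUSY;

		/* pl08x_tx_submit(): cookie assignment and pend_list add */
		cookie = tx->tx_submit(tx);
		if (dma_submit_error(cookie))
			return -EIO;

		/* pl08x_issue_pending(): actually start the first pending txd */
		dma_async_issue_pending(chan);
		return 0;
	}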
@@ -987,10 +1000,9 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
 }
 
 /*
- * Code accessing dma_async_is_complete() in a tight loop
- * may give problems - could schedule where indicated.
- * If slaves are relying on interrupts to signal completion this
- * function must not be called with interrupts disabled
+ * Code accessing dma_async_is_complete() in a tight loop may cause problems.
+ * If slaves are relying on interrupts to signal completion, this function
+ * must not be called with interrupts disabled.
  */
 static enum dma_status
 pl08x_dma_tx_status(struct dma_chan *chan,
@@ -1012,10 +1024,6 @@ pl08x_dma_tx_status(struct dma_chan *chan,
                return ret;
        }
 
-       /*
-        * schedule(); could be inserted here
-        */
-
        /*
         * This cookie not complete yet
         */
@@ -1084,31 +1092,35 @@ static const struct burst_table burst_sizes[] = {
        },
 };
 
-static void dma_set_runtime_config(struct dma_chan *chan,
-                              struct dma_slave_config *config)
+static int dma_set_runtime_config(struct dma_chan *chan,
+                                 struct dma_slave_config *config)
 {
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_channel_data *cd = plchan->cd;
        enum dma_slave_buswidth addr_width;
+       dma_addr_t addr;
        u32 maxburst;
        u32 cctl = 0;
        int i;
 
+       if (!plchan->slave)
+               return -EINVAL;
+
        /* Transfer direction */
        plchan->runtime_direction = config->direction;
        if (config->direction == DMA_TO_DEVICE) {
-               plchan->runtime_addr = config->dst_addr;
+               addr = config->dst_addr;
                addr_width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
        } else if (config->direction == DMA_FROM_DEVICE) {
-               plchan->runtime_addr = config->src_addr;
+               addr = config->src_addr;
                addr_width = config->src_addr_width;
                maxburst = config->src_maxburst;
        } else {
                dev_err(&pl08x->adev->dev,
                        "bad runtime_config: alien transfer direction\n");
-               return;
+               return -EINVAL;
        }
 
        switch (addr_width) {
@@ -1127,7 +1139,7 @@ static void dma_set_runtime_config(struct dma_chan *chan,
        default:
                dev_err(&pl08x->adev->dev,
                        "bad runtime_config: alien address width\n");
-               return;
+               return -EINVAL;
        }
 
        /*
@@ -1146,6 +1158,8 @@ static void dma_set_runtime_config(struct dma_chan *chan,
                cctl |= burst_sizes[i].reg;
        }
 
+       plchan->runtime_addr = addr;
+
        /* Modify the default channel data to fit PrimeCell request */
        cd->cctl = cctl;
 
@@ -1157,6 +1171,8 @@ static void dma_set_runtime_config(struct dma_chan *chan,
                addr_width,
                maxburst,
                cctl);
+
+       return 0;
 }
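
Since the function now reports failure, the peripheral driver on the other end of the API can check the result when it configures the channel. A hedged sketch of that caller side, going through the generic DMA_SLAVE_CONFIG control command; the function name, FIFO address and burst size are placeholders:

	#include <linux/dmaengine.h>

	static int configure_tx_channel(struct dma_chan *chan, dma_addr_t fifo)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_TO_DEVICE,
			.dst_addr	= fifo,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst	= 4,	/* placeholder burst size */
		};

		/* Dispatched by pl08x_control() to dma_set_runtime_config() */
		return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
						    (unsigned long)&cfg);
	}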
 
 /*
@@ -1176,10 +1192,10 @@ static void pl08x_issue_pending(struct dma_chan *chan)
        }
 
        /* Take the first element in the queue and execute it */
-       if (!list_empty(&plchan->desc_list)) {
+       if (!list_empty(&plchan->pend_list)) {
                struct pl08x_txd *next;
 
-               next = list_first_entry(&plchan->desc_list,
+               next = list_first_entry(&plchan->pend_list,
                                        struct pl08x_txd,
                                        node);
                list_del(&next->node);
@@ -1194,9 +1210,9 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
                                        struct pl08x_txd *txd)
 {
-       int num_llis;
        struct pl08x_driver_data *pl08x = plchan->host;
-       int ret;
+       unsigned long flags;
+       int num_llis, ret;
 
        num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
        if (!num_llis) {
@@ -1204,9 +1220,7 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
                return -EINVAL;
        }
 
-       spin_lock_irqsave(&plchan->lock, plchan->lockflags);
-
-       list_add_tail(&txd->node, &plchan->desc_list);
+       spin_lock_irqsave(&plchan->lock, flags);
 
        /*
         * See if we already have a physical channel allocated,
@@ -1215,40 +1229,34 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
        ret = prep_phy_channel(plchan, txd);
        if (ret) {
                /*
-                * No physical channel available, we will
-                * stack up the memcpy channels until there is a channel
-                * available to handle it whereas slave transfers may
-                * have been denied due to platform channel muxing restrictions
-                * and since there is no guarantee that this will ever be
-                * resolved, and since the signal must be acquired AFTER
-                * acquiring the physical channel, we will let them be NACK:ed
-                * with -EBUSY here. The drivers can alway retry the prep()
-                * call if they are eager on doing this using DMA.
+                * No physical channel was available.
+                *
+                * memcpy transfers can be sorted out at submission time.
+                *
+                * Slave transfers may have been denied due to platform
+                * channel muxing restrictions.  Since there is no guarantee
+                * that this will ever be resolved, and the signal must be
+                * acquired AFTER acquiring the physical channel, we will let
+                * them be NACKed with -EBUSY here.  The drivers can retry
+                * the prep() call if they are eager to do this using DMA.
                 */
                if (plchan->slave) {
                        pl08x_free_txd_list(pl08x, plchan);
-                       spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+                       pl08x_free_txd(pl08x, txd);
+                       spin_unlock_irqrestore(&plchan->lock, flags);
                        return -EBUSY;
                }
-               /* Do this memcpy whenever there is a channel ready */
-               plchan->state = PL08X_CHAN_WAITING;
-               plchan->waiting = txd;
        } else
                /*
-                * Else we're all set, paused and ready to roll,
-                * status will switch to PL08X_CHAN_RUNNING when
-                * we call issue_pending(). If there is something
-                * running on the channel already we don't change
-                * its state.
+                * Else we're all set, paused and ready to roll, status
+                * will switch to PL08X_CHAN_RUNNING when we call
+                * issue_pending(). If there is something running on the
+                * channel already we don't change its state.
                 */
                if (plchan->state == PL08X_CHAN_IDLE)
                        plchan->state = PL08X_CHAN_PAUSED;
 
-       /*
-        * Notice that we leave plchan->lock locked on purpose:
-        * it will be unlocked in the subsequent tx_submit()
-        * call. This is a consequence of the current API.
-        */
+       spin_unlock_irqrestore(&plchan->lock, flags);
 
        return 0;
 }
@@ -1270,12 +1278,14 @@ static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
        return cctl;
 }
 
-static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
+       unsigned long flags)
 {
        struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
 
        if (txd) {
                dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
+               txd->tx.flags = flags;
                txd->tx.tx_submit = pl08x_tx_submit;
                INIT_LIST_HEAD(&txd->node);
 
@@ -1298,7 +1308,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
        struct pl08x_txd *txd;
        int ret;
 
-       txd = pl08x_get_txd(plchan);
+       txd = pl08x_get_txd(plchan, flags);
        if (!txd) {
                dev_err(&pl08x->adev->dev,
                        "%s no memory for descriptor\n", __func__);
@@ -1306,8 +1316,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
        }
 
        txd->direction = DMA_NONE;
-       txd->srcbus.addr = src;
-       txd->dstbus.addr = dest;
+       txd->src_addr = src;
+       txd->dst_addr = dest;
        txd->len = len;
 
        /* Set platform data for m2m */
@@ -1325,10 +1335,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
        ret = pl08x_prep_channel_resources(plchan, txd);
        if (ret)
                return NULL;
-       /*
-        * NB: the channel lock is held at this point so tx_submit()
-        * must be called in direct succession.
-        */
 
        return &txd->tx;
 }
@@ -1356,7 +1362,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
        dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
                __func__, sgl->length, plchan->name);
 
-       txd = pl08x_get_txd(plchan);
+       txd = pl08x_get_txd(plchan, flags);
        if (!txd) {
                dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
                return NULL;
@@ -1386,21 +1392,21 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
        if (direction == DMA_TO_DEVICE) {
                txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
                txd->cctl |= PL080_CONTROL_SRC_INCR;
-               txd->srcbus.addr = sgl->dma_address;
+               txd->src_addr = sgl->dma_address;
                if (plchan->runtime_addr)
-                       txd->dstbus.addr = plchan->runtime_addr;
+                       txd->dst_addr = plchan->runtime_addr;
                else
-                       txd->dstbus.addr = plchan->cd->addr;
+                       txd->dst_addr = plchan->cd->addr;
                src_buses = pl08x->mem_buses;
                dst_buses = plchan->cd->periph_buses;
        } else if (direction == DMA_FROM_DEVICE) {
                txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
                txd->cctl |= PL080_CONTROL_DST_INCR;
                if (plchan->runtime_addr)
-                       txd->srcbus.addr = plchan->runtime_addr;
+                       txd->src_addr = plchan->runtime_addr;
                else
-                       txd->srcbus.addr = plchan->cd->addr;
-               txd->dstbus.addr = sgl->dma_address;
+                       txd->src_addr = plchan->cd->addr;
+               txd->dst_addr = sgl->dma_address;
                src_buses = plchan->cd->periph_buses;
                dst_buses = pl08x->mem_buses;
        } else {
@@ -1414,10 +1420,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
        ret = pl08x_prep_channel_resources(plchan, txd);
        if (ret)
                return NULL;
-       /*
-        * NB: the channel lock is held at this point so tx_submit()
-        * must be called in direct succession.
-        */
 
        return &txd->tx;
 }
@@ -1432,10 +1434,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
        /* Controls applicable to inactive channels */
        if (cmd == DMA_SLAVE_CONFIG) {
-               dma_set_runtime_config(chan,
-                                      (struct dma_slave_config *)
-                                      arg);
-               return 0;
+               return dma_set_runtime_config(chan,
+                                             (struct dma_slave_config *)arg);
        }
 
        /*
@@ -1453,7 +1453,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                plchan->state = PL08X_CHAN_IDLE;
 
                if (plchan->phychan) {
-                       pl08x_stop_phy_chan(plchan->phychan);
+                       pl08x_terminate_phy_chan(pl08x, plchan->phychan);
 
                        /*
                         * Mark physical channel as free and free any slave
@@ -1502,10 +1502,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 
 /*
  * Just check that the device is there and active
- * TODO: turn this bit on/off depending on the number of
- * physical channels actually used, if it is zero... well
- * shut it off. That will save some power. Cut the clock
- * at the same time.
+ * TODO: turn this bit on/off depending on the number of physical channels
+ * actually used, if it is zero... well shut it off. That will save some
+ * power. Cut the clock at the same time.
  */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
@@ -1518,13 +1517,33 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
        writel(val, pl08x->base + PL080_CONFIG);
 }
 
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
+{
+       struct device *dev = txd->tx.chan->device->dev;
+
+       if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+               if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+                       dma_unmap_single(dev, txd->src_addr, txd->len,
+                               DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(dev, txd->src_addr, txd->len,
+                               DMA_TO_DEVICE);
+       }
+       if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+               if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+                       dma_unmap_single(dev, txd->dst_addr, txd->len,
+                               DMA_FROM_DEVICE);
+               else
+                       dma_unmap_page(dev, txd->dst_addr, txd->len,
+                               DMA_FROM_DEVICE);
+       }
+}
+
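
pl08x_unmap_buffers() honours the standard dmaengine completion flags, so a memcpy client that wants to keep its own mappings alive simply opts out at prep time. A small sketch; the flag names come from enum dma_ctrl_flags, everything else (including the function name) is illustrative:

	#include <linux/dmaengine.h>

	/* Keep ownership of both mappings: skip the driver-side unmap */
	static struct dma_async_tx_descriptor *prep_memcpy_no_unmap(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len)
	{
		unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK |
				      DMA_COMPL_SKIP_SRC_UNMAP |
				      DMA_COMPL_SKIP_DEST_UNMAP;

		return chan->device->device_prep_dma_memcpy(chan, dst, src,
							    len, flags);
	}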
 static void pl08x_tasklet(unsigned long data)
 {
        struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_txd *txd;
-       dma_async_tx_callback callback = NULL;
-       void *callback_param = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&plchan->lock, flags);
@@ -1533,32 +1552,26 @@ static void pl08x_tasklet(unsigned long data)
        plchan->at = NULL;
 
        if (txd) {
-               callback = txd->tx.callback;
-               callback_param = txd->tx.callback_param;
-
-               /*
-                * Update last completed
-                */
+               /* Update last completed */
                plchan->lc = txd->tx.cookie;
-
-               /*
-                * Free the descriptor
-                */
-               pl08x_free_txd(pl08x, txd);
        }
-       /*
-        * If a new descriptor is queued, set it up
-        * plchan->at is NULL here
-        */
-       if (!list_empty(&plchan->desc_list)) {
+
+       /* If a new descriptor is queued, set it up; plchan->at is NULL here */
+       if (!list_empty(&plchan->pend_list)) {
                struct pl08x_txd *next;
 
-               next = list_first_entry(&plchan->desc_list,
+               next = list_first_entry(&plchan->pend_list,
                                        struct pl08x_txd,
                                        node);
                list_del(&next->node);
 
                pl08x_start_txd(plchan, next);
+       } else if (plchan->phychan_hold) {
+               /*
+                * This channel is still in use - we have a new txd being
+                * prepared which will soon be queued.  Don't give up the
+                * physical channel.
+                */
        } else {
                struct pl08x_dma_chan *waiting = NULL;
 
@@ -1570,11 +1583,10 @@ static void pl08x_tasklet(unsigned long data)
                plchan->state = PL08X_CHAN_IDLE;
 
                /*
-                * And NOW before anyone else can grab that free:d
-                * up physical channel, see if there is some memcpy
-                * pending that seriously needs to start because of
-                * being stacked up while we were choking the
-                * physical channels with data.
+                * And NOW, before anyone else can grab that freed-up
+                * physical channel, see if there is some memcpy pending
+                * that seriously needs to start because of being stacked
+                * up while we were choking the physical channels with data.
                 */
                list_for_each_entry(waiting, &pl08x->memcpy.channels,
                                    chan.device_node) {
@@ -1586,6 +1598,7 @@ static void pl08x_tasklet(unsigned long data)
                                ret = prep_phy_channel(waiting,
                                                       waiting->waiting);
                                BUG_ON(ret);
+                               waiting->phychan_hold--;
                                waiting->state = PL08X_CHAN_RUNNING;
                                waiting->waiting = NULL;
                                pl08x_issue_pending(&waiting->chan);
@@ -1596,9 +1609,23 @@ static void pl08x_tasklet(unsigned long data)
 
        spin_unlock_irqrestore(&plchan->lock, flags);
 
-       /* Callback to signal completion */
-       if (callback)
-               callback(callback_param);
+       if (txd) {
+               dma_async_tx_callback callback = txd->tx.callback;
+               void *callback_param = txd->tx.callback_param;
+
+               /* Don't try to unmap buffers on slave channels */
+               if (!plchan->slave)
+                       pl08x_unmap_buffers(txd);
+
+               /* Free the descriptor */
+               spin_lock_irqsave(&plchan->lock, flags);
+               pl08x_free_txd(pl08x, txd);
+               spin_unlock_irqrestore(&plchan->lock, flags);
+
+               /* Callback to signal completion */
+               if (callback)
+                       callback(callback_param);
+       }
 }
 
 static irqreturn_t pl08x_irq(int irq, void *dev)
@@ -1610,9 +1637,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 
        val = readl(pl08x->base + PL080_ERR_STATUS);
        if (val) {
-               /*
-                * An error interrupt (on one or more channels)
-                */
+               /* An error interrupt (on one or more channels) */
                dev_err(&pl08x->adev->dev,
                        "%s error interrupt, register value 0x%08x\n",
                                __func__, val);
@@ -1636,9 +1661,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
                        mask |= (1 << i);
                }
        }
-       /*
-        * Clear only the terminal interrupts on channels we processed
-        */
+       /* Clear only the terminal interrupts on channels we processed */
        writel(mask, pl08x->base + PL080_TC_CLEAR);
 
        return mask ? IRQ_HANDLED : IRQ_NONE;
@@ -1657,6 +1680,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
        int i;
 
        INIT_LIST_HEAD(&dmadev->channels);
+
        /*
         * Register as many memcpy channels as we have physical channels;
         * we won't always be able to use all but the code will have
@@ -1701,7 +1725,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
                chan->lc = 0;
 
                spin_lock_init(&chan->lock);
-               INIT_LIST_HEAD(&chan->desc_list);
+               INIT_LIST_HEAD(&chan->pend_list);
                tasklet_init(&chan->tasklet, pl08x_tasklet,
                             (unsigned long) chan);
 
@@ -1890,9 +1914,7 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
        /* Turn on the PL08x */
        pl08x_ensure_on(pl08x);
 
-       /*
-        * Attach the interrupt handler
-        */
+       /* Attach the interrupt handler */
        writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
        writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);