* @phy_chans: array of data for the physical channels
* @pool: a pool for the LLI descriptors
* @pool_ctr: counter of LLIs in the pool
+ * @lli_buses: bitmask to OR into the LLI pointer, selecting the AHB port for LLI fetches
+ * @mem_buses: bitmask of AHB port(s) usable for memory transfers
* @lock: a spinlock for this struct
*/
struct pl08x_driver_data {
struct pl08x_phy_chan *phy_chans;
struct dma_pool *pool;
int pool_ctr;
+ u8 lli_buses;
+ u8 mem_buses;
spinlock_t lock;
};
 * Fill in one LLI for a given transfer descriptor; the caller
 * advances the LLI counter
*/
-static int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
- struct pl08x_txd *txd, int num_llis, int len,
- u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+ struct pl08x_txd *txd, int num_llis, int len, u32 cctl, u32 *remainder)
{
struct pl08x_lli *llis_va = txd->llis_va;
dma_addr_t llis_bus = txd->llis_bus;
BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
- llis_va[num_llis].cctl = cctl;
- llis_va[num_llis].src = txd->srcbus.addr;
- llis_va[num_llis].dst = txd->dstbus.addr;
-
- /*
- * On versions with dual masters, you can optionally AND on
- * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
- * in new LLIs with that controller, but we always try to
- * choose AHB1 to point into memory. The idea is to have AHB2
- * fixed on the peripheral and AHB1 messing around in the
- * memory. So we don't manipulate this bit currently.
- */
-
+ llis_va[num_llis].cctl = cctl;
+ llis_va[num_llis].src = txd->srcbus.addr;
+ llis_va[num_llis].dst = txd->dstbus.addr;
llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+ if (pl08x->lli_buses & PL08X_AHB2)
+ llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
if (cctl & PL080_CONTROL_SRC_INCR)
txd->srcbus.addr += len;
BUG_ON(*remainder < len);
*remainder -= len;
-
- return num_llis + 1;
}
/*
pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
&mbus, &sbus, cctl);
-
- /*
- * The lowest bit of the LLI register
- * is also used to indicate which master to
- * use for reading the LLIs.
- */
-
if (txd->len < mbus->buswidth) {
/*
* Less than a bus width available
"less than a bus width (remain 0x%08x)\n",
__func__, remainder);
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- num_llis =
- pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
+ pl08x_fill_lli_for_desc(pl08x, txd, num_llis++, 1,
cctl, &remainder);
total_bytes++;
}
"(remain 0x%08x)\n",
__func__, remainder);
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- num_llis = pl08x_fill_lli_for_desc
- (pl08x, txd, num_llis, 1, cctl, &remainder);
+ pl08x_fill_lli_for_desc(pl08x, txd, num_llis++, 1,
+ cctl, &remainder);
total_bytes++;
}
dev_vdbg(&pl08x->adev->dev,
"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
__func__, lli_len, remainder);
- num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
- num_llis, lli_len, cctl,
- &remainder);
+ pl08x_fill_lli_for_desc(pl08x, txd, num_llis++,
+ lli_len, cctl, &remainder);
total_bytes += lli_len;
}
dev_vdbg(&pl08x->adev->dev,
"%s align with boundary, single byte (remain 0x%08zx)\n",
__func__, remainder);
- num_llis =
- pl08x_fill_lli_for_desc(pl08x,
- txd, num_llis, 1,
- cctl, &remainder);
+ pl08x_fill_lli_for_desc(pl08x, txd,
+ num_llis++, 1, cctl,
+ &remainder);
total_bytes++;
}
}
dev_vdbg(&pl08x->adev->dev,
"%s align with boundary, single odd byte (remain %zu)\n",
__func__, remainder);
- num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
- 1, cctl, &remainder);
+ pl08x_fill_lli_for_desc(pl08x, txd, num_llis++, 1,
+ cctl, &remainder);
total_bytes++;
}
}
return 0;
}
+/*
+ * Given the source and destination available bus masks, select which
+ * port each will be routed to. We try to have source and destination
+ * on separate ports, but always respect the allowable settings.
+ */
+static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
+{
+ u32 cctl = 0;
+
+ if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
+ cctl |= PL080_CONTROL_DST_AHB2;
+ if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
+ cctl |= PL080_CONTROL_SRC_AHB2;
+
+ return cctl;
+}
+
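To make the routing rule concrete, here is a small self-contained sketch (plain C, outside the kernel) that mirrors the two conditionals above and prints the chosen port for a few src/dst masks. The AHB1/AHB2 bit values below are local placeholders, not the kernel's definitions.

/*
 * Standalone illustration of the pl08x_select_bus() routing rule.
 * AHB1/AHB2 are local placeholder bits, not the kernel macros.
 */
#include <stdio.h>

#define AHB1 0x1
#define AHB2 0x2

static void show(unsigned int src, unsigned int dst)
{
	int dst_ahb2 = !(dst & AHB1) || ((dst & AHB2) && (src & AHB1));
	int src_ahb2 = !(src & AHB1) || ((src & AHB2) && !(dst & AHB2));

	printf("src mask %#x, dst mask %#x -> src on AHB%d, dst on AHB%d\n",
	       src, dst, src_ahb2 ? 2 : 1, dst_ahb2 ? 2 : 1);
}

int main(void)
{
	show(AHB1 | AHB2, AHB1 | AHB2);	/* both flexible: split across ports */
	show(AHB1, AHB2);		/* memory on AHB1, peripheral on AHB2 */
	show(AHB2, AHB1 | AHB2);	/* source restricted to AHB2 */
	return 0;
}

In each case the two ends land on different ports whenever the masks allow it, which is the "separate ports where possible" policy the comment describes.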
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
/* Both to be incremented or the code will break */
txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
- /*
- * On the PL080 we have two bus masters and we should select one for
- * source and one for destination. We try to use AHB2 for the bus
- * which does not increment (typically the peripheral) else we just
- * choose something.
- */
if (pl08x->vd->dualmaster)
- /* Source increments, use AHB2 for destination */
- txd->cctl |= PL080_CONTROL_DST_AHB2;
+ txd->cctl |= pl08x_select_bus(pl08x,
+ pl08x->mem_buses, pl08x->mem_buses);
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
+ u8 src_buses, dst_buses;
int ret;
/*
if (direction == DMA_TO_DEVICE) {
txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
txd->cctl |= PL080_CONTROL_SRC_INCR;
- if (pl08x->vd->dualmaster)
- /* Source increments, use AHB2 for destination */
- txd->cctl |= PL080_CONTROL_DST_AHB2;
txd->srcbus.addr = sgl->dma_address;
if (plchan->runtime_addr)
txd->dstbus.addr = plchan->runtime_addr;
else
txd->dstbus.addr = plchan->cd->addr;
+ src_buses = pl08x->mem_buses;
+ dst_buses = plchan->cd->periph_buses;
} else if (direction == DMA_FROM_DEVICE) {
txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
txd->cctl |= PL080_CONTROL_DST_INCR;
- if (pl08x->vd->dualmaster)
- /* Destination increments, use AHB2 for source */
- txd->cctl |= PL080_CONTROL_SRC_AHB2;
if (plchan->runtime_addr)
txd->srcbus.addr = plchan->runtime_addr;
else
txd->srcbus.addr = plchan->cd->addr;
txd->dstbus.addr = sgl->dma_address;
+ src_buses = plchan->cd->periph_buses;
+ dst_buses = pl08x->mem_buses;
} else {
dev_err(&pl08x->adev->dev,
"%s direction unsupported\n", __func__);
return NULL;
}
+ txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
+
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
return NULL;
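For the slave case above, the per-channel platform data supplies the peripheral-side mask consumed through plchan->cd->periph_buses. A board description might look roughly like this; a minimal sketch in which the struct name and the FIFO address are assumptions for illustration, while .addr and .periph_buses are the fields this patch actually reads.

/* Sketch only: the FIFO address and port mask below are hypothetical */
static const struct pl08x_channel_data board_uart_tx = {
	/* ... bus id, DMA request signal range, default cctl ... */
	.addr		= 0x80124000,	/* hypothetical UART TX FIFO address */
	.periph_buses	= PL08X_AHB2,	/* peripheral only reachable via AHB2 */
};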
{
struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ dma_async_tx_callback callback = NULL;
+ void *callback_param = NULL;
unsigned long flags;
spin_lock_irqsave(&plchan->lock, flags);
- if (plchan->at) {
- dma_async_tx_callback callback =
- plchan->at->tx.callback;
- void *callback_param =
- plchan->at->tx.callback_param;
+ txd = plchan->at;
+ plchan->at = NULL;
+
+ if (txd) {
+ callback = txd->tx.callback;
+ callback_param = txd->tx.callback_param;
/*
* Update last completed
*/
- plchan->lc = plchan->at->tx.cookie;
-
- /*
- * Callback to signal completion
- */
- if (callback)
- callback(callback_param);
+ plchan->lc = txd->tx.cookie;
/*
* Free the descriptor
*/
- pl08x_free_txd(pl08x, plchan->at);
- plchan->at = NULL;
+ pl08x_free_txd(pl08x, txd);
}
/*
* If a new descriptor is queued, set it up
}
spin_unlock_irqrestore(&plchan->lock, flags);
+
+ /* Callback to signal completion */
+ if (callback)
+ callback(callback_param);
}
static irqreturn_t pl08x_irq(int irq, void *dev)
pl08x->adev = adev;
pl08x->vd = vd;
+ /* Default to AHB1 only; on dual-master devices take the routing from platform data */

+ pl08x->lli_buses = PL08X_AHB1;
+ pl08x->mem_buses = PL08X_AHB1;
+ if (pl08x->vd->dualmaster) {
+ pl08x->lli_buses = pl08x->pd->lli_buses;
+ pl08x->mem_buses = pl08x->pd->mem_buses;
+ }
+
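For reference, a dual-master board would hand these masks in through its platform data, along the lines of the sketch below; only the lli_buses and mem_buses fields are taken from this patch, the struct name and the remaining fields are assumed from the platform header and the values are illustrative.

/* Sketch only: a dual-master board keeping LLI fetches and memory on AHB1 */
static struct pl08x_platform_data board_pl08x_pd = {
	/* ... slave channel table, memcpy channel data, signal mux hooks ... */
	.lli_buses	= PL08X_AHB1,
	.mem_buses	= PL08X_AHB1,
};

On single-master parts these fields are ignored and the driver keeps the AHB1 defaults set above.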
/* A DMA memory pool for LLIs, aligned to PL08X_ALIGN bytes */
pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);