* Fills in one LLI for a certain transfer descriptor
- * and advance the counter
+ * (the LLI counter is now advanced by the caller)
*/
-static int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
- struct pl08x_txd *txd, int num_llis, int len,
- u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+ struct pl08x_txd *txd, int num_llis, int len, u32 cctl, u32 *remainder)
{
struct pl08x_lli *llis_va = txd->llis_va;
dma_addr_t llis_bus = txd->llis_bus;
BUG_ON(*remainder < len);
*remainder -= len;
-
- return num_llis + 1;
}
/*
- * Return number of bytes to fill to boundary, or len
+ * Return number of bytes to fill to boundary, or len.
+ * This calculation works for any value of addr.
*/
static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
- u32 boundary;
-
- boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
- << PL08X_BOUNDARY_SHIFT;
+ size_t boundary_len = PL08X_BOUNDARY_SIZE -
+ (addr & (PL08X_BOUNDARY_SIZE - 1));
- if (boundary < addr + len)
- return boundary - addr;
- else
- return len;
+ return min(boundary_len, len);
}
/*
"less than a bus width (remain 0x%08x)\n",
__func__, remainder);
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- num_llis =
- pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
+ pl08x_fill_lli_for_desc(pl08x, txd, num_llis++, 1,
cctl, &remainder);
total_bytes++;
}
"(remain 0x%08x)\n",
__func__, remainder);
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- num_llis = pl08x_fill_lli_for_desc
- (pl08x, txd, num_llis, 1, cctl, &remainder);
+ pl08x_fill_lli_for_desc(pl08x, txd, num_llis++, 1,
+ cctl, &remainder);
total_bytes++;
}
* If enough left try to send max possible,
* otherwise try to send the remainder
*/
- target_len = remainder;
- if (remainder > max_bytes_per_lli)
- target_len = max_bytes_per_lli;
+ target_len = min(remainder, max_bytes_per_lli);
/*
* Set bus lengths for incrementing buses
dev_vdbg(&pl08x->adev->dev,
"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
__func__, lli_len, remainder);
- num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
- num_llis, lli_len, cctl,
- &remainder);
+ pl08x_fill_lli_for_desc(pl08x, txd, num_llis++,
+ lli_len, cctl, &remainder);
total_bytes += lli_len;
}
dev_vdbg(&pl08x->adev->dev,
"%s align with boundary, single byte (remain 0x%08zx)\n",
__func__, remainder);
- num_llis =
- pl08x_fill_lli_for_desc(pl08x,
- txd, num_llis, 1,
- cctl, &remainder);
+ pl08x_fill_lli_for_desc(pl08x, txd,
+ num_llis++, 1, cctl,
+ &remainder);
total_bytes++;
}
}
dev_vdbg(&pl08x->adev->dev,
"%s align with boundary, single odd byte (remain %zu)\n",
__func__, remainder);
- num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
- 1, cctl, &remainder);
+ pl08x_fill_lli_for_desc(pl08x, txd, num_llis++, 1,
+ cctl, &remainder);
total_bytes++;
}
}
{
struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ dma_async_tx_callback callback = NULL;
+ void *callback_param = NULL;
unsigned long flags;
spin_lock_irqsave(&plchan->lock, flags);
- if (plchan->at) {
- dma_async_tx_callback callback =
- plchan->at->tx.callback;
- void *callback_param =
- plchan->at->tx.callback_param;
+ txd = plchan->at;
+ plchan->at = NULL;
+
+ if (txd) {
+ callback = txd->tx.callback;
+ callback_param = txd->tx.callback_param;
/*
* Update last completed
*/
- plchan->lc = plchan->at->tx.cookie;
-
- /*
- * Callback to signal completion
- */
- if (callback)
- callback(callback_param);
+ plchan->lc = txd->tx.cookie;
/*
* Free the descriptor
*/
- pl08x_free_txd(pl08x, plchan->at);
- plchan->at = NULL;
+ pl08x_free_txd(pl08x, txd);
}
/*
* If a new descriptor is queued, set it up
}
spin_unlock_irqrestore(&plchan->lock, flags);
+
+ /* Callback to signal completion */
+ if (callback)
+ callback(callback_param);
}
static irqreturn_t pl08x_irq(int irq, void *dev)