return 0;
}
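+/* wil_txdesc_unmap - release the DMA mapping of one Tx descriptor,
+ * using the mapping type recorded in its wil_ctx at map time
+ * (single mapping for the skb head, page mapping for fragments).
+ */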
+static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
+ struct wil_ctx *ctx)
+{
+ dma_addr_t pa = wil_desc_addr(&d->dma.addr);
+ u16 dmalen = le16_to_cpu(d->dma.length);
+
+ switch (ctx->mapped_as) {
+ case wil_mapped_as_single:
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ case wil_mapped_as_page:
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ default:
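+ /* not DMA-mapped: nothing to release */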
+ break;
+ }
+}
+
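+/* A sketch of the bookkeeping wil_txdesc_unmap() relies on; the exact
+ * definition belongs with struct wil_ctx in wil6210.h and is assumed
+ * here, not shown in this hunk:
+ *
+ * enum { wil_mapped_as_none = 0,
+ *        wil_mapped_as_single = 1,
+ *        wil_mapped_as_page = 2,
+ * };
+ *
+ * with wil_ctx carrying a "u8 mapped_as;" member in place of the old
+ * mapped_as_page:1 bit.
+ */
+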
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
int tx)
{
ctx = &vring->ctx[vring->swtail];
*d = *_d;
- pa = wil_desc_addr(&d->dma.addr);
- dmalen = le16_to_cpu(d->dma.length);
- if (vring->ctx[vring->swtail].mapped_as_page) {
- dma_unmap_page(dev, pa, dmalen,
- DMA_TO_DEVICE);
- } else {
- dma_unmap_single(dev, pa, dmalen,
- DMA_TO_DEVICE);
- }
+ wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
vring->swtail = wil_vring_next_tail(vring);
wil_dbg_txrx(wil, "%s()\n", __func__);
- if (avail < vring->size/8)
- netif_tx_stop_all_queues(wil_to_ndev(wil));
if (avail < 1 + nr_frags) {
wil_err(wil, "Tx ring full. No space for %d fragments\n",
1 + nr_frags);
if (unlikely(dma_mapping_error(dev, pa)))
return -EINVAL;
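+ /* record how the head was mapped; the unmap paths rely on it */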
+ vring->ctx[i].mapped_as = wil_mapped_as_single;
/* 1-st segment */
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
/* Process TCP/UDP checksum offloading */
pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa)))
goto dma_error;
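+ /* fragment is page-mapped; record it for wil_txdesc_unmap() */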
+ vring->ctx[i].mapped_as = wil_mapped_as_page;
wil_tx_desc_map(d, pa, len, vring_index);
/* no need to check return code -
* if it succeeded for 1-st descriptor,
* it will succeed here too
*/
wil_tx_desc_offload_cksum_set(wil, d, skb);
- vring->ctx[i].mapped_as_page = 1;
*_d = *d;
}
/* for the last seg only */
/* unmap what we have mapped */
nr_frags = f + 1; /* frags mapped + one for skb head */
for (f = 0; f < nr_frags; f++) {
- u16 dmalen;
struct wil_ctx *ctx;
i = (swhead + f) % vring->size;
ctx = &vring->ctx[i];
_d = &(vring->va[i].tx);
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
- pa = wil_desc_addr(&d->dma.addr);
- dmalen = le16_to_cpu(d->dma.length);
- if (ctx->mapped_as_page)
- dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
- else
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
/* set up vring entry */
rc = wil_tx_vring(wil, vring, skb);
+ /* do we still have enough room in the vring? */
+ if (wil_vring_avail_tx(vring) < vring->size/8)
+ netif_tx_stop_all_queues(wil_to_ndev(wil));
+
switch (rc) {
case 0:
/* statistics will be updated on the tx_complete */
new_swtail = (lf + 1) % vring->size;
while (vring->swtail != new_swtail) {
struct vring_tx_desc dd, *d = &dd;
- dma_addr_t pa;
u16 dmalen;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
struct sk_buff *skb = ctx->skb;
wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
- pa = wil_desc_addr(&d->dma.addr);
- if (ctx->mapped_as_page)
- dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
- else
- dma_unmap_single(dev, pa, dmalen,
- DMA_TO_DEVICE);
+ wil_txdesc_unmap(dev, d, ctx);
if (skb) {
if (d->dma.error == 0) {