static int dma_tx_fragment(struct b43_dmaring *ring, struct sk_buff *skb,
struct ieee80211_tx_control *ctl)
{
const struct b43_dma_ops *ops = ring->ops;
u8 *header;
- int slot;
+ int slot, old_top_slot, old_used_slots;
int err;
struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
struct b43_dmadesc_meta *meta_hdr;
struct sk_buff *bounce_skb;
#define SLOTS_PER_PACKET 2
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
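+ /* Snapshot the ring state so the error paths added below can roll back any slot bookkeeping already done. */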
+ old_top_slot = ring->current_slot;
+ old_used_slots = ring->used_slots;
+
/* Get a slot for the header. */
slot = request_slot(ring);
desc = ops->idx2desc(ring, slot, &meta_hdr);
memset(meta_hdr, 0, sizeof(*meta_hdr));

header = &(ring->txhdr_cache[slot * sizeof(struct b43_txhdr_fw4)]);
err = b43_generate_txhdr(ring->dev, header,
skb->data, skb->len, ctl,
generate_cookie(ring, slot));
- if (unlikely(err))
+ if (unlikely(err)) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
return err;
+ }
meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
sizeof(struct b43_txhdr_fw4), 1);
- if (dma_mapping_error(meta_hdr->dmaaddr))
+ if (dma_mapping_error(meta_hdr->dmaaddr)) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
return -EIO;
+ }
ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
sizeof(struct b43_txhdr_fw4), 1, 0, 0);

/* Get a slot for the payload. */
slot = request_slot(ring);
desc = ops->idx2desc(ring, slot, &meta);
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
meta->is_last_fragment = 1;

meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (dma_mapping_error(meta->dmaaddr)) {
bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
err = -ENOMEM;
goto out_unmap_hdr;
}

memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
dev_kfree_skb_any(skb);
skb = bounce_skb;
meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
if (dma_mapping_error(meta->dmaaddr)) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
err = -EIO;
goto out_free_bounce;
}