__clear_bit(*hlid, wl->links_map);
__clear_bit(*hlid, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ /*
+ * At this point op_tx() will no longer add packets to this link's
+ * queues, so it is safe to purge them.
+ */
+ wl1271_tx_reset_link_queues(wl, *hlid);
+
*hlid = WL12XX_INVALID_LINK_ID;
}
clear_bit(hlid, wlvif->ap.sta_hlid_map);
memset(wl->links[hlid].addr, 0, ETH_ALEN);
wl->links[hlid].ba_bitmap = 0;
- wl1271_tx_reset_link_queues(wl, hlid);
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
wl12xx_free_link(wl, wlvif, &hlid);
if (skb) {
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
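+ /* the counter is decremented below; warn once if it is about to underflow */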
+ WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
skb = wl->dummy_packet;
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
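+ /* same underflow check for the dummy packet's queue counter */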
+ WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
else
wlvif->sta.ba_rx_bitmap = 0;
- wl1271_tx_reset_link_queues(wl, i);
wl->links[i].allocated_pkts = 0;
wl->links[i].prev_freed_pkts = 0;
}
struct sk_buff *skb;
struct ieee80211_tx_info *info;
- for (i = 0; i < NUM_TX_QUEUES; i++)
- wl->tx_queue_count[i] = 0;
+ /* the queues should already be empty here; only reset them if something went wrong */
+ if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
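+ /* free any skbs still sitting on the per-link queues */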
+ for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ wl1271_tx_reset_link_queues(wl, i);
+
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wl->tx_queue_count[i] = 0;
+ }
wl->stopped_queues_map = 0;