		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		txq->axq_linkbuf = NULL;
+		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;
	}
	return &sc->tx.txq[qnum];
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}
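+	/*
+	 * The queue has just been flushed; clear the hang-detect flag so
+	 * frames queued after the drain start from a clean state.
+	 */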
+	spin_lock_bh(&txq->axq_lock);
+	txq->axq_tx_inprogress = false;
+	spin_unlock_bh(&txq->axq_lock);
+
	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
		if (tid->paused)
			continue;
-		if ((txq->axq_depth % 2) == 0)
-			ath_tx_sched_aggr(sc, txq, tid);
+		ath_tx_sched_aggr(sc, txq, tid);
		/*
		 * add tid to round-robin queue if more frames
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
-				txq->axq_link = NULL;
-				txq->axq_linkbuf = NULL;
				spin_unlock_bh(&txq->axq_lock);
-
-				/*
-				 * The holding descriptor is the last
-				 * descriptor in queue. It's safe to remove
-				 * the last holding descriptor in BH context.
-				 */
-				spin_lock_bh(&sc->tx.txbuflock);
-				list_move_tail(&bf_held->list, &sc->tx.txbuf);
-				spin_unlock_bh(&sc->tx.txbuflock);
-
				break;
			} else {
				bf = list_entry(bf_held->list.next,
			txq->axq_aggr_depth--;
		txok = (ds->ds_txstat.ts_status == 0);
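+		/*
+		 * A descriptor completed, so the hardware is still making
+		 * progress on this queue; let the hang-detect poll know.
+		 */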
+		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);
		if (bf_held) {
	}
}
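+
+/*
+ * Periodic TX watchdog: if a queue still has frames pending and no
+ * completion has cleared axq_tx_inprogress since the previous poll, the
+ * hardware is assumed to be stuck and the chip is reset.
+ */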
+void ath_tx_complete_poll_work(struct work_struct *work)
+{
+	struct ath_softc *sc = container_of(work, struct ath_softc,
+			tx_complete_work.work);
+	struct ath_txq *txq;
+	int i;
+	bool needreset = false;
+
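+	/*
+	 * A queue is considered hung when it still has frames pending and
+	 * axq_tx_inprogress was not cleared by the completion path since the
+	 * previous poll, i.e. nothing completed during the whole interval.
+	 */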
+	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+		if (ATH_TXQ_SETUP(sc, i)) {
+			txq = &sc->tx.txq[i];
+			spin_lock_bh(&txq->axq_lock);
+			if (txq->axq_depth) {
+				if (txq->axq_tx_inprogress) {
+					needreset = true;
+					spin_unlock_bh(&txq->axq_lock);
+					break;
+				} else {
+					txq->axq_tx_inprogress = true;
+				}
+			}
+			spin_unlock_bh(&txq->axq_lock);
+		}
+
+	if (needreset) {
+		DPRINTF(sc, ATH_DBG_RESET, "tx hung, resetting the chip\n");
+		ath_reset(sc, false);
+	}
+
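+	/* Re-arm the poll; the work keeps rescheduling itself. */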
+	queue_delayed_work(sc->hw->workqueue, &sc->tx_complete_work,
+			msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
+}
+
void ath_tx_tasklet(struct ath_softc *sc)
{
		goto err;
	}
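+	/* Set up the TX completion watchdog; its handler re-arms itself. */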
+	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+
err:
	if (error != 0)
		ath_tx_cleanup(sc);
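
The hunks above rely on three declarations that live outside this file and are not shown in the excerpt: the per-queue axq_tx_inprogress flag, the tx_complete_work delayed work item, and the ATH_TX_COMPLETE_POLL_INT interval. A minimal sketch of what those header-side declarations would look like follows; the field placement and the 1000 ms value are assumptions for illustration, not taken from this patch.

#include <linux/types.h>
#include <linux/workqueue.h>

/* Poll period for the TX hang check, in milliseconds (assumed value). */
#define ATH_TX_COMPLETE_POLL_INT	1000

struct ath_txq {
	/* ... existing members elided ... */
	bool axq_tx_inprogress;	/* armed by the poll worker, cleared on completion */
};

struct ath_softc {
	/* ... existing members elided ... */
	struct delayed_work tx_complete_work;	/* runs ath_tx_complete_poll_work */
};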