ath_start_ani(sc);
}
-static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
+static bool ath_prepare_reset(struct ath_softc *sc, bool flush)
{
struct ath_hw *ah = sc->sc_ah;
bool ret = true;
ath9k_debug_samp_bb_mac(sc);
ath9k_hw_disable_interrupts(ah);
- if (!ath_drain_all_txq(sc, retry_tx))
+ if (!ath_drain_all_txq(sc))
ret = false;
if (!ath_stoprecv(sc))
ret = false;

return ret;
}
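/*
 * Note: with retry_tx gone, ath_prepare_reset() always drains the tx
 * queues the same way; callers no longer choose between flushing and
 * retransmitting pending frames at this point.
 */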
-static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
- bool retry_tx)
+static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
hchan = ah->curchan;
}
- if (!ath_prepare_reset(sc, retry_tx, flush))
+ if (!ath_prepare_reset(sc, flush))
fastcc = false;
ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
hchan->channel, IS_CHAN_HT40(hchan), fastcc);
if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EIO;
- r = ath_reset_internal(sc, hchan, false);
+ r = ath_reset_internal(sc, hchan);
return r;
}
#undef SCHED_INTR
}
-static int ath_reset(struct ath_softc *sc, bool retry_tx)
+static int ath_reset(struct ath_softc *sc)
{
- int r;
+ int i, r;
ath9k_ps_wakeup(sc);
- r = ath_reset_internal(sc, NULL, retry_tx);
+ r = ath_reset_internal(sc, NULL);
- if (retry_tx) {
- int i;
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (ATH_TXQ_SETUP(sc, i)) {
- spin_lock_bh(&sc->tx.txq[i].axq_lock);
- ath_txq_schedule(sc, &sc->tx.txq[i]);
- spin_unlock_bh(&sc->tx.txq[i].axq_lock);
- }
- }
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ spin_lock_bh(&sc->tx.txq[i].axq_lock);
+ ath_txq_schedule(sc, &sc->tx.txq[i]);
+ spin_unlock_bh(&sc->tx.txq[i].axq_lock);
}
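/*
 * The reschedule loop above now runs unconditionally: after any reset,
 * every queue that ATH_TXQ_SETUP() reports as initialized gets a chance
 * to restart aggregation, not just the former retry_tx path.
 */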
ath9k_ps_restore(sc);
return r;
}

void ath_reset_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
- ath_reset(sc, true);
+ ath_reset(sc);
}
/**********************/
ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
}
- ath_prepare_reset(sc, false, true);
+ ath_prepare_reset(sc, true);
if (sc->rx.frag) {
dev_kfree_skb_any(sc->rx.frag);
if (drop) {
ath9k_ps_wakeup(sc);
spin_lock_bh(&sc->sc_pcu_lock);
- drain_txq = ath_drain_all_txq(sc, false);
+ drain_txq = ath_drain_all_txq(sc);
spin_unlock_bh(&sc->sc_pcu_lock);
if (!drain_txq)
- ath_reset(sc, false);
+ ath_reset(sc);
ath9k_ps_restore(sc);
ieee80211_wake_queues(hw);
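/*
 * Flush path: drain the tx queues under sc_pcu_lock and, if the
 * hardware still reports pending frames (ath_drain_all_txq() returned
 * false), fall back to a full chip reset before waking the queues.
 */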
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, bool retry)
+ struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
} else if (!isaggr && txok) {
/* transmit completion */
acked_cnt++;
- } else if ((tid->state & AGGR_CLEANUP) || !retry) {
+ } else if (tid->state & AGGR_CLEANUP) {
/*
* cleanup in progress, just fail
* the un-acked sub-frames
*/
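/*
 * With the retry argument removed, un-acked subframes are failed here
 * only while AGGR_CLEANUP is set; otherwise they presumably fall
 * through to the retransmit handling later in this chain (not shown
 * in this hunk).
 */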
/********************/
/* Queue Management */
/********************/
-static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
- struct ath_txq *txq)
-{
- struct ath_atx_ac *ac, *ac_tmp;
- struct ath_atx_tid *tid, *tid_tmp;
-
- list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
- list_del(&ac->list);
- ac->sched = false;
- list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
- list_del(&tid->list);
- tid->sched = false;
- ath_tid_drain(sc, txq, tid);
- }
- }
-}
-
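/*
 * ath_txq_drain_pending_buffers() is gone entirely: frames still
 * sitting in the per-tid software queues survive a drain and are
 * picked up again by the unconditional ath_txq_schedule() calls after
 * a reset.
 */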
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
struct ath_hw *ah = sc->sc_ah;
}
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
- struct list_head *list, bool retry_tx)
+ struct list_head *list)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
txq->axq_ampdu_depth--;
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
- retry_tx);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
/*
* This assumes output has been stopped and
* we do not need to block ath_tx_tasklet.
*/
-void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
+void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
ath_txq_lock(sc, txq);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
int idx = txq->txq_tailidx;
while (!list_empty(&txq->txq_fifo[idx])) {
- ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
- retry_tx);
+ ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);
INCR(idx, ATH_TXFIFO_DEPTH);
}
txq->txq_tailidx = idx;
}
txq->axq_link = NULL;
txq->axq_tx_inprogress = false;
- ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
-
- /* flush any pending frames if aggregation is enabled */
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
- ath_txq_drain_pending_buffers(sc, txq);
+ ath_drain_txq_list(sc, txq, &txq->axq_q);
ath_txq_unlock_complete(sc, txq);
}
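/*
 * ath_draintxq() now only flushes what the hardware already holds
 * (axq_q and, on EDMA chips, the txq_fifo slots); software-queued
 * aggregates are left on their tid queues.
 */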
-bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
+bool ath_drain_all_txq(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
*/
txq = &sc->tx.txq[i];
txq->stopped = false;
- ath_draintxq(sc, txq, retry_tx);
+ ath_draintxq(sc, txq);
}
return !npend;
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
} else
- ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
+ ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ath_txq_schedule(sc, txq);
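/*
 * Completion side: aggregate status is reported without the old retry
 * flag, and ath_txq_schedule() restarts transmission whenever the
 * hardware is HT-capable.
 */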