#define ATH_MAX_ANTENNA 3
#define ATH_RXBUF 512
#define ATH_TXBUF 512
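+/* Stop a queue once it accumulates more than a quarter of the tx
+ * buffers, minus a small reserve (512 / 4 - 5 = 123 frames). */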
+#define ATH_TXBUF_RESERVE 5
+#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
#define ATH_TXMAXTRY 13
#define ATH_MGT_TXMAXTRY 4
struct list_head txq_fifo_pending;
u8 txq_headidx;
u8 txq_tailidx;
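+ /* frames queued to this txq but not yet completed */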
+ int pending_frames;
};
struct ath_atx_ac {
struct ath_buf_state bf_state;
dma_addr_t bf_dmacontext;
struct ath_wiphy *aphy;
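+ /* txq this buffer was queued to, for pending_frames accounting */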
+ struct ath_txq *txq;
};
struct ath_atx_tid {
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_init(struct ath_softc *sc, int nbufs);
void ath_tx_cleanup(struct ath_softc *sc);
-struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control txctl;
int padpos, padsize;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ int qnum;
if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
ath_print(common, ATH_DBG_XMIT,
memmove(skb->data, skb->data + padsize, padpos);
}
- /* Check if a tx queue is available */
-
- txctl.txq = ath_test_get_txq(sc, skb);
- if (!txctl.txq)
- goto exit;
+ qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+ txctl.txq = &sc->tx.txq[qnum];
ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
return qnum;
}
-struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
-{
- struct ath_txq *txq = NULL;
- u16 skb_queue = skb_get_queue_mapping(skb);
- int qnum;
-
- qnum = ath_get_hal_qnum(skb_queue, sc);
- txq = &sc->tx.txq[qnum];
-
- spin_lock_bh(&txq->axq_lock);
-
- if (txq->axq_depth >= (ATH_TXBUF - 20)) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
- "TX queue: %d is full, depth: %d\n",
- qnum, txq->axq_depth);
- ath_mac80211_stop_queue(sc, skb_queue);
- txq->stopped = 1;
- spin_unlock_bh(&txq->axq_lock);
- return NULL;
- }
-
- spin_unlock_bh(&txq->axq_lock);
-
- return txq;
-}
-
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *qinfo)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_txq *txq = txctl->txq;
struct ath_buf *bf;
int r;
return -1;
}
+ bf->txq = txctl->txq;
+ spin_lock_bh(&bf->txq->axq_lock);
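+ /* stop the mac80211 queue once too many frames are pending */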
+ if (++bf->txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
+ ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+ txq->stopped = 1;
+ }
+ spin_unlock_bh(&bf->txq->axq_lock);
+
r = ath_tx_setup_buffer(hw, bf, skb, txctl);
if (unlikely(r)) {
- struct ath_txq *txq = txctl->txq;
-
ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
/* upon ath_tx_processq() this TX queue will be resumed, we
* will at least have to run TX completion on one buffer
* on the queue */
spin_lock_bh(&txq->axq_lock);
- if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
+ if (!txq->stopped && txq->axq_depth > 1) {
ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
txq->stopped = 1;
}
tx_flags |= ATH_TX_XRETRY;
}
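+ /* frame completed: drop its pending_frames count so the queue can be woken */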
+ if (bf->txq) {
+ spin_lock_bh(&bf->txq->axq_lock);
+ bf->txq->pending_frames--;
+ spin_unlock_bh(&bf->txq->axq_lock);
+ bf->txq = NULL;
+ }
+
dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
ath_tx_complete(sc, skb, bf->aphy, tx_flags);
ath_debug_stat_tx(sc, txq, bf, ts);
int qnum;
spin_lock_bh(&txq->axq_lock);
- if (txq->stopped &&
- sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
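+ /* wake the queue once pending_frames has dropped below the threshold */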
+ if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
if (qnum != -1) {
ath_mac80211_start_queue(sc, qnum);