u8 max_subfrms_amsdu);
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
+ bool is_mgmt);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
+ bool is_mgmt,
+ bool is_presp);
+
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
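
With this change ath10k_htt_tx_inc_pending() and ath10k_htt_tx_dec_pending() become part of the HTT API and no longer take htt->tx_lock themselves; the caller is expected to hold it. A minimal caller-side sketch of the intended pattern (not part of the patch; the placeholder send is hypothetical) looks like this:

/* Sketch only: the accounting pattern the mac.c hunks below adopt.
 * Both helpers now assume htt->tx_lock is held by the caller.
 */
static int ath10k_htt_tx_account_sketch(struct ath10k_htt *htt,
					bool is_mgmt, bool is_presp)
{
	int ret;

	spin_lock_bh(&htt->tx_lock);
	ret = ath10k_htt_tx_inc_pending(htt, is_mgmt, is_presp);
	spin_unlock_bh(&htt->tx_lock);
	if (ret)
		return ret;	/* tx ring full or probe resp limit hit */

	ret = 0;	/* imagine the actual HTT send happening here */
	if (ret) {
		/* the send failed, so roll the accounting back */
		spin_lock_bh(&htt->tx_lock);
		ath10k_htt_tx_dec_pending(htt, is_mgmt);
		spin_unlock_bh(&htt->tx_lock);
	}

	return ret;
}
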
#include "txrx.h"
#include "debug.h"
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
+ bool is_mgmt)
{
- if (limit_mgmt_desc)
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (is_mgmt)
htt->num_pending_mgmt_tx--;
htt->num_pending_tx--;
ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}
-static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
- bool limit_mgmt_desc)
-{
- spin_lock_bh(&htt->tx_lock);
- __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
- spin_unlock_bh(&htt->tx_lock);
-}
-
-static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
- bool limit_mgmt_desc, bool is_probe_resp)
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
+ bool is_mgmt,
+ bool is_presp)
{
struct ath10k *ar = htt->ar;
- int ret = 0;
- spin_lock_bh(&htt->tx_lock);
+ lockdep_assert_held(&htt->tx_lock);
- if (htt->num_pending_tx >= htt->max_num_pending_tx) {
- ret = -EBUSY;
- goto exit;
- }
+ if (htt->num_pending_tx >= htt->max_num_pending_tx)
+ return -EBUSY;
- if (limit_mgmt_desc) {
- if (is_probe_resp && (htt->num_pending_mgmt_tx >
- ar->hw_params.max_probe_resp_desc_thres)) {
- ret = -EBUSY;
- goto exit;
- }
+ if (is_mgmt &&
+ is_presp &&
+ ar->hw_params.max_probe_resp_desc_thres &&
+ ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
+ return -EBUSY;
+
+ if (is_mgmt)
htt->num_pending_mgmt_tx++;
- }
htt->num_pending_tx++;
if (htt->num_pending_tx == htt->max_num_pending_tx)
ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
-exit:
- spin_unlock_bh(&htt->tx_lock);
- return ret;
+ return 0;
}
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
int msdu_id = -1;
int res;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
- bool limit_mgmt_desc = false;
- bool is_probe_resp = false;
-
- if (ar->hw_params.max_probe_resp_desc_thres) {
- limit_mgmt_desc = true;
-
- if (ieee80211_is_probe_resp(hdr->frame_control))
- is_probe_resp = true;
- }
-
- res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
-
- if (res)
- goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0)
- goto err_tx_dec;
+ goto err;
msdu_id = res;
spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
- ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
return res;
}
u32 frags_paddr = 0;
u32 txbuf_paddr;
struct htt_msdu_ext_desc *ext_desc = NULL;
- bool limit_mgmt_desc = false;
- bool is_probe_resp = false;
-
- if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
- ar->hw_params.max_probe_resp_desc_thres) {
- limit_mgmt_desc = true;
-
- if (ieee80211_is_probe_resp(hdr->frame_control))
- is_probe_resp = true;
- }
-
- res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
- if (res)
- goto err;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0)
- goto err_tx_dec;
+ goto err;
msdu_id = res;
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
- ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
return res;
}
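
For readability, the admission check performed by the rewritten ath10k_htt_tx_inc_pending() above can be read as the following standalone predicate (a sketch only, not code from the patch; the real helper also bumps the counters and pauses the queues once the pool fills up):

/* Returns true when a new frame must be rejected with -EBUSY. */
static bool ath10k_htt_tx_would_overflow(struct ath10k_htt *htt,
					 bool is_mgmt, bool is_presp)
{
	struct ath10k *ar = htt->ar;

	/* data and management frames share one pool of tx descriptors */
	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return true;

	/* probe responses are additionally capped on hardware that
	 * advertises a non-zero max_probe_resp_desc_thres
	 */
	if (is_mgmt && is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres &&
	    ar->hw_params.max_probe_resp_desc_thres <
	    htt->num_pending_mgmt_tx)
		return true;

	return false;
}
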
static int ath10k_mac_tx_submit(struct ath10k *ar,
enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
- enum ath10k_mac_tx_path txpath;
- int ret;
-
- txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ int ret = -EINVAL;
switch (txpath) {
case ATH10K_MAC_TX_HTT:
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
struct sk_buff *skb)
{
struct ieee80211_hw *hw = ar->hw;
}
}
- ret = ath10k_mac_tx_submit(ar, txmode, skb);
+ ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
if (ret) {
ath10k_warn(ar, "failed to submit frame: %d\n", ret);
return ret;
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
struct ieee80211_hdr *hdr;
struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
}
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
- ret = ath10k_mac_tx(ar, vif, sta, txmode, skb);
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
if (ret) {
ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
ret);
struct sk_buff *skb)
{
struct ath10k *ar = hw->priv;
+ struct ath10k_htt *htt = &ar->htt;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ bool is_htt;
+ bool is_mgmt;
+ bool is_presp;
int ret;
ath10k_mac_tx_h_fill_cb(ar, vif, skb);
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_htt = (txpath == ATH10K_MAC_TX_HTT ||
+ txpath == ATH10K_MAC_TX_HTT_MGMT);
- ret = ath10k_mac_tx(ar, vif, sta, txmode, skb);
- if (ret)
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+
+ is_mgmt = ieee80211_is_mgmt(hdr->frame_control);
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+ ret = ath10k_htt_tx_inc_pending(htt, is_mgmt, is_presp);
+ if (ret) {
+ ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
+ ret);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
+ return;
+ }
+
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (ret) {
ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt, is_mgmt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+ return;
+ }
}
/* Must not be called with conf_mutex held as workers can use that also. */
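
The txpath value that ath10k_mac_op_tx() now computes up front is what ath10k_mac_tx_submit() switches on. A simplified sketch of that dispatch follows; the ATH10K_MAC_TX_WMI_MGMT and ATH10K_MAC_TX_UNKNOWN enumerators and the exact ath10k_htt_tx() signature are assumed here, as they are not shown in the hunks above:

/* Sketch of the txpath dispatch; error handling abbreviated. */
static int ath10k_mac_tx_submit_sketch(struct ath10k *ar,
				       enum ath10k_hw_txrx_mode txmode,
				       enum ath10k_mac_tx_path txpath,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret = -EINVAL;

	switch (txpath) {
	case ATH10K_MAC_TX_HTT:
		ret = ath10k_htt_tx(htt, txmode, skb);	/* assumed signature */
		break;
	case ATH10K_MAC_TX_HTT_MGMT:
		ret = ath10k_htt_mgmt_tx(htt, skb);
		break;
	case ATH10K_MAC_TX_WMI_MGMT:
		/* management frames routed via WMI rather than HTT */
		break;
	case ATH10K_MAC_TX_UNKNOWN:
	default:
		break;
	}

	return ret;
}
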
limit_mgmt_desc = true;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
- __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+ ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);