/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
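
/* Worked example (illustrative; the authoritative HTT_RX_RING_SIZE_MAX
 * value lives in htt.h): assuming a ring size of 2048 entries, the fill
 * level works out to 2048 / 2 - 1 = 1023, i.e. the ring is deliberately
 * kept no more than about half full. The BUILD_BUG_ON() in
 * __ath10k_htt_rx_ring_fill_n() enforces this half-empty invariant at
 * compile time.
 */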

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 (u32)paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring FW will
	 * not report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

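/* Illustrative numbers only (the authoritative ATH10K_HTT_MAX_NUM_REFILL
 * lives in htt.h): with fill_level 1023, fill_cnt 990 and a per-pass
 * refill cap of e.g. 16, one call fills 16 buffers, leaves
 * num_deficit = 17 and reschedules rx_replenish_task, which re-enters
 * this function until the deficit reaches zero or -ENOMEM arms the
 * retry timer instead.
 */
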
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->rx_compl_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ring.paddrs_ring[idx] = 0;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be less than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that it will still deliver to
			 * upper stack, if no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs in the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

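/* Usage sketch (illustrative, mirroring ath10k_htt_rx_handler() below):
 *
 *	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, &amsdu);
 *	if (ret < 0)                    // fatal: mark the ring corrupted
 *		htt->rx_confused = true;
 *	ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);  // 1 => chained MSDU
 *
 * i.e. a negative value is fatal, 0 means every MSDU fit in a single rx
 * buffer, and 1 means at least one MSDU was chained across buffers and
 * must be linearized before further processing.
 */
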
static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u32 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
					struct htt_rx_in_ord_ind *ev,
					struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->rx_compl_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

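/* Example: for an AES-CCMP (WPA2) frame the hardware-delivered MSDU
 * carries an 8 byte CCMP header (IEEE80211_CCMP_HDR_LEN) after the
 * 802.11 header and an 8 byte MIC (IEEE80211_CCMP_MIC_LEN) at the tail;
 * ath10k_htt_rx_h_undecap_raw() strips the former using the param length
 * and the latter using the tail length computed above.
 */
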
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    __le32_to_cpu(rxd->ppdu_start.info0),
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

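/* Example: a plain 3-address data header is 24 bytes and already 4-byte
 * aligned, so it is returned unchanged, while a 4-address header is
 * 30 bytes and - unless the firmware advertises
 * ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING - gets rounded up to
 * 32 bytes (round_up(30, 4)) to account for the decap padding inserted
 * by the target.
 */
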
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_tail_len(ar, enctype));

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
	    ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
		/* The QCA99X0 4-address mode pads 2 bytes at the
		 * beginning of the MSDU
		 */
		hdr = (struct ieee80211_hdr *)(msdu->data + 2);
		/* The skb length needs to be extended by 2 since the
		 * 2 bytes at the tail are excluded due to the padding
		 */
		skb_put(msdu, 2);
	} else {
		hdr = (struct ieee80211_hdr *)(msdu->data);
	}

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, 4) +
			   round_up(crypto_len, 4);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr,
					    enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
		break;
	}
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

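/* In effect: only IPv4/IPv6 TCP or UDP frames whose IP *and* TCP/UDP
 * checksums both passed in hardware are flagged CHECKSUM_UNNECESSARY,
 * letting the network stack skip software verification; everything else
 * falls back to CHECKSUM_NONE and is verified by the stack as usual.
 */
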
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	size_t hdr_len;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(first_hdr, hdr, hdr_len);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;
	qos = ieee80211_get_qos_ctl(hdr);
	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(ar, status, msdu);
	}
}

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: Might be able to optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}

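/* Sketch of the coalescing above: the first fragment's tailroom is grown
 * with pskb_expand_head() by the combined length of the remaining
 * fragments, each fragment's linear data is then copied in and freed,
 * and the now-linear skb is put back at the head of the (otherwise
 * empty) queue so callers see a single unchained MSDU.
 */
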
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    bool chained)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (!chained)
		return;

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct sk_buff_head amsdu;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, ret, mpdu_count = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	while (mpdu_count--) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
					      &fw_desc_len, &amsdu);
		if (ret < 0) {
			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
			__skb_queue_purge(&amsdu);
			/* FIXME: It's probably a good idea to reboot the
			 * device instead of leaving it inoperable.
			 */
			htt->rx_confused = true;
			break;
		}

		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	u8 *fw_desc;
	int fw_desc_len;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	tasklet_schedule(&htt->rx_replenish_task);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
			    ret);
		__skb_queue_purge(&amsdu);
		return;
	}

	if (skb_queue_len(&amsdu) != 1) {
		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
		__skb_queue_purge(&amsdu);
		return;
	}

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macros.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_process_rx(ar, status, msdu);
	}
}

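/* Alignment example for the memmove() above: if msdu->data ends in 0x..3
 * then offset = 4 - 3 = 1, so the payload is shifted up by one byte onto
 * a 4-byte boundary (an already aligned buffer yields offset = 4, i.e. a
 * harmless 4 byte shift). The skb_put()/skb_pull() pair leaves msdu->len
 * unchanged while advancing the data pointer.
 */
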
static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return;
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}

static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0, info1, threshold, peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}
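
/* For reference (illustrative, compiled out): the MS() helper used
 * throughout this file extracts a masked bitfield, roughly
 * ((value & FIELD_MASK) >> FIELD_LSB). The field definitions below are
 * hypothetical and exist only for this sketch.
 */
#if 0
#define EXAMPLE_FIELD_MASK	0x00f0
#define EXAMPLE_FIELD_LSB	4

static void example_ms_usage(void)
{
	u16 info = 0x0123;
	u8 field = MS(info, EXAMPLE_FIELD);	/* (0x0123 & 0x00f0) >> 4 == 0x2 */
}
#endif
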
static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
{
	enum ieee80211_band band;

	switch (phy_mode) {
	case MODE_11A:
	case MODE_11NA_HT20:
	case MODE_11NA_HT40:
	case MODE_11AC_VHT20:
	case MODE_11AC_VHT40:
	case MODE_11AC_VHT80:
		band = IEEE80211_BAND_5GHZ;
		break;
	case MODE_11G:
	case MODE_11B:
	case MODE_11GONLY:
	case MODE_11NG_HT20:
	case MODE_11NG_HT40:
	case MODE_11AC_VHT20_2G:
	case MODE_11AC_VHT40_2G:
	case MODE_11AC_VHT80_2G:
	default:
		band = IEEE80211_BAND_2GHZ;
	}

	return band;
}
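
/* Usage sketch (hypothetical, compiled out): mapping a firmware-reported
 * phymode to a band index, e.g. to pick the matching sband for rate
 * lookups. The helper is an assumption for illustration only.
 */
#if 0
static struct ieee80211_supported_band *
example_sband_for_phy_mode(struct ath10k *ar, u32 phy_mode)
{
	return ar->hw->wiphy->bands[phy_mode_to_band(phy_mode)];
}
#endif
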
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		dev_kfree_skb_any(skb);
		return;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		skb_queue_tail(&htt->rx_compl_q, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		ath10k_mac_tx_push_pending(ar);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		struct ath10k_pktlog_hdr *hdr =
			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					sizeof(*hdr) +
					__le16_to_cpu(hdr->size));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan =
			__ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		tasklet_schedule(&htt->txrx_compl_task);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
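
/* Delivery sketch (an assumption for illustration): the HTC/transport layer
 * is expected to hand each received HTT message to the exported handler
 * above. The handler owns the skb: it either consumes it synchronously and
 * frees it at the bottom, or queues it for the txrx tasklet (RX_IND,
 * RX_IN_ORD_PADDR_IND) and returns without freeing; TX_FETCH_IND queues a
 * copy and lets the original be freed. Compiled out; the callback name is
 * hypothetical.
 */
#if 0
static void example_transport_rx_cb(struct ath10k *ar, struct sk_buff *skb)
{
	/* ownership of skb passes to the HTT layer here */
	ath10k_htt_t2h_msg_handler(ar, skb);
}
#endif
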
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct ath10k *ar = htt->ar;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head rx_q;
	struct sk_buff_head rx_ind_q;
	struct sk_buff_head tx_ind_q;
	struct htt_resp *resp;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&rx_q);
	__skb_queue_head_init(&rx_ind_q);
	__skb_queue_head_init(&tx_ind_q);

	spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
	skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
	spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);

	spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
	skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
	spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 *   Note that with only one concurrent reader and one concurrent writer,
	 *   you don't need extra locking to use these macros.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

	ath10k_mac_tx_push_pending(ar);

	while ((skb = __skb_dequeue(&rx_q))) {
		resp = (struct htt_resp *)skb->data;
		spin_lock_bh(&htt->rx_ring.lock);
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		spin_unlock_bh(&htt->rx_ring.lock);
		dev_kfree_skb_any(skb);
	}

	while ((skb = __skb_dequeue(&rx_ind_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		dev_kfree_skb_any(skb);
	}
}
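
/* Registration sketch (an assumption; the init path is outside this
 * excerpt): the tasklet above is presumably set up during rx ring
 * allocation with the htt pointer as the tasklet data, roughly as below.
 * Compiled out; the fragment name is hypothetical.
 */
#if 0
static void example_htt_rx_alloc_fragment(struct ath10k_htt *htt)
{
	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);
}
#endif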