/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/nl80211.h>

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
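
/* For example (illustrative invocation, not from the original source),
 * hardware crypto can be disabled at module load time with:
 *	modprobe ath5k nohwcrypt=1
 * and non-standard channels exposed with all_channels=1. */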

MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
			bool skip_pcu);

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

static const struct ieee80211_rate ath5k_rates[] = {
	/* CCK rates; .bitrate is in units of 100 kbit/s */
	{ .bitrate = 10, .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20, .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55, .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110, .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	/* OFDM rates */
	{ .bitrate = 60, .hw_value = ATH5K_RATE_CODE_6M, .flags = 0 },
	{ .bitrate = 90, .hw_value = ATH5K_RATE_CODE_9M, .flags = 0 },
	{ .bitrate = 120, .hw_value = ATH5K_RATE_CODE_12M, .flags = 0 },
	{ .bitrate = 180, .hw_value = ATH5K_RATE_CODE_18M, .flags = 0 },
	{ .bitrate = 240, .hw_value = ATH5K_RATE_CODE_24M, .flags = 0 },
	{ .bitrate = 360, .hw_value = ATH5K_RATE_CODE_36M, .flags = 0 },
	{ .bitrate = 480, .hw_value = ATH5K_RATE_CODE_48M, .flags = 0 },
	{ .bitrate = 540, .hw_value = ATH5K_RATE_CODE_54M, .flags = 0 },
	/* XR missing */
};

static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	/* The hardware only reports the low 15 bits of the TSF in the
	 * rx status, so extend it using the full 64-bit TSF. */
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}
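
/*
 * Worked example of the extension above (illustrative numbers): if the
 * 64-bit TSF reads 0x12340000 (low 15 bits 0x0000) and rstamp is 0x7ff0,
 * then 0x0000 < 0x7ff0, so the rstamp belongs to the previous 15-bit
 * window; tsf is moved back by 0x8000 and the result is 0x1233fff0,
 * i.e. just before the TSF read. This only works if the frame was
 * received less than 0x8000 us (about 32 ms) before the TSF was read.
 */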

static const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		/* match on the major revision first, then look for an
		 * exact match which, if found, ends the search */
		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}

static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};

/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);

	return ath_reg_notifier_apply(wiphy, request, regulatory);
}
250 /********************\
251 * Channel/mode setup *
252 \********************/
255 * Returns true for the channel numbers used without all_channels modparam.
257 static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
259 if (band == IEEE80211_BAND_2GHZ && chan <= 14)
262 return /* UNII 1,2 */
263 (((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
265 ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
267 ((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
268 /* 802.11j 5.030-5.080 GHz (20MHz) */
269 (chan == 8 || chan == 12 || chan == 16) ||
270 /* 802.11j 4.9GHz (20MHz) */
271 (chan == 184 || chan == 188 || chan == 192 || chan == 196));
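
/*
 * For example (illustrative): 5 GHz channels 36, 40, ..., 64 pass the
 * UNII-1/2 test ((chan & 3) == 0), and 149, 153, ..., 165 pass the
 * UNII-3 test ((chan & 3) == 1), while an in-between channel such as 34
 * is filtered out unless the all_channels module parameter is set.
 */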

static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Write channel info, needed for ath5k_channel_ok() */
		channels[count].center_freq = freq;
		channels[count].band = band;
		channels[count].hw_value = mode;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, &channels[count]))
			continue;

		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
			continue;

		count++;
	}

	return count;
}

static void
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
{
	u8 i;

	for (i = 0; i < AR5K_MAX_RATES; i++)
		ah->rate_idx[b->band][i] = -1;

	for (i = 0; i < b->n_bitrates; i++) {
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}
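
/*
 * Example (illustrative): after this runs for the 2 GHz band, the
 * hardware rate code for 1M (0x1B, see the 5211 comment in
 * ath5k_setup_bands below) maps back to bitrate index 0, and its
 * short-preamble variant maps to the same index, so
 * ath5k_hw_to_driver_rix() can translate codes from the RX/TX status
 * into mac80211 indices; all unused codes stay at -1.
 */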

static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);

	/* 2GHz band */
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(ah, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &ah->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(ah, sband);

	ath5k_debug_dump_bands(ah);

	return 0;
}

/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart things a la ath5k_init.
 *
 * Called with ah->lock.
 */
int
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  ah->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(ah, chan, true);
}

void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);
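
	/*
	 * Example (illustrative): with hw_macaddr 00:11:22:33:44:55 and a
	 * second interface at 00:11:22:33:44:66, the addresses differ only
	 * in the last byte (0x55 ^ 0x66 = 0x33), so that mask byte becomes
	 * ~0x33 = 0xcc and the hardware ignores those bits when matching
	 * the BSSID.
	 */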

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
						   &iter_data);
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);

	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		ah->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}

static inline int
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
		 "hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = ah->rate_idx[ah->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
			  common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(ah->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}

static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
	return 0;
}
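
/*
 * The resulting RX chain, sketched for illustration (each new buffer is
 * appended self-linked, which "fixes" the previous tail as described in
 * the comment above):
 *
 *	rxdp -> ds1 -> ds2 -> ... -> dsN --+
 *	                              ^    |
 *	                              +----+  (tail points to itself)
 */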

static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;

	return htype;
}

static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	/* XXX endianness */
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);

	rate = ieee80211_get_tx_rate(ah->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(ah->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	/* Set up multi-rate retry (MRR) descriptor */
	memset(mrr_rate, 0, sizeof(mrr_rate));
	memset(mrr_tries, 0, sizeof(mrr_tries));
	for (i = 0; i < 3; i++) {
		rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
		if (!rate)
			break;

		mrr_rate[i] = rate->hw_value;
		mrr_tries[i] = info->control.rates[i + 1].count;
	}

	ath5k_hw_setup_mrr_tx_desc(ah, ds,
		mrr_rate[0], mrr_tries[0],
		mrr_rate[1], mrr_tries[1],
		mrr_rate[2], mrr_tries[2]);

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*******************\
* Descriptors setup *
\*******************/

static int
ath5k_desc_alloc(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	ah->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				&ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, ah->desc_len, (unsigned long long)ah->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(ah, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	ah->bufptr = bf;

	INIT_LIST_HEAD(&ah->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->rxbuf);
	}

	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&ah->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
	ah->desc = NULL;
	return ret;
}

static void
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct ath_common *common = ath5k_hw_common(ah);

	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_desc_free(struct ath5k_hw *ah)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;

	kfree(ah->bufptr);
	ah->bufptr = NULL;
}

/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_hw *ah,
		int qtype, int subtype)
{
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
			AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	txq = &ah->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &ah->txqs[qnum];
}

static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

static int
ath5k_beaconq_config(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret)
		goto err;

	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (ah->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}

/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @ah: The &struct ath5k_hw
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(ah, bf);

				ath5k_txbuf_free_skb(ah, bf);

				spin_lock_bh(&ah->txbuflock);
				list_move_tail(&bf->list, &ah->txbuf);
				ah->txbuf_len++;
				txq->txq_len--;
				spin_unlock_bh(&ah->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}

static void
ath5k_txq_release(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq = ah->txqs;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(ah, txq->qnum);
			txq->setup = false;
		}
}

/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_hw *ah)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&ah->rxbuflock);
	ah->rxlink = NULL;
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ret = ath5k_rxbuf_setup(ah, bf);
		if (ret != 0) {
			spin_unlock_bh(&ah->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&ah->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

/*
 * Disable the receive logic on PCU (DRU)
 * in preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_hw *ah)
{

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(ah);
}

static unsigned int
ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt. In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}

static void
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;

	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);

		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);

		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 byte (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}

		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= ah->nexttbtt)
			ath5k_beacon_update_timers(ah, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
			ath5k_beacon_update_timers(ah, bc_tstamp);
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}

static void
ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath_common *common = ath5k_hw_common(ah);

	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
		return;

	ewma_add(&ah->ah_beacon_rssi_avg, rssi);

	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}

/*
 * Compute padding position. skb must contain an IEEE 802.11 frame.
 */
static int ath5k_common_padpos(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;

	if (ieee80211_has_a4(frame_control))
		padpos += ETH_ALEN;

	if (ieee80211_is_data_qos(frame_control))
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
}

/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len > padpos) {

		if (skb_headroom(skb) < padsize)
			return -1;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		return padsize;
	}

	return 0;
}

/*
 * The MAC header is padded to have 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit. We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len >= padpos + padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
	}

	return 0;
}
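
/*
 * Worked example for the two helpers above: a regular 3-address data
 * frame has a 24 byte header, so padpos = 24 and padsize = 24 & 3 = 0,
 * i.e. nothing to do. A QoS data frame adds IEEE80211_QOS_CTL_LEN (2)
 * bytes: padpos = 26 and padsize = 2, so two bytes are inserted (on TX)
 * or stripped (on RX) between the header and the payload to restore the
 * 32-bit alignment the hardware expects.
 */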

static void
ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
	 * frame, but I'm not sure.
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply to that. This affects IBSS merge only
	 * right now, so it's not too bad...
	 */
	rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_MACTIME_MPDU;

	rxs->freq = ah->curchan->center_freq;
	rxs->band = ah->curchan->band;

	rxs->signal = ah->ah_noise_floor + rs->rs_rssi;

	rxs->antenna = rs->rs_antenna;

	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		ah->stats.antenna_rx[rs->rs_antenna]++;
	else
		ah->stats.antenna_rx[0]++; /* invalid */

	rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);

	if (rxs->rate_idx >= 0 && rs->rs_rate ==
	    ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;

	trace_ath5k_rx(ah, skb);

	ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);

	/* check beacons in IBSS mode */
	if (ah->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(ah, skb, rxs);

	ieee80211_rx(ah->hw, skb);
}

/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
{
	ah->stats.rx_all_count++;
	ah->stats.rx_bytes_count += rs->rs_datalen;

	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			ah->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			ah->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			ah->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error. If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it. This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			ah->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			ah->stats.rxerr_mic++;
			return true;
		}

		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}

	if (unlikely(rs->rs_more)) {
		ah->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}

static void
ath5k_set_current_imask(struct ath5k_hw *ah)
{
	enum ath5k_int imask;
	unsigned long flags;

	spin_lock_irqsave(&ah->irqlock, flags);
	imask = ah->imask;
	if (ah->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (ah->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(ah, imask);
	spin_unlock_irqrestore(&ah->irqlock, flags);
}

static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_hw *ah = (void *)data;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&ah->rxbuflock);
	if (list_empty(&ah->rxbuf)) {
		ATH5K_WARN(ah, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(ah) == bf->daddr)
			break;

		ret = ah->ah_proc_rx_desc(ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(ah, "error in processing rx descriptor\n");
			ah->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(ah, &rs)) {
			next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(ah->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(ah, skb, &rs);

			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &ah->rxbuf);
	} while (ath5k_rxbuf_setup(ah, bf) == 0);
unlock:
	spin_unlock(&ah->rxbuflock);
	ah->rx_pending = false;
	ath5k_set_current_imask(ah);
}

/*************\
* TX Handling *
\*************/

void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	trace_ath5k_tx(ah, skb, txq);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	if (txq->txq_len >= txq->txq_max &&
	    txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&ah->txbuflock, flags);
	if (list_empty(&ah->txbuf)) {
		ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	ah->txbuf_len--;
	if (list_empty(&ah->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&ah->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
		bf->skb = NULL;
		spin_lock_irqsave(&ah->txbuflock, flags);
		list_add_tail(&bf->list, &ah->txbuf);
		ah->txbuf_len++;
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		goto drop_packet;
	}
	return;

drop_packet:
	dev_kfree_skb_any(skb);
}

static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;

	ah->stats.tx_all_count++;
	ah->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}

	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		ah->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			ah->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			ah->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			ah->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	 * Remove MAC header padding before giving the frame
	 * back to mac80211.
	 */
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		ah->stats.antenna_tx[ts->ts_antenna]++;
	else
		ah->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(ah, skb, txq, ts);
	ieee80211_tx_status(ah->hw, skb);
}

static void
ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = ah->ah_proc_tx_desc(ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(ah,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(ah, skb, txq, &ts);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
			spin_lock(&ah->txbuflock);
			list_move_tail(&bf->list, &ah->txbuf);
			ah->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&ah->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(ah->hw, txq->qnum);
}

static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_hw *ah = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
			ath5k_tx_processq(ah, &ah->txqs[i]);

	ah->tx_pending = false;
	ath5k_set_current_imask(ah);
}

/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath5k_desc *ds;
	int ret = 0;
	u8 antenna;
	u32 flags;
	const int padsize = 0;

	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);

	if (dma_mapping_error(ah->dev, bf->skbaddr)) {
		ATH5K_ERR(ah, "beacon DMA mapping failed\n");
		dev_kfree_skb_any(skb);
		bf->skb = NULL;
		return -EIO;
	}

	ds = bf->desc;
	antenna = ah->ah_tx_ant;

	flags = AR5K_TXDESC_NOACK;
	if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
	} else
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automatically, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = ah->bsent & 4 ? 2 : 1;


	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
			ieee80211_get_tx_rate(ah->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_vif *avf = (void *)vif->drv_priv;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	ath5k_txbuf_free_skb(ah, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(ah, avf->bbuf);
	if (ret)
		avf->bbuf->skb = NULL;
out:
	return ret;
}

/*
 * Transmit a beacon frame at SWBA. Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_hw *ah)
{
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;
	int err;

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out. If
	 * not, don't try to post another: skip this
	 * period and wait for the next. Missed beacons
	 * indicate a problem and should not occur. If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
		ah->bmisscount++;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", ah->bmisscount);
		if (ah->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"stuck beacon time (%u missed)\n",
				ah->bmisscount);
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
		return;
	}
	if (unlikely(ah->bmisscount != 0)) {
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"resume beacon xmit after %u misses\n",
			ah->bmisscount);
		ah->bmisscount = 0;
	}

	if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
		int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
		vif = ah->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
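		/*
		 * Example (illustrative numbers): with bintval = 100 TU
		 * and ATH_BCBUF = 4 beacon slots, each vif gets a 25 TU
		 * window; if tsftu % bintval = 60, slot evaluates to 2
		 * and the vif in the following slot (bslot[3]) transmits
		 * its beacon now.
		 */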
	} else /* only one interface */
		vif = ah->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
		ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		err = ath5k_beacon_update(ah->hw, vif);
		if (err)
			return;
	}

	if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
		     ah->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
		return;
	}

	trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);

	ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, ah->bhalq);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		ah->bhalq, (unsigned long long)bf->daddr, bf->desc);

	skb = ieee80211_get_buffered_bc(ah->hw, vif);
	while (skb) {
		ath5k_tx_queue(ah->hw, skb, ah->cabq);

		if (ah->cabq->txq_len >= ah->cabq->txq_max)
			break;

		skb = ieee80211_get_buffered_bc(ah->hw, vif);
	}

	ah->bsent++;
}

/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @ah: struct ath5k_hw pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when a new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
void
ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
{
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;

	intval = ah->bintval & AR5K_BEACON_PERIOD;
	if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(ah, "intval %u is too low, min 15\n",
				   intval);
	}
	if (WARN_ON(!intval))
		return;

	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);

	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);

#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
	 * configuration we need to make sure it is bigger than that. */
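	/*
	 * Worked example (illustrative numbers): with
	 * AR5K_TUNE_SW_BEACON_RESP == 10, FUDGE is 13. If hw_tu = 8000
	 * and intval = 100, the "called internally" case below gives
	 *	nexttbtt = roundup(8000 + 13, 100) = 8100,
	 * i.e. the next TBTT aligned to the beacon period and at least
	 * FUDGE TUs in the future.
	 */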
	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
		 * beacon received, SW merge happened but HW TSF not yet updated.
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STAs.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}
#undef FUDGE

	ah->nexttbtt = nexttbtt;

	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon(ah, nexttbtt, intval);

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}

/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @ah: struct ath5k_hw pointer we are operating on
 *
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
 * interrupts to detect TSF updates only.
 */
void
ath5k_beacon_config(struct ath5k_hw *ah)
{
	unsigned long flags;

	spin_lock_irqsave(&ah->block, flags);
	ah->bmisscount = 0;
	ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

	if (ah->enable_beacon) {
		/*
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
		 * only once here.
		 * We use the SWBA interrupt only to keep track of the beacon
		 * timers in order to detect automatic TSF updates.
		 */
		ath5k_beaconq_config(ah);

		ah->imask |= AR5K_INT_SWBA;

		if (ah->opmode == NL80211_IFTYPE_ADHOC) {
			if (ath5k_hw_hasveol(ah))
				ath5k_beacon_send(ah);
		} else
			ath5k_beacon_update_timers(ah, -1);
	} else {
		ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
	}

	ath5k_hw_set_imr(ah, ah->imask);
	spin_unlock_irqrestore(&ah->block, flags);
}

static void ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect whether
	 * automatic TSF updates happened.
	 */
	if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL supported */
		u64 tsf = ath5k_hw_get_tsf64(ah);
		ah->nexttbtt += ah->bintval;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
				ah->nexttbtt,
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
		spin_lock(&ah->block);
		ath5k_beacon_send(ah);
		spin_unlock(&ah->block);
	}
}

/********************\
* Interrupt handling *
\********************/

static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
		/* run ANI only when full calibration is not active */
		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		tasklet_schedule(&ah->calib);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}

static void
ath5k_schedule_rx(struct ath5k_hw *ah)
{
	ah->rx_pending = true;
	tasklet_schedule(&ah->rxtq);
}

static void
ath5k_schedule_tx(struct ath5k_hw *ah)
{
	ah->tx_pending = true;
	tasklet_schedule(&ah->txtq);
}
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_hw *ah = dev_id;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
		((ath5k_get_bus_type(ah) != ATH_AHB) &&
				!ath5k_hw_is_intr_pending(ah))))
		return IRQ_NONE;

	do {
		ath5k_hw_get_isr(ah, &status);	/* NB: clears IRQ too */
		ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, ah->imask);
		if (unlikely(status & AR5K_INT_FATAL)) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "fatal int, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		} else if (unlikely(status & AR5K_INT_RXORN)) {
			/*
			 * Receive buffers are full. Either the bus is busy or
			 * the CPU is not fast enough to process all received
			 * frames.
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset -
			 * this guess is copied from the HAL.
			 */
			ah->stats.rxorn_intr++;
			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					  "rx overrun, resetting\n");
				ieee80211_queue_work(ah->hw, &ah->reset_work);
			} else
				ath5k_schedule_rx(ah);
		} else {
			if (status & AR5K_INT_SWBA)
				tasklet_hi_schedule(&ah->beacontq);

			if (status & AR5K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work at
				 *     least on older hardware revs.
				 */
				ah->stats.rxeol_intr++;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				ath5k_hw_update_tx_triglevel(ah, true);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_schedule_rx(ah);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
					| AR5K_INT_TXERR | AR5K_INT_TXEOL))
				ath5k_schedule_tx(ah);
			if (status & AR5K_INT_BMISS) {
				/* TODO */
			}
			if (status & AR5K_INT_MIB) {
				ah->stats.mib_intr++;
				ath5k_hw_update_mib_counters(ah);
				ath5k_ani_mib_intr(ah);
			}
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&ah->rf_kill.toggleq);
		}

		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);

	if (ah->rx_pending || ah->tx_pending)
		ath5k_set_current_imask(ah);

	if (unlikely(!counter))
		ATH5K_WARN(ah, "too many interrupts, giving up for now\n");

	ath5k_intr_calibration_poll(ah);

	return IRQ_HANDLED;
}

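/*
 * The handler returns IRQ_NONE when the device is marked invalid or (on
 * non-AHB buses) has no interrupt pending, so a shared PCI line can be
 * passed on to the next handler; anything else is acknowledged with
 * IRQ_HANDLED. The counter bounds the ISR loop in case a wedged device
 * keeps asserting interrupts faster than we can drain them.
 */
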
/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_tasklet_calibrate(unsigned long data)
{
	struct ath5k_hw *ah = (void *)data;

	/* Only full calibration for now */
	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

	ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(ah->curchan->center_freq),
		ah->curchan->hw_value);

	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
		ieee80211_queue_work(ah->hw, &ah->reset_work);
	}
	if (ath5k_hw_phy_calibrate(ah, ah->curchan))
		ATH5K_ERR(ah, "calibration of channel %u failed\n",
			ieee80211_frequency_to_channel(
				ah->curchan->center_freq));

	/* Noise floor calibration interrupts rx/tx path while I/Q calibration
	 * doesn't.
	 * TODO: We should stop TX here, so that it doesn't interfere.
	 * Note that stopping the queues is not enough to stop TX! */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
		ah->ah_cal_next_nf = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
		ath5k_hw_update_noise_floor(ah);
	}

	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
}

static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_hw *ah = (void *)data;

	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}

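/*
 * ah_cal_mask is how the calibration paths coordinate: the poll in the
 * interrupt path refuses to schedule ANI while AR5K_CALIBRATION_FULL is
 * set, and each tasklet clears its own bit when it finishes. The mask is
 * a scheduling hint, not a lock.
 */
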
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

	mutex_lock(&ah->lock);

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			if (txq->txq_len > 1) {
				if (txq->txq_poll_mark) {
					ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
					txq->txq_stuck++;
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
			  "TX queues stuck, resetting\n");
		ath5k_reset(ah, NULL, true);
	}

	mutex_unlock(&ah->lock);

	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}

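/*
 * Stuck-queue detection is a two-pass mark-and-sweep: a queue that still
 * holds frames gets txq_poll_mark set on one pass, and the mark is
 * cleared by the tx completion path whenever descriptors complete. If
 * the mark survives until the next pass (ATH5K_TX_COMPLETE_POLL_INT ms
 * later; 3 seconds in ath5k.h at the time of writing), nothing completed
 * for a whole interval and we assume the hardware wedged.
 */
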
/*************************\
* Initialization routines *
\*************************/

int __devinit
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = ah->hw;
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
	SET_IEEE80211_DEV(hw, ah->dev);
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	__set_bit(ATH_STAT_INVALID, ah->status);

	ah->opmode = NL80211_IFTYPE_STATION;
	ah->bintval = 1000;
	mutex_init(&ah->lock);
	spin_lock_init(&ah->rxbuflock);
	spin_lock_init(&ah->txbuflock);
	spin_lock_init(&ah->block);
	spin_lock_init(&ah->irqlock);

	/* Setup interrupt handler */
	ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
	if (ret) {
		ATH5K_ERR(ah, "request_irq failed\n");
		goto err;
	}

	common = ath5k_hw_common(ah);
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = hw;
	common->priv = ah;
	common->clockrate = 40;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */
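	/*
	 * The size read back is in 32-bit words (mirroring the PCI cache
	 * line size register), hence the << 2: e.g. a reported value of
	 * 16 words becomes a 64-byte cache line.
	 */
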
	spin_lock_init(&common->cc_lock);

	/* Initialize device */
	ret = ath5k_hw_init(ah);
	if (ret)
		goto err_irq;

	/* set up multi-rate retry capabilities */
	if (ah->ah_version == AR5K_AR5212) {
		hw->max_rates = 4;
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

	ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
					ah->ah_mac_srev,
					ah->ah_phy_revision);

	if (!ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (ah->ah_radio_5ghz_revision &&
			!ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!test_bit(AR5K_MODE_11B,
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				ATH5K_INFO(ah, "RF%s multiband radio found"
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (ah->ah_radio_5ghz_revision &&
				ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					ah->ah_radio_5ghz_revision),
					ah->ah_radio_5ghz_revision);
			ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					ah->ah_radio_2ghz_revision),
					ah->ah_radio_2ghz_revision);
		}
	}

	ath5k_debug_init_device(ah);

	/* ready to process interrupts */
	__clear_bit(ATH_STAT_INVALID, ah->status);

	return 0;
err_ah:
	ath5k_hw_deinit(ah);
err_irq:
	free_irq(ah->irq, ah);
err:
	return ret;
}

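/*
 * A minimal sketch of how a bus glue layer is expected to drive this
 * entry point, assuming a PCI probe path and an ath_bus_ops table named
 * ath_pci_bus_ops (the names here are illustrative, not taken from this
 * file):
 *
 *	static int __devinit ath_pci_probe(struct pci_dev *pdev,
 *					   const struct pci_device_id *id)
 *	{
 *		struct ieee80211_hw *hw;
 *		struct ath5k_hw *ah;
 *		...
 *		hw = ieee80211_alloc_hw(sizeof(*ah), &ath5k_hw_ops);
 *		ah = hw->priv;
 *		ah->hw = hw;
 *		ah->dev = &pdev->dev;
 *		ah->irq = pdev->irq;
 *		...
 *		return ath5k_init_ah(ah, &ath_pci_bus_ops);
 *	}
 *
 * ath5k_init_ah() then owns the rest: IRQ registration, hardware init
 * and mac80211 registration, undoing everything on its error paths.
 */
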
static int
ath5k_stop_locked(struct ath5k_hw *ah)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, ah->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(ah->hw);

	if (!test_bit(ATH_STAT_INVALID, ah->status)) {
		ath5k_led_off(ah);
		ath5k_hw_set_imr(ah, 0);
		synchronize_irq(ah->irq);
		ath5k_rx_stop(ah);
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(ah);
		ath5k_hw_phy_disable(ah);
	}

	return 0;
}

int ath5k_start(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;

	mutex_lock(&ah->lock);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether or not this is the first time through.
	 */
	ath5k_stop_locked(ah);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ah->curchan = ah->hw->conf.channel;
	ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;

	ret = ath5k_reset(ah, NULL, false);
	if (ret)
		goto done;

	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;

	for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
		ah->bslot[i] = NULL;

	ret = 0;
done:
	mutex_unlock(&ah->lock);

	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

	return ret;
}

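/*
 * AR5K_INT_SWBA (software beacon alert) is deliberately missing from the
 * initial mask built above: beacon related interrupts are only enabled
 * by ath5k_beacon_config() (see the "intrs are enabled by
 * ath5k_beacon_config" note in ath5k_reset() below), once an interface
 * actually needs them.
 */
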
static void ath5k_stop_tasklets(struct ath5k_hw *ah)
{
	ah->rx_pending = false;
	ah->tx_pending = false;
	tasklet_kill(&ah->rxtq);
	tasklet_kill(&ah->txtq);
	tasklet_kill(&ah->calib);
	tasklet_kill(&ah->beacontq);
	tasklet_kill(&ah->ani_tasklet);
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
void ath5k_stop(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	int ret;

	mutex_lock(&ah->lock);
	ret = ath5k_stop_locked(ah);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system).  This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results in weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
		ret = ath5k_hw_on_hold(ah);

		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
	}

	mutex_unlock(&ah->lock);

	ath5k_stop_tasklets(ah);

	cancel_delayed_work_sync(&ah->tx_complete_work);

	ath5k_rfkill_hw_stop(ah);
}

/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * This should be called with ah->lock held.
 */
static int
ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
							bool skip_pcu)
{
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;
	bool fast;

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");

	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(ah->irq);
	ath5k_stop_tasklets(ah);

	/* Save ani mode and disable ANI during
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(ah);
	if (chan)
		ah->curchan = chan;

	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;

	ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
	if (ret) {
		ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
		goto err;
	}

	ret = ath5k_rx_start(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't start recv logic\n");
		goto err;
	}

	ath5k_ani_init(ah, ani_mode);

	ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
	ah->ah_cal_next_ani = jiffies;
	ah->ah_cal_next_nf = jiffies;
	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);

	/* clear survey data and cycle counters */
	memset(&ah->survey, 0, sizeof(ah->survey));
	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
	spin_unlock_bh(&common->cc_lock);

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 */
	/* ath5k_chan_change(ah, c); */

	ath5k_beacon_config(ah);
	/* intrs are enabled by ath5k_beacon_config */

	ieee80211_wake_queues(ah->hw);

	return 0;
err:
	return ret;
}

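/*
 * The ani_mode save/restore around the reset matters: running with
 * ATH5K_ANI_MODE_OFF while the chip is being reprogrammed avoids the
 * false PHY error interrupts noted above, and re-initializing ANI with
 * the previous mode afterwards restarts it from a clean baseline rather
 * than statistics gathered across the reset.
 */
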
static void ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		reset_work);

	mutex_lock(&ah->lock);
	ath5k_reset(ah, NULL, true);
	mutex_unlock(&ah->lock);
}

static int __devinit
ath5k_init(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
	struct ath5k_txq *txq;
	u8 mac[ETH_ALEN] = {};
	int ret;

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MACs that don't have support will
	 * return false w/o doing anything.  MACs that do
	 * support it will return true w/o doing anything.
	 */
	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
	if (ret < 0)
		goto err;
	if (ret > 0)
		__set_bit(ATH_STAT_MRRETRY, ah->status);

	/*
	 * Collect the channel list.  The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't get channels\n");
		goto err;
	}

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
		ATH5K_ERR(ah, "can't setup a beacon xmit queue\n");
		goto err_desc;
	}
	ah->bhalq = ret;
	ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(ah->cabq)) {
		ATH5K_ERR(ah, "can't setup cab queue\n");
		ret = PTR_ERR(ah->cabq);
		goto err_bhal;
	}

	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		 * directly use the mac80211 queue number without any mapping */
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}

	tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
	tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
	tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
	tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
	tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);

	INIT_WORK(&ah->reset_work, ath5k_reset_work);
	INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);

	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
	if (ret) {
		ATH5K_ERR(ah, "unable to read address from EEPROM\n");
		goto err_queues;
	}

	SET_IEEE80211_PERM_ADDR(hw, mac);
	/* All MAC address bits matter for ACKs */
	ath5k_update_bssid_mask_and_opmode(ah, NULL);

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
		ATH5K_ERR(ah, "can't initialize regulatory system\n");
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't register ieee80211 hw\n");
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

	ath5k_init_leds(ah);
	ath5k_sysfs_register(ah);

	return 0;
err_queues:
	ath5k_txq_release(ah);
err_bhal:
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
err_desc:
	ath5k_desc_free(ah);
err:
	return ret;
}

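/*
 * On >= 6-queue hardware the four data queues above are registered in
 * VO, VI, BE, BK order, matching mac80211's queue numbering (0 = highest
 * priority); that is why the driver can use mac80211 queue numbers
 * directly, as the comment in ath5k_init() notes. On 5210-class hardware
 * everything funnels through the single BE queue instead.
 */
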
void
ath5k_deinit_ah(struct ath5k_hw *ah)
{
	struct ieee80211_hw *hw = ah->hw;

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ieee80211_unregister_hw(hw);
	ath5k_desc_free(ah);
	ath5k_txq_release(ah);
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
	ath5k_unregister_leds(ah);

	ath5k_sysfs_unregister(ah);
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
	ath5k_hw_deinit(ah);
	free_irq(ah->irq, ah);
}

bool
ath5k_any_vif_assoc(struct ath5k_hw *ah)
{
	struct ath5k_vif_iter_data iter_data;
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
						   &iter_data);
	return iter_data.any_assoc;
}

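/*
 * ath5k_vif_iter() (defined elsewhere in this driver) accumulates state
 * across all active interfaces. Here only the any_assoc flag is of
 * interest, so hw_macaddr, need_set_hw_addr and found_active are primed
 * so that the iterator skips its address/bssid-mask bookkeeping.
 */
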
void
ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
	struct ath5k_hw *ah = hw->priv;
	u32 rfilt;

	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ah->filter_flags = rfilt;
}