1 /******************************************************************************
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *****************************************************************************/
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 #include <linux/kernel.h>
33 #include <linux/module.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/pci-aspm.h>
37 #include <linux/slab.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/delay.h>
40 #include <linux/sched.h>
41 #include <linux/skbuff.h>
42 #include <linux/netdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_arp.h>
47 #include <net/mac80211.h>
49 #include <asm/div64.h>
51 #define DRV_NAME "iwl4965"
53 #include "iwl-eeprom.h"
57 #include "iwl-helpers.h"
62 /******************************************************************************
66 ******************************************************************************/
69 * module name, copyright, version, etc.
/* Module identification strings exposed via modinfo.  The VD suffix of
 * DRV_VERSION is presumably "d" under CONFIG_IWLEGACY_DEBUG and empty
 * otherwise -- the #else/#endif lines are not visible here, TODO confirm. */
71 #define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
73 #ifdef CONFIG_IWLEGACY_DEBUG
79 #define DRV_VERSION IWLWIFI_VERSION VD
82 MODULE_DESCRIPTION(DRV_DESCRIPTION);
83 MODULE_VERSION(DRV_VERSION);
84 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
85 MODULE_LICENSE("GPL");
86 MODULE_ALIAS("iwl4965");
/*
 * il4965_check_abort_status - react to an aborted-Tx status from the uCode
 *
 * When a single frame failed with TX_STATUS_FAIL_RFKILL_FLUSH, log it and,
 * unless the driver is tearing down (S_EXIT_PENDING set), queue the
 * tx_flush work item so all pending frames get flushed out.
 */
88 void il4965_check_abort_status(struct il_priv *il,
89 u8 frame_count, u32 status)
91 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
92 IL_ERR("Tx flush command to flush out all frames\n");
93 if (!test_bit(S_EXIT_PENDING, &il->status))
94 queue_work(il->workqueue, &il->tx_flush);
/* Module parameters for the 4965 driver; fields not listed here are
 * zero-initialized (the explicit initializers are elided in this view). */
101 struct il_mod_params il4965_mod_params = {
104 /* the rest are 0 by default */
/*
 * il4965_rx_queue_reset - return every Rx buffer to the rx_used list
 *
 * Under rxq->lock: re-initializes the rx_free/rx_used lists, unmaps and
 * frees any page still attached to a pool entry, moves all pool entries
 * onto rx_used, clears the queue[] RBD pointers and zeroes the read/write
 * indexes so the queue reads as fully processed but not yet restocked.
 */
107 void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
111 spin_lock_irqsave(&rxq->lock, flags);
112 INIT_LIST_HEAD(&rxq->rx_free);
113 INIT_LIST_HEAD(&rxq->rx_used);
114 /* Fill the rx_used queue with _all_ of the Rx buffers */
115 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
116 /* In the reset function, these buffers may have been allocated
117 * to an SKB, so we need to unmap and free potential storage */
118 if (rxq->pool[i].page != NULL) {
/* Unmap the full (possibly multi-page) receive buffer; the DMA
 * direction argument is on an elided line -- presumably
 * PCI_DMA_FROMDEVICE, TODO confirm. */
119 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
120 PAGE_SIZE << il->hw_params.rx_page_order,
122 __il_free_pages(il, rxq->pool[i].page);
123 rxq->pool[i].page = NULL;
125 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
128 for (i = 0; i < RX_QUEUE_SIZE; i++)
129 rxq->queue[i] = NULL;
131 /* Set us so that we have processed and used all buffers, but have
132 * not restocked the Rx queue with fresh buffers */
133 rxq->read = rxq->write = 0;
134 rxq->write_actual = 0;
136 spin_unlock_irqrestore(&rxq->lock, flags);
/*
 * il4965_rx_init - program the device's Rx DMA channel 0
 *
 * Chooses a 4K or 8K receive-buffer size from the amsdu_size_8K module
 * parameter, stops the channel, resets the RBD write index, tells the
 * device where the RBD circular buffer and Rx status area live in DRAM,
 * then re-enables the channel and sets default interrupt coalescing.
 */
139 int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
142 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
145 if (il->cfg->mod_params->amsdu_size_8K)
146 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
148 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
/* Writing 0 to the channel config register stops Rx DMA */
151 il_wr(il, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
153 /* Reset driver's Rx queue write idx */
154 il_wr(il, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
156 /* Tell device where to find RBD circular buffer in DRAM */
157 il_wr(il, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
158 (u32)(rxq->bd_dma >> 8));
160 /* Tell device where in DRAM to update its Rx status */
161 il_wr(il, FH_RSCSR_CHNL0_STTS_WPTR_REG,
162 rxq->rb_stts_dma >> 4);
165 * Direct rx interrupts to hosts
166 * Rx buffer size 4 or 8k
170 il_wr(il, FH_MEM_RCSR_CHNL0_CONFIG_REG,
171 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
172 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
173 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
175 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
176 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
178 /* Set interrupt coalescing timer to default (2048 usecs) */
179 il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
/*
 * il4965_set_pwr_vmain - select V_MAIN as the NIC power source
 *
 * NOTE(review): in the original source the pci_pme_capable()/V_AUX branch
 * below appears to sit inside a "(for documentation purposes)" comment
 * block whose delimiters are elided here, leaving only the unconditional
 * V_MAIN selection live -- confirm against the full file.
 */
184 static void il4965_set_pwr_vmain(struct il_priv *il)
187 * (for documentation purposes)
188 * to set power to V_AUX, do:
190 if (pci_pme_capable(il->pci_dev, PCI_D3cold))
191 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
192 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
193 ~APMG_PS_CTRL_MSK_PWR_SRC);
196 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
197 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
198 ~APMG_PS_CTRL_MSK_PWR_SRC);
/*
 * il4965_hw_nic_init - one-time hardware bring-up for the 4965
 *
 * Runs APM init/config, selects V_MAIN power, allocates (or resets) the
 * Rx queue, replenishes and programs it, updates the Rx write pointer,
 * then allocates/resets all Tx and command queues and sets S_INIT.
 * Register/IRQ-coalescing writes are done under il->lock.
 */
201 int il4965_hw_nic_init(struct il_priv *il)
204 struct il_rx_queue *rxq = &il->rxq;
208 spin_lock_irqsave(&il->lock, flags);
209 il->cfg->ops->lib->apm_ops.init(il);
211 /* Set interrupt coalescing calibration timer to default (512 usecs) */
212 il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
214 spin_unlock_irqrestore(&il->lock, flags);
216 il4965_set_pwr_vmain(il);
218 il->cfg->ops->lib->apm_ops.config(il);
220 /* Allocate the RX queue, or reset if it is already allocated */
222 ret = il_rx_queue_alloc(il);
224 IL_ERR("Unable to initialize Rx queue\n");
228 il4965_rx_queue_reset(il, rxq);
230 il4965_rx_replenish(il);
232 il4965_rx_init(il, rxq);
234 spin_lock_irqsave(&il->lock, flags);
236 rxq->need_update = 1;
237 il_rx_queue_update_write_ptr(il, rxq);
239 spin_unlock_irqrestore(&il->lock, flags);
241 /* Allocate or reset and init all Tx and Command queues */
243 ret = il4965_txq_ctx_alloc(il);
247 il4965_txq_ctx_reset(il);
249 set_bit(S_INIT, &il->status);
255 * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
/* The device stores RBD pointers as the DMA address shifted right by 8,
 * i.e. buffers must be 256-byte aligned (see the alignment BUG_ON in
 * il4965_rx_allocate()). */
257 static inline __le32 il4965_dma_addr2rbd_ptr(struct il_priv *il,
260 return cpu_to_le32((u32)(dma_addr >> 8));
264 * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
266 * If there are slots in the RX queue that need to be restocked,
267 * and we have free pre-allocated buffers, fill the ranks as much
268 * as we can, pulling from rx_free.
270 * This moves the 'write' idx forward to catch up with 'processed', and
271 * also updates the memory address in the firmware to reference the new
274 void il4965_rx_queue_restock(struct il_priv *il)
276 struct il_rx_queue *rxq = &il->rxq;
277 struct list_head *element;
278 struct il_rx_buf *rxb;
281 spin_lock_irqsave(&rxq->lock, flags);
282 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
283 /* The overwritten rxb must be a used one */
284 rxb = rxq->queue[rxq->write];
285 BUG_ON(rxb && rxb->page);
287 /* Get next free Rx buffer, remove from free list */
288 element = rxq->rx_free.next;
289 rxb = list_entry(element, struct il_rx_buf, list);
292 /* Point to Rx buffer via next RBD in circular buffer */
293 rxq->bd[rxq->write] = il4965_dma_addr2rbd_ptr(il,
295 rxq->queue[rxq->write] = rxb;
296 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
299 spin_unlock_irqrestore(&rxq->lock, flags);
300 /* If the pre-allocated buffer pool is dropping low, schedule to
302 if (rxq->free_count <= RX_LOW_WATERMARK)
303 queue_work(il->workqueue, &il->rx_replenish);
306 /* If we've added more space for the firmware to place data, tell it.
307 * Increment device's write pointer in multiples of 8. */
308 if (rxq->write_actual != (rxq->write & ~0x7)) {
309 spin_lock_irqsave(&rxq->lock, flags);
310 rxq->need_update = 1;
311 spin_unlock_irqrestore(&rxq->lock, flags);
312 il_rx_queue_update_write_ptr(il, rxq);
317 * il4965_rx_replenish - Move all used packet from rx_used to rx_free
319 * When moving to rx_free an SKB is allocated for the slot.
321 * Also restock the Rx queue via il_rx_queue_restock.
322 * This is called as a scheduled work item (except for during initialization)
/*
 * il4965_rx_allocate - allocate and DMA-map pages for entries on rx_used
 *
 * @priority: GFP_KERNEL from the work item, GFP_ATOMIC from the Rx path.
 * Loops (loop head elided here) while rx_used is non-empty: allocates a
 * page of rx_page_order, maps it for DMA, and moves the buffer to rx_free.
 * The lock is dropped around the allocation, so the rx_used emptiness is
 * re-checked after reacquiring it.
 */
324 static void il4965_rx_allocate(struct il_priv *il, gfp_t priority)
326 struct il_rx_queue *rxq = &il->rxq;
327 struct list_head *element;
328 struct il_rx_buf *rxb;
331 gfp_t gfp_mask = priority;
334 spin_lock_irqsave(&rxq->lock, flags);
335 if (list_empty(&rxq->rx_used)) {
336 spin_unlock_irqrestore(&rxq->lock, flags);
339 spin_unlock_irqrestore(&rxq->lock, flags);
/* Plenty of buffers left: suppress allocation-failure warnings */
341 if (rxq->free_count > RX_LOW_WATERMARK)
342 gfp_mask |= __GFP_NOWARN;
344 if (il->hw_params.rx_page_order > 0)
345 gfp_mask |= __GFP_COMP;
347 /* Alloc a new receive buffer */
348 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
351 D_INFO("alloc_pages failed, "
353 il->hw_params.rx_page_order);
355 if (rxq->free_count <= RX_LOW_WATERMARK &&
358 "Failed to alloc_pages with %s. "
359 "Only %u free buffers remaining.\n",
360 priority == GFP_ATOMIC ?
361 "GFP_ATOMIC" : "GFP_KERNEL",
363 /* We don't reschedule replenish work here -- we will
364 * call the restock method and if it still needs
365 * more buffers it will schedule replenish */
369 spin_lock_irqsave(&rxq->lock, flags);
/* Someone else may have drained rx_used while the lock was dropped */
371 if (list_empty(&rxq->rx_used)) {
372 spin_unlock_irqrestore(&rxq->lock, flags);
373 __free_pages(page, il->hw_params.rx_page_order);
376 element = rxq->rx_used.next;
377 rxb = list_entry(element, struct il_rx_buf, list);
380 spin_unlock_irqrestore(&rxq->lock, flags);
384 /* Get physical address of the RB */
385 rxb->page_dma = pci_map_page(il->pci_dev, page, 0,
386 PAGE_SIZE << il->hw_params.rx_page_order,
388 /* dma address must be no more than 36 bits */
389 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
390 /* and also 256 byte aligned! */
391 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
393 spin_lock_irqsave(&rxq->lock, flags);
395 list_add_tail(&rxb->list, &rxq->rx_free);
397 il->alloc_rxb_page++;
399 spin_unlock_irqrestore(&rxq->lock, flags);
/*
 * il4965_rx_replenish - process-context replenish (may sleep)
 *
 * Allocates buffers with GFP_KERNEL, then restocks the Rx queue under
 * il->lock.
 */
403 void il4965_rx_replenish(struct il_priv *il)
407 il4965_rx_allocate(il, GFP_KERNEL);
409 spin_lock_irqsave(&il->lock, flags);
410 il4965_rx_queue_restock(il);
411 spin_unlock_irqrestore(&il->lock, flags);
/*
 * il4965_rx_replenish_now - atomic-context replenish (Rx interrupt path)
 *
 * Same as il4965_rx_replenish() but allocates with GFP_ATOMIC and does
 * not take il->lock around the restock here.
 */
414 void il4965_rx_replenish_now(struct il_priv *il)
416 il4965_rx_allocate(il, GFP_ATOMIC);
418 il4965_rx_queue_restock(il);
421 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
422 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
423 * This free routine walks the list of POOL entries and if SKB is set to
424 * non NULL it is unmapped and freed
/* Also releases the RBD ring (4 bytes per RBD, i.e. one __le32 each) and
 * the Rx status block via dma_free_coherent(). */
426 void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
429 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
430 if (rxq->pool[i].page != NULL) {
431 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
432 PAGE_SIZE << il->hw_params.rx_page_order,
434 __il_free_pages(il, rxq->pool[i].page);
435 rxq->pool[i].page = NULL;
439 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
441 dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
442 rxq->rb_stts, rxq->rb_stts_dma);
/*
 * il4965_rxq_stop - stop Rx DMA channel 0 and wait for it to go idle
 *
 * Writes 0 to the channel config register, then polls the shared Rx
 * status register for the CHNL_IDLE bit (timeout argument 1000 --
 * presumably microseconds, per il_poll_bit()'s contract; TODO confirm).
 */
447 int il4965_rxq_stop(struct il_priv *il)
451 il_wr(il, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
452 il_poll_bit(il, FH_MEM_RSSR_RX_STATUS_REG,
453 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
/*
 * il4965_hwrate_to_mac80211_idx - map hardware rate_n_flags to mac80211 idx
 *
 * HT rates: the MCS number is simply the low byte.  Legacy rates: search
 * il_rates[] for a matching PLCP value, offsetting by IL_FIRST_OFDM_RATE
 * on 5 GHz (which has no CCK rates) so the returned index is band-relative.
 */
458 int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
463 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
464 if (rate_n_flags & RATE_MCS_HT_MSK) {
465 idx = (rate_n_flags & 0xff);
467 /* Legacy rate format, search for match in table */
469 if (band == IEEE80211_BAND_5GHZ)
470 band_offset = IL_FIRST_OFDM_RATE;
471 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
472 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
473 return idx - band_offset;
/*
 * il4965_calc_rssi - compute the received signal strength in dBm
 *
 * Takes the maximum DSP RSSI over the valid antennas (per the PHY flags
 * antenna mask) and subtracts the AGC reading and the fixed
 * IL4965_RSSI_OFFSET: higher AGC gain implies a weaker input signal.
 */
479 static int il4965_calc_rssi(struct il_priv *il,
480 struct il_rx_phy_res *rx_resp)
482 /* data from PHY/DSP regarding signal strength, etc.,
483 * contents are always there, not configurable by host. */
484 struct il4965_rx_non_cfg_phy *ncphy =
485 (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
486 u32 agc = (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK)
490 (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
491 >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
495 /* Find max rssi among 3 possible receivers.
496 * These values are measured by the digital signal processor (DSP).
497 * They should stay fairly constant even as the signal strength varies,
498 * if the radio's automatic gain control (AGC) is working right.
499 * AGC value (see below) will provide the "interesting" info. */
500 for (i = 0; i < 3; i++)
501 if (valid_antennae & (1 << i))
502 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
504 D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
505 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
508 /* dBm = max_rssi dB - agc dB - constant.
509 * Higher AGC (higher radio gain) means lower signal. */
510 return max_rssi - agc - IL4965_RSSI_OFFSET;
/*
 * il4965_translate_rx_status - normalize MPDU decryption status bits
 *
 * Translates the raw per-MPDU status word (decrypt_in) into the same
 * format used by N_RX responses (decrypt_out): copies the station-found
 * and security-type bits, then derives DECRYPT_OK / BAD_ICV_MIC /
 * BAD_KEY_TTAK depending on the cipher and the HW MIC/ICV/TTAK results.
 * Unencrypted frames, unknown ciphers, and frames whose decryption was
 * not done in HW are returned early (the early-return lines are elided).
 */
514 static u32 il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
518 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
519 RX_RES_STATUS_STATION_FOUND)
520 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
521 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
523 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
525 /* packet was not encrypted */
526 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
527 RX_RES_STATUS_SEC_TYPE_NONE)
530 /* packet was encrypted with unknown alg */
531 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
532 RX_RES_STATUS_SEC_TYPE_ERR)
535 /* decryption was not done in HW */
536 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
537 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
540 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
542 case RX_RES_STATUS_SEC_TYPE_CCMP:
543 /* alg is CCM: check MIC only */
544 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
546 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
548 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
552 case RX_RES_STATUS_SEC_TYPE_TKIP:
553 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
555 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
558 /* fall through if TTAK OK */
560 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
561 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
563 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
567 D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
568 decrypt_in, decrypt_out);
/*
 * il4965_pass_packet_to_mac80211 - hand a received frame to mac80211
 *
 * Drops the frame if the interface is closed, or if HW crypto reports a
 * bad decryption (il_set_decrypted_flag()).  Otherwise builds a small skb
 * whose payload is attached as a page fragment referencing rxb's page
 * (skb_add_rx_frag(), zero-copy), copies @stats into the skb's RX control
 * block and submits it via ieee80211_rx().  Page ownership moves to the
 * skb, hence the alloc_rxb_page-- accounting.
 */
573 static void il4965_pass_packet_to_mac80211(struct il_priv *il,
574 struct ieee80211_hdr *hdr,
577 struct il_rx_buf *rxb,
578 struct ieee80211_rx_status *stats)
581 __le16 fc = hdr->frame_control;
583 /* We only process data packets if the interface is open */
584 if (unlikely(!il->is_open)) {
586 "Dropping packet while interface is not open.\n");
590 /* In case of HW accelerated crypto and bad decryption, drop */
591 if (!il->cfg->mod_params->sw_crypto &&
592 il_set_decrypted_flag(il, hdr, ampdu_status, stats))
595 skb = dev_alloc_skb(128);
597 IL_ERR("dev_alloc_skb failed\n");
601 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
603 il_update_stats(il, false, fc, len);
604 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
606 ieee80211_rx(il->hw, skb);
607 il->alloc_rxb_page--;
611 /* Called for N_RX (legacy ABG frames), or
612 * N_RX_MPDU (HT high-throughput N frames). */
/*
 * il4965_hdl_rx - Rx notification handler
 *
 * For N_RX the PHY info is embedded in this packet; for N_RX_MPDU it was
 * cached earlier by il4965_hdl_rx_phy() in il->_4965.last_phy_res.  After
 * sanity-checking cfg_phy_cnt and the CRC/FIFO status bits, fills in an
 * ieee80211_rx_status (band, frequency, rate index, signal, antenna,
 * preamble and HT flags) and passes the frame on via
 * il4965_pass_packet_to_mac80211().
 */
613 void il4965_hdl_rx(struct il_priv *il,
614 struct il_rx_buf *rxb)
616 struct ieee80211_hdr *header;
617 struct ieee80211_rx_status rx_status;
618 struct il_rx_pkt *pkt = rxb_addr(rxb);
619 struct il_rx_phy_res *phy_res;
620 __le32 rx_pkt_status;
621 struct il_rx_mpdu_res_start *amsdu;
627 * N_RX and N_RX_MPDU are handled differently.
628 * N_RX: physical layer info is in this buffer
629 * N_RX_MPDU: physical layer info was sent in separate
630 * command and cached in il->last_phy_res
632 * Here we set up local variables depending on which command is
635 if (pkt->hdr.cmd == N_RX) {
636 phy_res = (struct il_rx_phy_res *)pkt->u.raw;
637 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
638 + phy_res->cfg_phy_cnt);
640 len = le16_to_cpu(phy_res->byte_count);
/* Status word sits immediately after the frame payload */
641 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
642 phy_res->cfg_phy_cnt + len);
643 ampdu_status = le32_to_cpu(rx_pkt_status);
645 if (!il->_4965.last_phy_res_valid) {
646 IL_ERR("MPDU frame without cached PHY data\n");
649 phy_res = &il->_4965.last_phy_res;
650 amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
651 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
652 len = le16_to_cpu(amsdu->byte_count);
653 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
654 ampdu_status = il4965_translate_rx_status(il,
655 le32_to_cpu(rx_pkt_status));
658 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
659 D_DROP("dsp size out of range [0,20]: %d/n",
660 phy_res->cfg_phy_cnt);
664 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
665 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
666 D_RX("Bad CRC or FIFO: 0x%08X.\n",
667 le32_to_cpu(rx_pkt_status));
671 /* This will be used in several places later */
672 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
674 /* rx_status carries information about the packet to mac80211 */
675 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
676 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
677 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
679 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
682 il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
685 /* TSF isn't reliable. In order to allow smooth user experience,
686 * this W/A doesn't propagate it to the mac80211 */
687 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
689 il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
691 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
692 rx_status.signal = il4965_calc_rssi(il, phy_res);
694 il_dbg_log_rx_data_frame(il, len, header);
695 D_STATS("Rssi %d, TSF %llu\n",
696 rx_status.signal, (unsigned long long)rx_status.mactime);
701 * It seems that the antenna field in the phy flags value
702 * is actually a bit field. This is undefined by radiotap,
703 * it wants an actual antenna number but I always get "7"
704 * for most legacy frames I receive indicating that the
705 * same frame was received on all three RX chains.
707 * I think this field should be removed in favor of a
708 * new 802.11n radiotap field "RX chains" that is defined
712 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
713 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
715 /* set the preamble flag if appropriate */
716 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
717 rx_status.flag |= RX_FLAG_SHORTPRE;
719 /* Set up the HT phy flags */
720 if (rate_n_flags & RATE_MCS_HT_MSK)
721 rx_status.flag |= RX_FLAG_HT;
722 if (rate_n_flags & RATE_MCS_HT40_MSK)
723 rx_status.flag |= RX_FLAG_40MHZ;
724 if (rate_n_flags & RATE_MCS_SGI_MSK)
725 rx_status.flag |= RX_FLAG_SHORT_GI;
727 il4965_pass_packet_to_mac80211(il, header, len, ampdu_status,
731 /* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
732 * This will be used later in il_hdl_rx() for N_RX_MPDU. */
733 void il4965_hdl_rx_phy(struct il_priv *il,
734 struct il_rx_buf *rxb)
736 struct il_rx_pkt *pkt = rxb_addr(rxb);
/* Mark the cache valid and copy the PHY result for the matching MPDU */
737 il->_4965.last_phy_res_valid = true;
738 memcpy(&il->_4965.last_phy_res, pkt->u.raw,
739 sizeof(struct il_rx_phy_res));
/*
 * il4965_get_channels_for_scan - build the scan command's channel table
 *
 * Walks il->scan_request->channels for @band, skips channels invalid for
 * this band, and fills one il_scan_channel entry per usable channel:
 * active/passive type (passive when !is_active, the channel info says
 * passive, or the regulatory PASSIVE_SCAN flag is set), probe mask,
 * active/passive dwell times and default tx-power settings.  Returns the
 * number of entries added ('added', logged at the end).
 */
742 static int il4965_get_channels_for_scan(struct il_priv *il,
743 struct ieee80211_vif *vif,
744 enum ieee80211_band band,
745 u8 is_active, u8 n_probes,
746 struct il_scan_channel *scan_ch)
748 struct ieee80211_channel *chan;
749 const struct ieee80211_supported_band *sband;
750 const struct il_channel_info *ch_info;
751 u16 passive_dwell = 0;
752 u16 active_dwell = 0;
756 sband = il_get_hw_mode(il, band);
760 active_dwell = il_get_active_dwell_time(il, band, n_probes);
761 passive_dwell = il_get_passive_dwell_time(il, band, vif);
/* Passive dwell must exceed active dwell */
763 if (passive_dwell <= active_dwell)
764 passive_dwell = active_dwell + 1;
766 for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
767 chan = il->scan_request->channels[i];
769 if (chan->band != band)
772 channel = chan->hw_value;
773 scan_ch->channel = cpu_to_le16(channel);
775 ch_info = il_get_channel_info(il, band, channel);
776 if (!il_is_channel_valid(ch_info)) {
778 "Channel %d is INVALID for this band.\n",
783 if (!is_active || il_is_channel_passive(ch_info) ||
784 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
785 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
787 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
790 scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
792 scan_ch->active_dwell = cpu_to_le16(active_dwell);
793 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
795 /* Set txpower levels to defaults */
796 scan_ch->dsp_atten = 110;
798 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
800 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
802 if (band == IEEE80211_BAND_5GHZ)
803 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
805 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
807 D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n",
808 channel, le32_to_cpu(scan_ch->type),
809 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
810 "ACTIVE" : "PASSIVE",
811 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
812 active_dwell : passive_dwell);
818 D_SCAN("total channels to scan %d\n", added);
/* Convert an antenna index into its RATE_MCS antenna-selection flag bit. */
822 static inline u32 il4965_ant_idx_to_flags(u8 ant_idx)
824 return BIT(ant_idx) << RATE_MCS_ANT_POS;
/*
 * il4965_request_scan - build and submit the uCode scan command
 *
 * Caller must hold il->mutex (lockdep-asserted).  Allocates il->scan_cmd
 * (header + IL_MAX_SCAN_SIZE), programs quiet/suspend timing (limited
 * out-of-channel time while associated), copies direct-scan SSIDs from
 * the mac80211 scan request, selects band/rate flags and TX/RX antennas,
 * appends the probe request and the per-channel table produced by
 * il4965_get_channels_for_scan(), then sends the whole command with
 * il_send_cmd_sync() with S_SCAN_HW set.
 */
827 int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
829 struct il_host_cmd cmd = {
831 .len = sizeof(struct il_scan_cmd),
832 .flags = CMD_SIZE_HUGE,
834 struct il_scan_cmd *scan;
835 struct il_rxon_context *ctx = &il->ctx;
839 enum ieee80211_band band;
841 u8 rx_ant = il->hw_params.valid_rx_ant;
843 bool is_active = false;
846 u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
849 lockdep_assert_held(&il->mutex);
852 ctx = il_rxon_ctx_from_vif(vif);
855 il->scan_cmd = kmalloc(sizeof(struct il_scan_cmd) +
856 IL_MAX_SCAN_SIZE, GFP_KERNEL);
859 "fail to allocate memory for scan\n");
864 memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
866 scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
867 scan->quiet_time = IL_ACTIVE_QUIET_TIME;
869 if (il_is_any_associated(il)) {
872 u32 suspend_time = 100;
873 u32 scan_suspend_time = 100;
875 D_INFO("Scanning while associated...\n");
876 interval = vif->bss_conf.beacon_int;
878 scan->suspend_time = 0;
879 scan->max_out_time = cpu_to_le32(200 * 1024);
881 interval = suspend_time;
/* Encode suspend time as (beacon intervals << 22 | remainder in usec) */
883 extra = (suspend_time / interval) << 22;
884 scan_suspend_time = (extra |
885 ((suspend_time % interval) * 1024));
886 scan->suspend_time = cpu_to_le32(scan_suspend_time);
887 D_SCAN("suspend_time 0x%X beacon interval %d\n",
888 scan_suspend_time, interval);
891 if (il->scan_request->n_ssids) {
893 D_SCAN("Kicking off active scan\n");
894 for (i = 0; i < il->scan_request->n_ssids; i++) {
895 /* always does wildcard anyway */
896 if (!il->scan_request->ssids[i].ssid_len)
898 scan->direct_scan[p].id = WLAN_EID_SSID;
899 scan->direct_scan[p].len =
900 il->scan_request->ssids[i].ssid_len;
901 memcpy(scan->direct_scan[p].ssid,
902 il->scan_request->ssids[i].ssid,
903 il->scan_request->ssids[i].ssid_len);
909 D_SCAN("Start passive scan.\n");
911 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
912 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
913 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
915 switch (il->scan_band) {
916 case IEEE80211_BAND_2GHZ:
917 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
918 chan_mod = le32_to_cpu(
919 il->ctx.active.flags &
920 RXON_FLG_CHANNEL_MODE_MSK)
921 >> RXON_FLG_CHANNEL_MODE_POS;
922 if (chan_mod == CHANNEL_MODE_PURE_40) {
926 rate_flags = RATE_MCS_CCK_MSK;
929 case IEEE80211_BAND_5GHZ:
933 IL_WARN("Invalid scan band\n");
938 * If active scanning is requested but a certain channel is
939 * marked passive, we can do active scanning if we detect
942 * There is an issue with some firmware versions that triggers
943 * a sysassert on a "good CRC threshold" of zero (== disabled),
944 * on a radar channel even though this means that we should NOT
947 * The "good CRC threshold" is the number of frames that we
948 * need to receive during our dwell time on a channel before
949 * sending out probes -- setting this to a huge value will
950 * mean we never reach it, but at the same time work around
951 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
952 * here instead of IL_GOOD_CRC_TH_DISABLED.
954 scan->good_CRC_th = is_active ? IL_GOOD_CRC_TH_DEFAULT :
955 IL_GOOD_CRC_TH_NEVER;
957 band = il->scan_band;
959 if (il->cfg->scan_rx_antennas[band])
960 rx_ant = il->cfg->scan_rx_antennas[band];
/* Rotate the TX antenna used for scans across the valid set */
962 il->scan_tx_ant[band] = il4965_toggle_tx_ant(il,
963 il->scan_tx_ant[band],
965 rate_flags |= il4965_ant_idx_to_flags(il->scan_tx_ant[band]);
966 scan->tx_cmd.rate_n_flags = il4965_hw_set_rate_n_flags(rate, rate_flags);
968 /* In power save mode use one chain, otherwise use all chains */
969 if (test_bit(S_POWER_PMI, &il->status)) {
970 /* rx_ant has been set to all valid chains previously */
971 active_chains = rx_ant &
972 ((u8)(il->chain_noise_data.active_chains));
974 active_chains = rx_ant;
976 D_SCAN("chain_noise_data.active_chains: %u\n",
977 il->chain_noise_data.active_chains);
979 rx_ant = il4965_first_antenna(active_chains);
982 /* MIMO is not used here, but value is required */
983 rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
984 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
985 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
986 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
987 scan->rx_chain = cpu_to_le16(rx_chain);
989 cmd_len = il_fill_probe_req(il,
990 (struct ieee80211_mgmt *)scan->data,
992 il->scan_request->ie,
993 il->scan_request->ie_len,
994 IL_MAX_SCAN_SIZE - sizeof(*scan));
995 scan->tx_cmd.len = cpu_to_le16(cmd_len);
997 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
998 RXON_FILTER_BCON_AWARE_MSK);
/* Channel table goes right after the probe request in scan->data */
1000 scan->channel_count = il4965_get_channels_for_scan(il, vif, band,
1001 is_active, n_probes,
1002 (void *)&scan->data[cmd_len]);
1003 if (scan->channel_count == 0) {
1004 D_SCAN("channel count %d\n", scan->channel_count);
1008 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
1009 scan->channel_count * sizeof(struct il_scan_channel);
1011 scan->len = cpu_to_le16(cmd.len);
1013 set_bit(S_SCAN_HW, &il->status);
1015 ret = il_send_cmd_sync(il, &cmd);
1017 clear_bit(S_SCAN_HW, &il->status);
/*
 * il4965_manage_ibss_station - add or remove the IBSS BSSID station entry
 *
 * @add: true adds a station for vif->bss_conf.bssid (station id stored in
 * vif_priv->ibss_bssid_sta_id); false removes that station again.
 */
1022 int il4965_manage_ibss_station(struct il_priv *il,
1023 struct ieee80211_vif *vif, bool add)
1025 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
1028 return il4965_add_bssid_station(il, vif_priv->ctx,
1029 vif->bss_conf.bssid,
1030 &vif_priv->ibss_bssid_sta_id);
1031 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
1032 vif->bss_conf.bssid);
/*
 * il4965_free_tfds_in_queue - credit back freed TFDs for a station/TID
 *
 * Caller must hold il->sta_lock (lockdep-asserted).  Decrements the
 * per-TID tfds_in_queue counter by @freed, clamping at zero with a debug
 * message if more TFDs were freed than the counter accounted for.
 */
1035 void il4965_free_tfds_in_queue(struct il_priv *il,
1036 int sta_id, int tid, int freed)
1038 lockdep_assert_held(&il->sta_lock);
1040 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1041 il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1043 D_TX("free more than tfds_in_queue (%u:%d)\n",
1044 il->stations[sta_id].tid[tid].tfds_in_queue,
1046 il->stations[sta_id].tid[tid].tfds_in_queue = 0;
1050 #define IL_TX_QUEUE_MSK 0xfffff
/* True when one RX chain suffices: static SMPS is requested or the
 * current HT config says a single chain is sufficient. */
1052 static bool il4965_is_single_rx_stream(struct il_priv *il)
1054 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1055 il->current_ht_config.single_chain_sufficient;
/* Active/idle RX chain counts used by the RXON chain configuration */
1058 #define IL_NUM_RX_CHAINS_MULTIPLE 3
1059 #define IL_NUM_RX_CHAINS_SINGLE 2
1060 #define IL_NUM_IDLE_CHAINS_DUAL 2
1061 #define IL_NUM_IDLE_CHAINS_SINGLE 1
1064 * Determine how many receiver/antenna chains to use.
1066 * More provides better reception via diversity. Fewer saves power
1067 * at the expense of throughput, but only when not in powersave to
1070 * MIMO (dual stream) requires at least 2, but works better with 3.
1071 * This does not determine *which* chains to use, just how many.
1073 static int il4965_get_active_rx_chain_count(struct il_priv *il)
1075 /* # of Rx chains to use when expecting MIMO. */
1076 if (il4965_is_single_rx_stream(il))
1077 return IL_NUM_RX_CHAINS_SINGLE;
1079 return IL_NUM_RX_CHAINS_MULTIPLE;
1083 * When we are in power saving mode, unless device support spatial
1084 * multiplexing power save, use the active count for rx chain count.
/* Static/dynamic SMPS idle with one chain; SMPS_OFF presumably falls
 * through to @active_cnt (the return line is elided -- TODO confirm);
 * any other value triggers the WARN below. */
1087 il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
1089 /* # Rx chains when idling, depending on SMPS mode */
1090 switch (il->current_ht_config.smps) {
1091 case IEEE80211_SMPS_STATIC:
1092 case IEEE80211_SMPS_DYNAMIC:
1093 return IL_NUM_IDLE_CHAINS_SINGLE;
1094 case IEEE80211_SMPS_OFF:
1097 WARN(1, "invalid SMPS mode %d",
1098 il->current_ht_config.smps);
1103 /* up to 4 chains */
/* Population count over the low 4 bits of @chain_bitmap, one per chain. */
1104 static u8 il4965_count_chain_bitmap(u32 chain_bitmap)
1107 res = (chain_bitmap & BIT(0)) >> 0;
1108 res += (chain_bitmap & BIT(1)) >> 1;
1109 res += (chain_bitmap & BIT(2)) >> 2;
1110 res += (chain_bitmap & BIT(3)) >> 3;
1115 * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1117 * Selects how many and which Rx receivers/antennas/chains to use.
1118 * This should not be used for scan command ... it puts data in wrong place.
1120 void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
1122 bool is_single = il4965_is_single_rx_stream(il);
1123 bool is_cam = !test_bit(S_POWER_PMI, &il->status);
1124 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1128 /* Tell uCode which antennas are actually connected.
1129 * Before first association, we assume all antennas are connected.
1130 * Just after first association, il4965_chain_noise_calibration()
1131 * checks which antennas actually *are* connected. */
1132 if (il->chain_noise_data.active_chains)
1133 active_chains = il->chain_noise_data.active_chains;
1135 active_chains = il->hw_params.valid_rx_ant;
1137 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1139 /* How many receivers should we use? */
1140 active_rx_cnt = il4965_get_active_rx_chain_count(il);
1141 idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
1144 /* correct rx chain count according hw settings
1145 * and chain noise calibration
1147 valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
/* Never request more chains than are actually connected */
1148 if (valid_rx_cnt < active_rx_cnt)
1149 active_rx_cnt = valid_rx_cnt;
1151 if (valid_rx_cnt < idle_rx_cnt)
1152 idle_rx_cnt = valid_rx_cnt;
1154 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1155 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1157 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
/* Force MIMO only when multi-stream, enough chains, and not in PMI */
1159 if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
1160 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1162 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1164 D_ASSOC("rx_chain=0x%X active=%d idle=%d\n",
1165 ctx->staging.rx_chain,
1166 active_rx_cnt, idle_rx_cnt);
1168 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1169 active_rx_cnt < idle_rx_cnt);
/*
 * il4965_toggle_tx_ant - rotate to the next TX antenna allowed by @valid
 *
 * Steps the antenna index forward (wrapping modulo RATE_ANT_NUM) until an
 * index whose bit is set in @valid is found.  NOTE(review): the return
 * statements are on elided lines -- presumably returns the found index,
 * or @ant unchanged if none is valid; confirm against the full file.
 */
1172 u8 il4965_toggle_tx_ant(struct il_priv *il, u8 ant, u8 valid)
1177 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1178 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1179 if (valid & BIT(ind))
/*
 * il4965_get_fh_string - map an FH register address to its printable name
 *
 * Each IL_CMD() expands to a switch case returning the stringified
 * register name; used by il4965_dump_fh() below.
 */
1185 static const char *il4965_get_fh_string(int cmd)
1188 IL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1189 IL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1190 IL_CMD(FH_RSCSR_CHNL0_WPTR);
1191 IL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1192 IL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1193 IL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1194 IL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1195 IL_CMD(FH_TSSR_TX_STATUS_REG);
1196 IL_CMD(FH_TSSR_TX_ERROR_REG);
/*
 * il4965_dump_fh - dump the flow-handler (FH) register values
 *
 * With CONFIG_IWLEGACY_DEBUG and @display set, allocates *buf (caller
 * frees) and scnprintf()s each register name/value pair into it;
 * otherwise logs the same table through IL_ERR.
 */
1202 int il4965_dump_fh(struct il_priv *il, char **buf, bool display)
1205 #ifdef CONFIG_IWLEGACY_DEBUG
1209 static const u32 fh_tbl[] = {
1210 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1211 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1212 FH_RSCSR_CHNL0_WPTR,
1213 FH_MEM_RCSR_CHNL0_CONFIG_REG,
1214 FH_MEM_RSSR_SHARED_CTRL_REG,
1215 FH_MEM_RSSR_RX_STATUS_REG,
1216 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1217 FH_TSSR_TX_STATUS_REG,
1218 FH_TSSR_TX_ERROR_REG
1220 #ifdef CONFIG_IWLEGACY_DEBUG
/* ~48 chars per register line plus a small header */
1222 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1223 *buf = kmalloc(bufsz, GFP_KERNEL);
1226 pos += scnprintf(*buf + pos, bufsz - pos,
1227 "FH register values:\n");
1228 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1229 pos += scnprintf(*buf + pos, bufsz - pos,
1231 il4965_get_fh_string(fh_tbl[i]),
1232 il_rd(il, fh_tbl[i]));
1237 IL_ERR("FH register values:\n");
1238 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1239 IL_ERR(" %34s: 0X%08x\n",
1240 il4965_get_fh_string(fh_tbl[i]),
1241 il_rd(il, fh_tbl[i]));
/*
 * il4965_hdl_missed_beacon - handle the uCode missed-beacon notification.
 * If the consecutive-missed count exceeds il->missed_beacon_threshold,
 * logs the counters and (unless a scan is running) re-initializes the
 * sensitivity calibration to try to recover the link.
 */
1245 void il4965_hdl_missed_beacon(struct il_priv *il,
1246 				struct il_rx_buf *rxb)
1249 	struct il_rx_pkt *pkt = rxb_addr(rxb);
1250 	struct il_missed_beacon_notif *missed_beacon;
1252 	missed_beacon = &pkt->u.missed_beacon;
1253 	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
1254 	    il->missed_beacon_threshold) {
1256 		"missed bcn cnsq %d totl %d rcd %d expctd %d\n",
1257 		    le32_to_cpu(missed_beacon->consecutive_missed_beacons),
1258 		    le32_to_cpu(missed_beacon->total_missed_becons),
1259 		    le32_to_cpu(missed_beacon->num_recvd_beacons),
1260 		    le32_to_cpu(missed_beacon->num_expected_beacons));
/* don't disturb an in-progress scan with a sensitivity reset */
1261 		if (!test_bit(S_SCANNING, &il->status))
1262 			il4965_init_sensitivity(il);
1266 /* Calculate noise level, based on measurements during network silence just
1267 *   before arriving beacon.  This measurement can be done only if we know
1268 *   exactly when to expect beacons, therefore only when we're associated. */
1269 static void il4965_rx_calc_noise(struct il_priv *il)
1271 	struct stats_rx_non_phy *rx_info;
1272 	int num_active_rx = 0;
1273 	int total_silence = 0;
1274 	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
1277 	rx_info = &(il->_4965.stats.rx.general);
/* per-chain beacon-silence RSSI, masked to the in-band portion */
1279 		le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
1281 		le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
1283 		le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
/* accumulate only chains that reported a nonzero silence value
 * (num_active_rx increments are elided in this listing) */
1285 	if (bcn_silence_a) {
1286 		total_silence += bcn_silence_a;
1289 	if (bcn_silence_b) {
1290 		total_silence += bcn_silence_b;
1293 	if (bcn_silence_c) {
1294 		total_silence += bcn_silence_c;
1298 	/* Average among active antennas */
/* -107 converts the averaged silence reading to dBm */
1300 		last_rx_noise = (total_silence / num_active_rx) - 107;
1302 		last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
1304 	D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
1305 			bcn_silence_a, bcn_silence_b, bcn_silence_c,
1309 #ifdef CONFIG_IWLEGACY_DEBUGFS
/*
 * il4965_accumulative_stats - maintain running/delta/max statistics for
 * debugfs.  Walks the notification as an array of __le32 counters and,
 * for each counter that increased since the previous notification, adds
 * the difference into accum_stats and tracks the largest single delta.
 */
1311 *  based on the assumption of all stats counter are in DWORD
1312 *  FIXME: This function is for debugging, do not deal with
1313 *  the case of counters roll-over.
1315 static void il4965_accumulative_stats(struct il_priv *il,
1321 	u32 *delta, *max_delta;
1322 	struct stats_general_common *general, *accum_general;
1323 	struct stats_tx *tx, *accum_tx;
1325 	prev_stats = (__le32 *)&il->_4965.stats;
1326 	accum_stats = (u32 *)&il->_4965.accum_stats;
1327 	size = sizeof(struct il_notif_stats);
1328 	general = &il->_4965.stats.general.common;
1329 	accum_general = &il->_4965.accum_stats.general.common;
1330 	tx = &il->_4965.stats.tx;
1331 	accum_tx = &il->_4965.accum_stats.tx;
1332 	delta = (u32 *)&il->_4965.delta_stats;
1333 	max_delta = (u32 *)&il->_4965.max_delta;
/* skip the leading flag word; step all five cursors in lock-step */
1335 	for (i = sizeof(__le32); i < size;
1336 	     i += sizeof(__le32), stats++, prev_stats++, delta++,
1337 	     max_delta++, accum_stats++) {
1338 		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
1339 			*delta = (le32_to_cpu(*stats) -
1340 				le32_to_cpu(*prev_stats));
1341 			*accum_stats += *delta;
1342 			if (*delta > *max_delta)
1343 				*max_delta = *delta;
1347 	/* reset accumulative stats for "no-counter" type stats */
/* temperature/timestamp are point-in-time values, not counters: copy as-is */
1348 	accum_general->temperature = general->temperature;
1349 	accum_general->ttl_timestamp = general->ttl_timestamp;
/* seconds between forced statistics-driven recalibrations */
1353 #define REG_RECALIB_PERIOD (60)
/*
 * il4965_hdl_stats - handle a periodic uCode statistics notification.
 * Detects temperature / HT40-mode changes vs. the cached copy, feeds the
 * debugfs accumulators, caches the new statistics, re-arms the periodic
 * stats timer, and (outside of scanning) triggers noise calculation and
 * the run-time calibration work.  A temperature change invokes the
 * device-specific temperature callback.
 */
1355 void il4965_hdl_stats(struct il_priv *il,
1356 			      struct il_rx_buf *rxb)
1359 	struct il_rx_pkt *pkt = rxb_addr(rxb);
1362 		"Statistics notification received (%d vs %d).\n",
1363 		    (int)sizeof(struct il_notif_stats),
1364 		    le32_to_cpu(pkt->len_n_flags) &
1365 		    FH_RSCSR_FRAME_SIZE_MSK);
/* did temperature or HT40 mode change since the last notification? */
1367 	change = ((il->_4965.stats.general.common.temperature !=
1368 		   pkt->u.stats.general.common.temperature) ||
1369 		  ((il->_4965.stats.flag &
1370 		    STATS_REPLY_FLG_HT40_MODE_MSK) !=
1371 		   (pkt->u.stats.flag &
1372 		    STATS_REPLY_FLG_HT40_MODE_MSK)));
1373 #ifdef CONFIG_IWLEGACY_DEBUGFS
1374 	il4965_accumulative_stats(il, (__le32 *)&pkt->u.stats);
1377 	/* TODO: reading some of stats is unneeded */
1378 	memcpy(&il->_4965.stats, &pkt->u.stats,
1379 		sizeof(il->_4965.stats));
1381 	set_bit(S_STATS, &il->status);
1383 	/* Reschedule the stats timer to occur in
1384 	 * REG_RECALIB_PERIOD seconds to ensure we get a
1385 	 * thermal update even if the uCode doesn't give
1387 	mod_timer(&il->stats_periodic, jiffies +
1388 		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
1390 	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
1391 	    (pkt->hdr.cmd == N_STATS)) {
1392 		il4965_rx_calc_noise(il);
1393 		queue_work(il->workqueue, &il->run_time_calib_work);
/* notify hw-specific code of the temperature change, if supported */
1395 	if (il->cfg->ops->lib->temp_ops.temperature && change)
1396 		il->cfg->ops->lib->temp_ops.temperature(il);
/*
 * il4965_hdl_c_stats - handle the reply to a statistics-request command.
 * If the uCode flags indicate its counters were cleared, zero the debugfs
 * accumulator/delta/max snapshots to match, then process the payload via
 * the regular il4965_hdl_stats() path.
 */
1399 void il4965_hdl_c_stats(struct il_priv *il,
1400 			      struct il_rx_buf *rxb)
1402 	struct il_rx_pkt *pkt = rxb_addr(rxb);
1404 	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
1405 #ifdef CONFIG_IWLEGACY_DEBUGFS
1406 		memset(&il->_4965.accum_stats, 0,
1407 			sizeof(struct il_notif_stats));
1408 		memset(&il->_4965.delta_stats, 0,
1409 			sizeof(struct il_notif_stats));
1410 		memset(&il->_4965.max_delta, 0,
1411 			sizeof(struct il_notif_stats));
1413 		D_RX("Statistics have been cleared\n");
1415 	il4965_hdl_stats(il, rxb);
1420 * mac80211 queues, ACs, hardware queues, FIFOs.
1422 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
1424 * Mac80211 uses the following numbers, which we get as from it
1425 * by way of skb_get_queue_mapping(skb):
1433 * Regular (not A-MPDU) frames are put into hardware queues corresponding
1434 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
1435 * own queue per aggregation session (RA/TID combination), such queues are
1436 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
1437 * order to map frames to the right queue, we also need an AC->hw queue
1438 * mapping. This is implemented here.
1440 * Due to the way hw queues are set up (by the hw specific modules like
1441 * 4965.c), the AC->hw queue mapping is the identity
/* QoS TID (0-7) -> mac80211 access category lookup table
 * (table entries elided in this listing). */
1445 static const u8 tid_to_ac[] = {
/*
 * il4965_get_ac_from_tid - map a QoS TID to its access category.
 * Valid only for TIDs covered by tid_to_ac[]; the out-of-range return
 * (error value) is elided in this listing.
 */
1456 static inline int il4965_get_ac_from_tid(u16 tid)
1458 	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1459 		return tid_to_ac[tid];
1461 	/* no support for TIDs 8-15 yet */
/*
 * il4965_get_fifo_from_tid - map a QoS TID to the context's TX FIFO,
 * going TID -> AC (tid_to_ac[]) -> FIFO (ctx->ac_to_fifo[]).  The
 * out-of-range return path is elided in this listing.
 */
1466 il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
1468 	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1469 		return ctx->ac_to_fifo[tid_to_ac[tid]];
1471 	/* no support for TIDs 8-15 yet */
1476 * handle build C_TX command notification.
/*
 * il4965_tx_cmd_build_basic - fill the common fields of a C_TX command.
 * Derives tx_flags (ACK, sequence control, TSF, block-ack, more-frag,
 * protection), the TID for QoS data, the station id and the power-
 * management frame timeout from the frame's 802.11 header and the
 * mac80211 TX info.
 */
1478 static void il4965_tx_cmd_build_basic(struct il_priv *il,
1479 				      struct sk_buff *skb,
1480 				      struct il_tx_cmd *tx_cmd,
1481 				      struct ieee80211_tx_info *info,
1482 				      struct ieee80211_hdr *hdr,
1485 	__le16 fc = hdr->frame_control;
1486 	__le32 tx_flags = tx_cmd->tx_flags;
1488 	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1489 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
1490 		tx_flags |= TX_CMD_FLG_ACK_MSK;
1491 		if (ieee80211_is_mgmt(fc))
1492 			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
/* probe response with fragment number 0 carries the TSF */
1493 		if (ieee80211_is_probe_resp(fc) &&
1494 		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
1495 			tx_flags |= TX_CMD_FLG_TSF_MSK;
1497 		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
1498 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1501 	if (ieee80211_is_back_req(fc))
1502 		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
1504 	tx_cmd->sta_id = std_id;
1505 	if (ieee80211_has_morefrags(fc))
1506 		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
1508 	if (ieee80211_is_data_qos(fc)) {
1509 		u8 *qc = ieee80211_get_qos_ctl(hdr);
1510 		tx_cmd->tid_tspec = qc[0] & 0xf;
/* QoS data: uCode handles the sequence numbering itself */
1511 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
1513 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
/* adds RTS/CTS protection flags as mac80211 requests */
1516 	il_tx_cmd_protection(il, info, fc, &tx_flags);
1518 	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
1519 	if (ieee80211_is_mgmt(fc)) {
/* (re)assoc requests get a slightly longer PM frame timeout */
1520 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
1521 			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
1523 			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
1525 		tx_cmd->timeout.pm_frame_timeout = 0;
1528 	tx_cmd->driver_txop = 0;
1529 	tx_cmd->tx_flags = tx_flags;
1530 	tx_cmd->next_frame_len = 0;
1533 #define RTS_DFAULT_RETRY_LIMIT 60
/*
 * il4965_tx_cmd_build_rate - choose rate, retry limits and TX antenna
 * for a C_TX command.  Data frames delegate rate selection to the uCode
 * station table (STA_RATE flag); management/other frames use the rate
 * mac80211 selected, remapped to driver indices and PLCP values.
 */
1535 static void il4965_tx_cmd_build_rate(struct il_priv *il,
1536 			      struct il_tx_cmd *tx_cmd,
1537 			      struct ieee80211_tx_info *info,
1543 	u8 data_retry_limit;
1546 	/* Set retry limit on DATA packets and Probe Responses*/
1547 	if (ieee80211_is_probe_resp(fc))
1548 		data_retry_limit = 3;
1550 		data_retry_limit = IL4965_DEFAULT_TX_RETRY;
1551 	tx_cmd->data_retry_limit = data_retry_limit;
1553 	/* Set retry limit on RTS packets */
/* RTS retries never exceed the data retry limit */
1554 	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
1555 	if (data_retry_limit < rts_retry_limit)
1556 		rts_retry_limit = data_retry_limit;
1557 	tx_cmd->rts_retry_limit = rts_retry_limit;
1559 	/* DATA packets will use the uCode station table for rate/antenna
1561 	if (ieee80211_is_data(fc)) {
1562 		tx_cmd->initial_rate_idx = 0;
1563 		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
1568 	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
1569 	 * not really a TX rate. Thus, we use the lowest supported rate for
1570 	 * this band. Also use the lowest supported rate if the stored rate
1573 	rate_idx = info->control.rates[0].idx;
/* NOTE(review): `>` lets rate_idx == RATE_COUNT_LEGACY through; valid
 * legacy indices look like 0..RATE_COUNT_LEGACY-1 — confirm whether this
 * bound should be `>=` (cannot verify here, surrounding lines elided). */
1574 	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) ||
1575 			rate_idx < 0 || rate_idx > RATE_COUNT_LEGACY)
1576 		rate_idx = rate_lowest_index(&il->bands[info->band],
1578 	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
1579 	if (info->band == IEEE80211_BAND_5GHZ)
1580 		rate_idx += IL_FIRST_OFDM_RATE;
1581 	/* Get PLCP rate for tx_cmd->rate_n_flags */
1582 	rate_plcp = il_rates[rate_idx].plcp;
1583 	/* Zero out flags for this packet */
1586 	/* Set CCK flag as needed */
1587 	if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
1588 		rate_flags |= RATE_MCS_CCK_MSK;
1590 	/* Set up antennas */
/* alternate the management-frame antenna across the valid set */
1591 	il->mgmt_tx_ant = il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
1592 				      il->hw_params.valid_tx_ant);
1594 	rate_flags |= il4965_ant_idx_to_flags(il->mgmt_tx_ant);
1596 	/* Set the rate in the TX cmd */
1597 	tx_cmd->rate_n_flags = il4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
/*
 * il4965_tx_cmd_build_hwcrypto - program hardware encryption fields of a
 * C_TX command from the mac80211 hw_key: CCMP copies the key (plus an
 * AGG flag for A-MPDU), TKIP derives the phase-2 key per packet, WEP
 * stores the key at offset 3 with the key index in sec_ctl.
 */
1600 static void il4965_tx_cmd_build_hwcrypto(struct il_priv *il,
1601 				      struct ieee80211_tx_info *info,
1602 				      struct il_tx_cmd *tx_cmd,
1603 				      struct sk_buff *skb_frag,
1606 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
1608 	switch (keyconf->cipher) {
1609 	case WLAN_CIPHER_SUITE_CCMP:
1610 		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
1611 		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
1612 		if (info->flags & IEEE80211_TX_CTL_AMPDU)
1613 			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
1614 		D_TX("tx_cmd with AES hwcrypto\n");
1617 	case WLAN_CIPHER_SUITE_TKIP:
1618 		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
/* per-packet TKIP phase-2 key derived from the frame's IV */
1619 		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
1620 		D_TX("tx_cmd with tkip hwcrypto\n");
1623 	case WLAN_CIPHER_SUITE_WEP104:
1624 		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
/* fall through to shared WEP handling (intervening lines elided) */
1626 	case WLAN_CIPHER_SUITE_WEP40:
1627 		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
1628 			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
1630 		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
1632 		D_TX("Configuring packet for WEP encryption "
1633 			     "with key %d\n", keyconf->keyidx);
1637 		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
1643 * start C_TX command process
/*
 * il4965_tx_skb - main TX path: map an skb into a TFD on the right queue.
 * Chooses the station id and TX queue (AC queue, mcast/after-DTIM queue,
 * or the per-<sta,tid> aggregation queue), stamps the driver-maintained
 * sequence number for QoS data, builds the C_TX command (basic flags,
 * hwcrypto, rate), DMA-maps command+header as TFD entry 1 and the frame
 * payload as TFD entry 2, then advances the queue write pointer.
 * Called under no lock; takes il->lock (irqsave) and il->sta_lock.
 * Returns 0 on success (error paths are elided in this listing).
 */
1645 int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
1647 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1648 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1649 	struct ieee80211_sta *sta = info->control.sta;
1650 	struct il_station_priv *sta_priv = NULL;
1651 	struct il_tx_queue *txq;
1653 	struct il_device_cmd *out_cmd;
1654 	struct il_cmd_meta *out_meta;
1655 	struct il_tx_cmd *tx_cmd;
1656 	struct il_rxon_context *ctx = &il->ctx;
1658 	dma_addr_t phys_addr;
1659 	dma_addr_t txcmd_phys;
1660 	dma_addr_t scratch_phys;
1661 	u16 len, firstlen, secondlen;
1666 	u8 wait_write_ptr = 0;
1669 	unsigned long flags;
1670 	bool is_agg = false;
1672 	if (info->control.vif)
1673 		ctx = il_rxon_ctx_from_vif(info->control.vif);
1675 	spin_lock_irqsave(&il->lock, flags);
1676 	if (il_is_rfkill(il)) {
1677 		D_DROP("Dropping - RF KILL\n");
1681 	fc = hdr->frame_control;
1683 #ifdef CONFIG_IWLEGACY_DEBUG
1684 	if (ieee80211_is_auth(fc))
1685 		D_TX("Sending AUTH frame\n");
1686 	else if (ieee80211_is_assoc_req(fc))
1687 		D_TX("Sending ASSOC frame\n");
1688 	else if (ieee80211_is_reassoc_req(fc))
1689 		D_TX("Sending REASSOC frame\n");
1692 	hdr_len = ieee80211_hdrlen(fc);
1694 	/* For management frames use broadcast id to do not break aggregation */
1695 	if (!ieee80211_is_data(fc))
1696 		sta_id = ctx->bcast_sta_id;
1698 		/* Find idx into station table for destination station */
1699 		sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
1701 		if (sta_id == IL_INVALID_STATION) {
1702 			D_DROP("Dropping - INVALID STATION: %pM\n",
1708 	D_TX("station Id %d\n", sta_id);
1711 		sta_priv = (void *)sta->drv_priv;
/* PS-Poll response to a dozing client: tell uCode to send exactly one */
1713 	if (sta_priv && sta_priv->asleep &&
1714 	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
1716 		 * This sends an asynchronous command to the device,
1717 		 * but we can rely on it being processed before the
1718 		 * next frame is processed -- and the next frame to
1719 		 * this station is the one that will consume this
1721 		 * For now set the counter to just 1 since we do not
1722 		 * support uAPSD yet.
1724 		il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
1728 	 * Send this frame after DTIM -- there's a special queue
1729 	 * reserved for this for contexts that support AP mode.
1731 	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1732 		txq_id = ctx->mcast_queue;
1734 		 * The microcode will clear the more data
1735 		 * bit in the last frame it transmits.
1737 		hdr->frame_control |=
1738 				cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1740 		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
1742 	/* irqs already disabled/saved above when locking il->lock */
1743 	spin_lock(&il->sta_lock);
1745 	if (ieee80211_is_data_qos(fc)) {
1746 		qc = ieee80211_get_qos_ctl(hdr);
1747 		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1748 		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
1749 			spin_unlock(&il->sta_lock);
/* stamp the driver-maintained per-<sta,tid> sequence number */
1752 		seq_number = il->stations[sta_id].tid[tid].seq_number;
1753 		seq_number &= IEEE80211_SCTL_SEQ;
1754 		hdr->seq_ctrl = hdr->seq_ctrl &
1755 				cpu_to_le16(IEEE80211_SCTL_FRAG);
1756 		hdr->seq_ctrl |= cpu_to_le16(seq_number);
1758 		/* aggregation is on for this <sta,tid> */
1759 		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1760 		    il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
1761 			txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
1766 	txq = &il->txq[txq_id];
1769 	if (unlikely(il_queue_space(q) < q->high_mark)) {
1770 		spin_unlock(&il->sta_lock);
/* commit the advanced sequence number only for unfragmented QoS data */
1774 	if (ieee80211_is_data_qos(fc)) {
1775 		il->stations[sta_id].tid[tid].tfds_in_queue++;
1776 		if (!ieee80211_has_morefrags(fc))
1777 			il->stations[sta_id].tid[tid].seq_number = seq_number;
1780 	spin_unlock(&il->sta_lock);
1782 	/* Set up driver data for this TFD */
1783 	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
1784 	txq->txb[q->write_ptr].skb = skb;
1785 	txq->txb[q->write_ptr].ctx = ctx;
1787 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
1788 	out_cmd = txq->cmd[q->write_ptr];
1789 	out_meta = &txq->meta[q->write_ptr];
1790 	tx_cmd = &out_cmd->cmd.tx;
1791 	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
1792 	memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
1795 	 * Set up the Tx-command (not MAC!) header.
1796 	 * Store the chosen Tx queue and TFD idx within the sequence field;
1797 	 * after Tx, uCode's Tx response will return this value so driver can
1798 	 * locate the frame within the tx queue and do post-tx processing.
1800 	out_cmd->hdr.cmd = C_TX;
1801 	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1802 				IDX_TO_SEQ(q->write_ptr)));
1804 	/* Copy MAC header from skb into command buffer */
1805 	memcpy(tx_cmd->hdr, hdr, hdr_len);
1808 	/* Total # bytes to be transmitted */
1809 	len = (u16)skb->len;
1810 	tx_cmd->len = cpu_to_le16(len);
1812 	if (info->control.hw_key)
1813 		il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
1815 	/* TODO need this for burst mode later on */
1816 	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1817 	il_dbg_log_tx_data_frame(il, len, hdr);
1819 	il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
1821 	il_update_stats(il, true, fc, len);
1823 	 * Use the first empty entry in this queue's command buffer array
1824 	 * to contain the Tx command and MAC header concatenated together
1825 	 * (payload data will be in another buffer).
1826 	 * Size of this varies, due to varying MAC header length.
1827 	 * If end is not dword aligned, we'll have 2 extra bytes at the end
1828 	 * of the MAC header (device reads on dword boundaries).
1829 	 * We'll tell device about this padding later.
1831 	len = sizeof(struct il_tx_cmd) +
1832 		sizeof(struct il_cmd_header) + hdr_len;
1833 	firstlen = (len + 3) & ~3;
1835 	/* Tell NIC about any 2-byte padding after MAC header */
1836 	if (firstlen != len)
1837 		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1839 	/* Physical address of this Tx command's header (not MAC header!),
1840 	 * within command buffer array. */
1841 	txcmd_phys = pci_map_single(il->pci_dev,
1842 				    &out_cmd->hdr, firstlen,
1843 				    PCI_DMA_BIDIRECTIONAL);
/* stash the mapping so it can be unmapped when the TFD is reclaimed */
1844 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1845 	dma_unmap_len_set(out_meta, len, firstlen);
1846 	/* Add buffer containing Tx command and MAC(!) header to TFD's
1848 	il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq,
1849 						   txcmd_phys, firstlen, 1, 0);
1851 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
1852 		txq->need_update = 1;
1855 		txq->need_update = 0;
1858 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
1859 	 * if any (802.11 null frames have no payload). */
1860 	secondlen = skb->len - hdr_len;
1861 	if (secondlen > 0) {
1862 		phys_addr = pci_map_single(il->pci_dev, skb->data + hdr_len,
1863 					   secondlen, PCI_DMA_TODEVICE);
1864 		il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq,
1865 							   phys_addr, secondlen,
/* scratch area lives inside the already-mapped command buffer */
1869 	scratch_phys = txcmd_phys + sizeof(struct il_cmd_header) +
1870 				offsetof(struct il_tx_cmd, scratch);
1872 	/* take back ownership of DMA buffer to enable update */
1873 	pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys,
1874 				    firstlen, PCI_DMA_BIDIRECTIONAL);
1875 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1876 	tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
1878 	D_TX("sequence nr = 0X%x\n",
1879 		       le16_to_cpu(out_cmd->hdr.sequence));
1880 	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1881 	il_print_hex_dump(il, IL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1882 	il_print_hex_dump(il, IL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1884 	/* Set up entry for this TFD in Tx byte-count array */
1885 	if (info->flags & IEEE80211_TX_CTL_AMPDU)
1886 		il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
1887 						     le16_to_cpu(tx_cmd->len));
/* hand the command buffer back to the device after the CPU-side edits */
1889 	pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys,
1890 				       firstlen, PCI_DMA_BIDIRECTIONAL);
1892 	/* Tell device the write idx *just past* this latest filled TFD */
1893 	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
1894 	il_txq_update_write_ptr(il, txq);
1895 	spin_unlock_irqrestore(&il->lock, flags);
1898 	 * At this point the frame is "transmitted" successfully
1899 	 * and we will get a TX status notification eventually,
1900 	 * regardless of the value of ret. "ret" only indicates
1901 	 * whether or not we should update the write pointer.
1905 	 * Avoid atomic ops if it isn't an associated client.
1906 	 * Also, if this is a packet for aggregation, don't
1907 	 * increase the counter because the ucode will stop
1908 	 * aggregation queues when their respective station
1911 	if (sta_priv && sta_priv->client && !is_agg)
1912 		atomic_inc(&sta_priv->pending_frames);
/* queue nearly full: flush the write pointer or stop the mac80211 queue */
1914 	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
1915 		if (wait_write_ptr) {
1916 			spin_lock_irqsave(&il->lock, flags);
1917 			txq->need_update = 1;
1918 			il_txq_update_write_ptr(il, txq);
1919 			spin_unlock_irqrestore(&il->lock, flags);
1921 			il_stop_queue(il, txq);
1928 	spin_unlock_irqrestore(&il->lock, flags);
/*
 * il4965_alloc_dma_ptr - allocate a coherent DMA buffer of @size bytes,
 * recording both CPU (ptr->addr) and bus (ptr->dma) addresses.
 * (Return-value handling elided in this listing.)
 */
1932 static inline int il4965_alloc_dma_ptr(struct il_priv *il,
1933 				    struct il_dma_ptr *ptr, size_t size)
1935 	ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
/*
 * il4965_free_dma_ptr - release a coherent DMA buffer previously set up
 * by il4965_alloc_dma_ptr(); safe to call on an unallocated descriptor,
 * and clears the descriptor afterwards to prevent double free.
 */
1943 static inline void il4965_free_dma_ptr(struct il_priv *il,
1944 				    struct il_dma_ptr *ptr)
1946 	if (unlikely(!ptr->addr))
1949 	dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1950 	memset(ptr, 0, sizeof(*ptr));
1954 * il4965_hw_txq_ctx_free - Free TXQ Context
1956 * Destroy all TX DMA queues and structures
1958 void il4965_hw_txq_ctx_free(struct il_priv *il)
/* tear down every queue; the command queue has its own free routine */
1964 		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1965 			if (txq_id == il->cmd_queue)
1966 				il_cmd_queue_free(il);
1968 				il_tx_queue_free(il, txq_id);
/* release keep-warm and scheduler byte-count table DMA buffers */
1970 	il4965_free_dma_ptr(il, &il->kw);
1972 	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1974 	/* free tx queue structure */
1979 * il4965_txq_ctx_alloc - allocate TX queue context
1980 * Allocate all Tx DMA structures and initialize them
1983 * @return error code
1985 int il4965_txq_ctx_alloc(struct il_priv *il)
1988 	int txq_id, slots_num;
1989 	unsigned long flags;
1991 	/* Free all tx/cmd queues and keep-warm buffer */
1992 	il4965_hw_txq_ctx_free(il);
/* scheduler byte-count tables (device-visible, coherent DMA) */
1994 	ret = il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
1995 				il->hw_params.scd_bc_tbls_size);
1997 		IL_ERR("Scheduler BC Table allocation failed\n");
2000 	/* Alloc keep-warm buffer */
2001 	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
2003 		IL_ERR("Keep Warm allocation failed\n");
2007 	/* allocate tx queue structure */
2008 	ret = il_alloc_txq_mem(il);
2012 	spin_lock_irqsave(&il->lock, flags);
2014 	/* Turn off all Tx DMA fifos */
2015 	il4965_txq_set_sched(il, 0);
2017 	/* Tell NIC where to find the "keep warm" buffer */
2018 	il_wr(il, FH_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2020 	spin_unlock_irqrestore(&il->lock, flags);
2022 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
2023 	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2024 		slots_num = (txq_id == il->cmd_queue) ?
2025 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
2026 		ret = il_tx_queue_init(il,
2027 					&il->txq[txq_id], slots_num,
2030 			IL_ERR("Tx %d queue init failed\n", txq_id);
/* error unwind: free everything allocated above */
2038 	il4965_hw_txq_ctx_free(il);
2039 	il4965_free_dma_ptr(il, &il->kw);
2041 	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
/*
 * il4965_txq_ctx_reset - re-initialize already-allocated TX queues, e.g.
 * after firmware reload: scheduler off, keep-warm address re-programmed,
 * then every queue (command queue included) reset in place without
 * reallocating DMA memory.
 */
2046 void il4965_txq_ctx_reset(struct il_priv *il)
2048 	int txq_id, slots_num;
2049 	unsigned long flags;
2051 	spin_lock_irqsave(&il->lock, flags);
2053 	/* Turn off all Tx DMA fifos */
2054 	il4965_txq_set_sched(il, 0);
2056 	/* Tell NIC where to find the "keep warm" buffer */
2057 	il_wr(il, FH_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2059 	spin_unlock_irqrestore(&il->lock, flags);
2061 	/* Alloc and init all Tx queues, including the command queue (#4) */
2062 	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2063 		slots_num = txq_id == il->cmd_queue ?
2064 			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
2065 		il_tx_queue_reset(il, &il->txq[txq_id],
2071 * il4965_txq_ctx_stop - Stop all Tx DMA channels
2073 void il4965_txq_ctx_stop(struct il_priv *il)
2076 	unsigned long flags;
2078 	/* Turn off all Tx DMA fifos */
2079 	spin_lock_irqsave(&il->lock, flags);
2081 	il4965_txq_set_sched(il, 0);
2083 	/* Stop each Tx DMA channel, and wait for it to be idle */
2084 	for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
/* write 0 to the channel config, then poll its idle status bit */
2086 			FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
2087 		if (il_poll_bit(il, FH_TSSR_TX_STATUS_REG,
2088 				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
2090 			IL_ERR("Failing on timeout while stopping"
2091 			    " DMA channel %d [0x%08x]", ch,
2093 					   FH_TSSR_TX_STATUS_REG));
2095 	spin_unlock_irqrestore(&il->lock, flags);
2100 	/* Unmap DMA from host system and free skb's */
2101 	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2102 		if (txq_id == il->cmd_queue)
2103 			il_cmd_queue_unmap(il);
2105 			il_tx_queue_unmap(il, txq_id);
2109 * Find first available (lowest unused) Tx Queue, mark it "active".
2110 * Called only when finding queue for aggregation.
2111 * Should never return anything < 7, because they should already
2112 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
2114 static int il4965_txq_ctx_activate_free(struct il_priv *il)
/* test_and_set_bit atomically claims the first clear bit */
2118 	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2119 		if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2125 * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
2127 static void il4965_tx_queue_stop_scheduler(struct il_priv *il,
2130 	/* Simply stop the queue, but don't change any configuration;
2131 	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
/* writes the SCD queue-status register (write call itself elided) */
2133 		   IL49_SCD_QUEUE_STATUS_BITS(txq_id),
2134 		   (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2135 		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2139 * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
2141 static int il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid,
2148 	scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2150 	tbl_dw_addr = il->scd_base_addr +
2151 			IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
/* read-modify-write: each 32-bit table word holds two queue entries,
 * update only the 16-bit half belonging to this queue */
2153 	tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2156 		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2158 		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2160 	il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2166 * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
2168 * NOTE:  txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
2169 * i.e. it must be one of the higher queues used for aggregation
2171 static int il4965_txq_agg_enable(struct il_priv *il, int txq_id,
2172 				  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
2174 	unsigned long flags;
/* reject queue ids outside the dedicated A-MPDU queue range */
2178 	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2179 	    (IL49_FIRST_AMPDU_QUEUE +
2180 	     il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
2182 			"queue number out of range: %d, must be %d to %d\n",
2183 			txq_id, IL49_FIRST_AMPDU_QUEUE,
2184 			IL49_FIRST_AMPDU_QUEUE +
2185 			il->cfg->base_params->num_of_ampdu_queues - 1);
2189 	ra_tid = BUILD_RAxTID(sta_id, tid);
2191 	/* Modify device's station table to Tx this TID */
2192 	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
2196 	spin_lock_irqsave(&il->lock, flags);
2198 	/* Stop this Tx queue before configuring it */
2199 	il4965_tx_queue_stop_scheduler(il, txq_id);
2201 	/* Map receiver-address / traffic-ID to this queue */
2202 	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
2204 	/* Set this queue as a chain-building queue */
2205 	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2207 	/* Place first TFD at idx corresponding to start sequence number.
2208 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
2209 	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2210 	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2211 	il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2213 	/* Set up Tx win size and frame limit for this queue */
2214 	il_write_targ_mem(il,
2215 		il->scd_base_addr + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
2216 		(SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
2217 		IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
2219 	il_write_targ_mem(il, il->scd_base_addr +
2220 		IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
2221 		(SCD_FRAME_LIMIT << IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
2222 		& IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
/* enable scheduler interrupts for this queue */
2224 	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2226 	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
2227 	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
2229 	spin_unlock_irqrestore(&il->lock, flags);
/*
 * il4965_tx_agg_start - mac80211 ampdu_action(START) entry point.
 * Validates station/TID state, claims a free aggregation queue, records
 * the starting SSN and queue id in the TID data, enables the hardware
 * queue, and either completes the ADDBA handshake immediately (HW queue
 * empty) or defers it until the queue drains (EMPTYING_HW_QUEUE_ADDBA).
 */
2235 int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
2236 			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2242 	unsigned long flags;
2243 	struct il_tid_data *tid_data;
2245 	tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
2246 	if (unlikely(tx_fifo < 0))
2249 	IL_WARN("%s on ra = %pM tid = %d\n",
2250 			__func__, sta->addr, tid);
2252 	sta_id = il_sta_id(sta);
2253 	if (sta_id == IL_INVALID_STATION) {
2254 		IL_ERR("Start AGG on invalid station\n");
2257 	if (unlikely(tid >= MAX_TID_COUNT))
2260 	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
2261 		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
/* atomically claim the lowest free aggregation queue */
2265 	txq_id = il4965_txq_ctx_activate_free(il);
2267 		IL_ERR("No free aggregation queue available\n");
2271 	spin_lock_irqsave(&il->sta_lock, flags);
2272 	tid_data = &il->stations[sta_id].tid[tid];
2273 	*ssn = SEQ_TO_SN(tid_data->seq_number);
2274 	tid_data->agg.txq_id = txq_id;
2275 	il_set_swq_id(&il->txq[txq_id],
2276 				il4965_get_ac_from_tid(tid), txq_id);
2277 	spin_unlock_irqrestore(&il->sta_lock, flags);
2279 	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo,
2284 	spin_lock_irqsave(&il->sta_lock, flags);
2285 	tid_data = &il->stations[sta_id].tid[tid];
2286 	if (tid_data->tfds_in_queue == 0) {
2287 		D_HT("HW queue is empty\n");
2288 		tid_data->agg.state = IL_AGG_ON;
2289 		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2292 			"HW queue is NOT empty: %d packets in HW queue\n",
2293 			     tid_data->tfds_in_queue);
/* wait for the queue to drain before completing the ADDBA handshake */
2294 		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
2296 	spin_unlock_irqrestore(&il->sta_lock, flags);
2301 * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
2302 * il->lock must be held by the caller
2304 static int il4965_txq_agg_disable(struct il_priv *il, u16 txq_id,
2305 				   u16 ssn_idx, u8 tx_fifo)
/* reject queue ids outside the dedicated A-MPDU queue range */
2307 	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2308 	    (IL49_FIRST_AMPDU_QUEUE +
2309 	     il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
2311 			"queue number out of range: %d, must be %d to %d\n",
2312 			txq_id, IL49_FIRST_AMPDU_QUEUE,
2313 			IL49_FIRST_AMPDU_QUEUE +
2314 			il->cfg->base_params->num_of_ampdu_queues - 1);
/* undo everything il4965_txq_agg_enable() configured */
2318 	il4965_tx_queue_stop_scheduler(il, txq_id);
2320 	il_clear_bits_prph(il,
2321 			IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2323 	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2324 	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2325 	/* supposes that ssn_idx is valid (!= 0xFFF) */
2326 	il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2328 	il_clear_bits_prph(il,
2329 			IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2330 	il_txq_ctx_deactivate(il, txq_id);
2331 	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
/*
 * il4965_tx_agg_stop - mac80211 ampdu_action(STOP) entry point.
 * If the aggregation queue still holds frames, marks the TID as
 * EMPTYING_HW_QUEUE_DELBA and defers teardown to the reclaim path;
 * otherwise disables the hardware aggregation queue immediately and
 * completes the DELBA handshake toward mac80211.
 */
2336 int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
2337 		       struct ieee80211_sta *sta, u16 tid)
2339 	int tx_fifo_id, txq_id, sta_id, ssn;
2340 	struct il_tid_data *tid_data;
2341 	int write_ptr, read_ptr;
2342 	unsigned long flags;
2344 	tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
2345 	if (unlikely(tx_fifo_id < 0))
2348 	sta_id = il_sta_id(sta);
2350 	if (sta_id == IL_INVALID_STATION) {
2351 		IL_ERR("Invalid station for AGG tid %d\n", tid);
2355 	spin_lock_irqsave(&il->sta_lock, flags);
2357 	tid_data = &il->stations[sta_id].tid[tid];
2358 	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
2359 	txq_id = tid_data->agg.txq_id;
2361 	switch (il->stations[sta_id].tid[tid].agg.state) {
2362 	case IL_EMPTYING_HW_QUEUE_ADDBA:
2364 		 * This can happen if the peer stops aggregation
2365 		 * again before we've had a chance to drain the
2366 		 * queue we selected previously, i.e. before the
2367 		 * session was really started completely.
2369 		D_HT("AGG stop before setup done\n");
2374 		IL_WARN("Stopping AGG while state not ON or starting\n");
2377 	write_ptr = il->txq[txq_id].q.write_ptr;
2378 	read_ptr = il->txq[txq_id].q.read_ptr;
2380 	/* The queue is not empty */
2381 	if (write_ptr != read_ptr) {
2382 		D_HT("Stopping a non empty AGG HW QUEUE\n");
/* teardown deferred: finished later by the empty-queue check path */
2383 		il->stations[sta_id].tid[tid].agg.state =
2384 				IL_EMPTYING_HW_QUEUE_DELBA;
2385 		spin_unlock_irqrestore(&il->sta_lock, flags);
2389 	D_HT("HW queue is empty\n");
2391 	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
2393 	/* do not restore/save irqs */
/* hand-over from sta_lock to il->lock, keeping irqs disabled throughout */
2394 	spin_unlock(&il->sta_lock);
2395 	spin_lock(&il->lock);
2398 	 * the only reason this call can fail is queue number out of range,
2399 	 * which can happen if uCode is reloaded and all the station
2400 	 * information are lost. if it is outside the range, there is no need
2401 	 * to deactivate the uCode queue, just return "success" to allow
2402 	 * mac80211 to clean up it own data.
2404 	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
2405 	spin_unlock_irqrestore(&il->lock, flags);
2407 	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2412 int il4965_txq_check_empty(struct il_priv *il,
2413 int sta_id, u8 tid, int txq_id)
2415 struct il_queue *q = &il->txq[txq_id].q;
2416 u8 *addr = il->stations[sta_id].sta.sta.addr;
2417 struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
2418 struct il_rxon_context *ctx;
2422 lockdep_assert_held(&il->sta_lock);
2424 switch (il->stations[sta_id].tid[tid].agg.state) {
2425 case IL_EMPTYING_HW_QUEUE_DELBA:
2426 /* We are reclaiming the last packet of the */
2427 /* aggregated HW queue */
2428 if (txq_id == tid_data->agg.txq_id &&
2429 q->read_ptr == q->write_ptr) {
2430 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
2431 int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
2433 "HW queue empty: continue DELBA flow\n");
2434 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
2435 tid_data->agg.state = IL_AGG_OFF;
2436 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
2439 case IL_EMPTYING_HW_QUEUE_ADDBA:
2440 /* We are reclaiming the last packet of the queue */
2441 if (tid_data->tfds_in_queue == 0) {
2443 "HW queue empty: continue ADDBA flow\n");
2444 tid_data->agg.state = IL_AGG_ON;
2445 ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
2453 static void il4965_non_agg_tx_status(struct il_priv *il,
2454 struct il_rxon_context *ctx,
2457 struct ieee80211_sta *sta;
2458 struct il_station_priv *sta_priv;
2461 sta = ieee80211_find_sta(ctx->vif, addr1);
2463 sta_priv = (void *)sta->drv_priv;
2464 /* avoid atomic ops if this isn't a client */
2465 if (sta_priv->client &&
2466 atomic_dec_return(&sta_priv->pending_frames) == 0)
2467 ieee80211_sta_block_awake(il->hw, sta, false);
2473 il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info,
2476 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
2479 il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
2481 ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
2484 int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
2486 struct il_tx_queue *txq = &il->txq[txq_id];
2487 struct il_queue *q = &txq->q;
2488 struct il_tx_info *tx_info;
2490 struct ieee80211_hdr *hdr;
2492 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
2493 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
2494 "is out of range [0-%d] %d %d.\n", txq_id,
2495 idx, q->n_bd, q->write_ptr, q->read_ptr);
2499 for (idx = il_queue_inc_wrap(idx, q->n_bd);
2501 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
2503 tx_info = &txq->txb[txq->q.read_ptr];
2505 if (WARN_ON_ONCE(tx_info->skb == NULL))
2508 hdr = (struct ieee80211_hdr *)tx_info->skb->data;
2509 if (ieee80211_is_data_qos(hdr->frame_control))
2512 il4965_tx_status(il, tx_info,
2513 txq_id >= IL4965_FIRST_AMPDU_QUEUE);
2514 tx_info->skb = NULL;
2516 il->cfg->ops->lib->txq_free_tfd(il, txq);
2522 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
2524 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
2525 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
2527 static int il4965_tx_status_reply_compressed_ba(struct il_priv *il,
2528 struct il_ht_agg *agg,
2529 struct il_compressed_ba_resp *ba_resp)
2533 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
2534 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2536 struct ieee80211_tx_info *info;
2537 u64 bitmap, sent_bitmap;
2539 if (unlikely(!agg->wait_for_ba)) {
2540 if (unlikely(ba_resp->bitmap))
2541 IL_ERR("Received BA when not expected\n");
2545 /* Mark that the expected block-ack response arrived */
2546 agg->wait_for_ba = 0;
2547 D_TX_REPLY("BA %d %d\n", agg->start_idx,
2550 /* Calculate shift to align block-ack bits with our Tx win bits */
2551 sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
2552 if (sh < 0) /* tbw something is wrong with indices */
2555 if (agg->frame_count > (64 - sh)) {
2556 D_TX_REPLY("more frames than bitmap size");
2560 /* don't use 64-bit values for now */
2561 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
2563 /* check for success or failure according to the
2564 * transmitted bitmap and block-ack bitmap */
2565 sent_bitmap = bitmap & agg->bitmap;
2567 /* For each frame attempted in aggregation,
2568 * update driver's record of tx frame's status. */
2570 while (sent_bitmap) {
2571 ack = sent_bitmap & 1ULL;
2573 D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
2574 ack ? "ACK" : "NACK", i,
2575 (agg->start_idx + i) & 0xff,
2576 agg->start_idx + i);
2581 D_TX_REPLY("Bitmap %llx\n",
2582 (unsigned long long)bitmap);
2584 info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
2585 memset(&info->status, 0, sizeof(info->status));
2586 info->flags |= IEEE80211_TX_STAT_ACK;
2587 info->flags |= IEEE80211_TX_STAT_AMPDU;
2588 info->status.ampdu_ack_len = successes;
2589 info->status.ampdu_len = agg->frame_count;
2590 il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
2596 * translate ucode response to mac80211 tx status control values
2598 void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2599 struct ieee80211_tx_info *info)
2601 struct ieee80211_tx_rate *r = &info->control.rates[0];
2603 info->antenna_sel_tx =
2604 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2605 if (rate_n_flags & RATE_MCS_HT_MSK)
2606 r->flags |= IEEE80211_TX_RC_MCS;
2607 if (rate_n_flags & RATE_MCS_GF_MSK)
2608 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2609 if (rate_n_flags & RATE_MCS_HT40_MSK)
2610 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2611 if (rate_n_flags & RATE_MCS_DUP_MSK)
2612 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2613 if (rate_n_flags & RATE_MCS_SGI_MSK)
2614 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2615 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2619 * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
2621 * Handles block-acknowledge notification from device, which reports success
2622 * of frames sent via aggregation.
2624 void il4965_hdl_compressed_ba(struct il_priv *il,
2625 struct il_rx_buf *rxb)
2627 struct il_rx_pkt *pkt = rxb_addr(rxb);
2628 struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
2629 struct il_tx_queue *txq = NULL;
2630 struct il_ht_agg *agg;
2634 unsigned long flags;
2636 /* "flow" corresponds to Tx queue */
2637 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2639 /* "ssn" is start of block-ack Tx win, corresponds to idx
2640 * (in Tx queue's circular buffer) of first TFD/frame in win */
2641 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
2643 if (scd_flow >= il->hw_params.max_txq_num) {
2645 "BUG_ON scd_flow is bigger than number of queues\n");
2649 txq = &il->txq[scd_flow];
2650 sta_id = ba_resp->sta_id;
2652 agg = &il->stations[sta_id].tid[tid].agg;
2653 if (unlikely(agg->txq_id != scd_flow)) {
2655 * FIXME: this is a uCode bug which need to be addressed,
2656 * log the information and return for now!
2657 * since it is possible happen very often and in order
2658 * not to fill the syslog, don't enable the logging by default
2661 "BA scd_flow %d does not match txq_id %d\n",
2662 scd_flow, agg->txq_id);
2666 /* Find idx just before block-ack win */
2667 idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
2669 spin_lock_irqsave(&il->sta_lock, flags);
2671 D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, "
2674 (u8 *) &ba_resp->sta_addr_lo32,
2676 D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
2678 "%d, scd_ssn = %d\n",
2681 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
2684 D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n",
2686 (unsigned long long)agg->bitmap);
2688 /* Update driver's record of ACK vs. not for each frame in win */
2689 il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
2691 /* Release all TFDs before the SSN, i.e. all TFDs in front of
2692 * block-ack win (we assume that they've been successfully
2693 * transmitted ... if not, it's too late anyway). */
2694 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
2695 /* calculate mac80211 ampdu sw queue to wake */
2696 int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
2697 il4965_free_tfds_in_queue(il, sta_id, tid, freed);
2699 if (il_queue_space(&txq->q) > txq->q.low_mark &&
2700 il->mac80211_registered &&
2701 agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
2702 il_wake_queue(il, txq);
2704 il4965_txq_check_empty(il, sta_id, tid, scd_flow);
2707 spin_unlock_irqrestore(&il->sta_lock, flags);
2710 #ifdef CONFIG_IWLEGACY_DEBUG
2711 const char *il4965_get_tx_fail_reason(u32 status)
2713 #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
2714 #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
2716 switch (status & TX_STATUS_MSK) {
2717 case TX_STATUS_SUCCESS:
2719 TX_STATUS_POSTPONE(DELAY);
2720 TX_STATUS_POSTPONE(FEW_BYTES);
2721 TX_STATUS_POSTPONE(QUIET_PERIOD);
2722 TX_STATUS_POSTPONE(CALC_TTAK);
2723 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
2724 TX_STATUS_FAIL(SHORT_LIMIT);
2725 TX_STATUS_FAIL(LONG_LIMIT);
2726 TX_STATUS_FAIL(FIFO_UNDERRUN);
2727 TX_STATUS_FAIL(DRAIN_FLOW);
2728 TX_STATUS_FAIL(RFKILL_FLUSH);
2729 TX_STATUS_FAIL(LIFE_EXPIRE);
2730 TX_STATUS_FAIL(DEST_PS);
2731 TX_STATUS_FAIL(HOST_ABORTED);
2732 TX_STATUS_FAIL(BT_RETRY);
2733 TX_STATUS_FAIL(STA_INVALID);
2734 TX_STATUS_FAIL(FRAG_DROPPED);
2735 TX_STATUS_FAIL(TID_DISABLE);
2736 TX_STATUS_FAIL(FIFO_FLUSHED);
2737 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
2738 TX_STATUS_FAIL(PASSIVE_NO_RX);
2739 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
2744 #undef TX_STATUS_FAIL
2745 #undef TX_STATUS_POSTPONE
2747 #endif /* CONFIG_IWLEGACY_DEBUG */
2749 static struct il_link_quality_cmd *
2750 il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
2753 struct il_link_quality_cmd *link_cmd;
2755 __le32 rate_n_flags;
2757 link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
2759 IL_ERR("Unable to allocate memory for LQ cmd.\n");
2762 /* Set up the rate scaling to start at selected rate, fall back
2763 * all the way down to 1M in IEEE order, and then spin on 1M */
2764 if (il->band == IEEE80211_BAND_5GHZ)
2769 if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
2770 rate_flags |= RATE_MCS_CCK_MSK;
2772 rate_flags |= il4965_first_antenna(il->hw_params.valid_tx_ant) <<
2774 rate_n_flags = il4965_hw_set_rate_n_flags(il_rates[r].plcp,
2776 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
2777 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
2779 link_cmd->general_params.single_stream_ant_msk =
2780 il4965_first_antenna(il->hw_params.valid_tx_ant);
2782 link_cmd->general_params.dual_stream_ant_msk =
2783 il->hw_params.valid_tx_ant &
2784 ~il4965_first_antenna(il->hw_params.valid_tx_ant);
2785 if (!link_cmd->general_params.dual_stream_ant_msk) {
2786 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
2787 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
2788 link_cmd->general_params.dual_stream_ant_msk =
2789 il->hw_params.valid_tx_ant;
2792 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2793 link_cmd->agg_params.agg_time_limit =
2794 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2796 link_cmd->sta_id = sta_id;
2802 * il4965_add_bssid_station - Add the special IBSS BSSID station
2807 il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
2808 const u8 *addr, u8 *sta_id_r)
2812 struct il_link_quality_cmd *link_cmd;
2813 unsigned long flags;
2816 *sta_id_r = IL_INVALID_STATION;
2818 ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
2820 IL_ERR("Unable to add station %pM\n", addr);
2827 spin_lock_irqsave(&il->sta_lock, flags);
2828 il->stations[sta_id].used |= IL_STA_LOCAL;
2829 spin_unlock_irqrestore(&il->sta_lock, flags);
2831 /* Set up default rate scaling table in device's station table */
2832 link_cmd = il4965_sta_alloc_lq(il, sta_id);
2835 "Unable to initialize rate scaling for station %pM.\n",
2840 ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
2842 IL_ERR("Link quality command failed (%d)\n", ret);
2844 spin_lock_irqsave(&il->sta_lock, flags);
2845 il->stations[sta_id].lq = link_cmd;
2846 spin_unlock_irqrestore(&il->sta_lock, flags);
2851 static int il4965_static_wepkey_cmd(struct il_priv *il,
2852 struct il_rxon_context *ctx,
2855 int i, not_empty = 0;
2856 u8 buff[sizeof(struct il_wep_cmd) +
2857 sizeof(struct il_wep_key) * WEP_KEYS_MAX];
2858 struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
2859 size_t cmd_size = sizeof(struct il_wep_cmd);
2860 struct il_host_cmd cmd = {
2861 .id = ctx->wep_key_cmd,
2868 memset(wep_cmd, 0, cmd_size +
2869 (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
2871 for (i = 0; i < WEP_KEYS_MAX ; i++) {
2872 wep_cmd->key[i].key_idx = i;
2873 if (ctx->wep_keys[i].key_size) {
2874 wep_cmd->key[i].key_offset = i;
2877 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
2880 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
2881 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
2882 ctx->wep_keys[i].key_size);
2885 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
2886 wep_cmd->num_keys = WEP_KEYS_MAX;
2888 cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
2892 if (not_empty || send_if_empty)
2893 return il_send_cmd(il, &cmd);
2898 int il4965_restore_default_wep_keys(struct il_priv *il,
2899 struct il_rxon_context *ctx)
2901 lockdep_assert_held(&il->mutex);
2903 return il4965_static_wepkey_cmd(il, ctx, false);
2906 int il4965_remove_default_wep_key(struct il_priv *il,
2907 struct il_rxon_context *ctx,
2908 struct ieee80211_key_conf *keyconf)
2912 lockdep_assert_held(&il->mutex);
2914 D_WEP("Removing default WEP key: idx=%d\n",
2917 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
2918 if (il_is_rfkill(il)) {
2920 "Not sending C_WEPKEY command due to RFKILL.\n");
2921 /* but keys in device are clear anyway so return success */
2924 ret = il4965_static_wepkey_cmd(il, ctx, 1);
2925 D_WEP("Remove default WEP key: idx=%d ret=%d\n",
2926 keyconf->keyidx, ret);
2931 int il4965_set_default_wep_key(struct il_priv *il,
2932 struct il_rxon_context *ctx,
2933 struct ieee80211_key_conf *keyconf)
2937 lockdep_assert_held(&il->mutex);
2939 if (keyconf->keylen != WEP_KEY_LEN_128 &&
2940 keyconf->keylen != WEP_KEY_LEN_64) {
2941 D_WEP("Bad WEP key length %d\n", keyconf->keylen);
2945 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
2946 keyconf->hw_key_idx = HW_KEY_DEFAULT;
2947 il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
2949 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
2950 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
2953 ret = il4965_static_wepkey_cmd(il, ctx, false);
2954 D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n",
2955 keyconf->keylen, keyconf->keyidx, ret);
2960 static int il4965_set_wep_dynamic_key_info(struct il_priv *il,
2961 struct il_rxon_context *ctx,
2962 struct ieee80211_key_conf *keyconf,
2965 unsigned long flags;
2966 __le16 key_flags = 0;
2967 struct il_addsta_cmd sta_cmd;
2969 lockdep_assert_held(&il->mutex);
2971 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
2973 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
2974 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
2975 key_flags &= ~STA_KEY_FLG_INVALID;
2977 if (keyconf->keylen == WEP_KEY_LEN_128)
2978 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
2980 if (sta_id == ctx->bcast_sta_id)
2981 key_flags |= STA_KEY_MULTICAST_MSK;
2983 spin_lock_irqsave(&il->sta_lock, flags);
2985 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
2986 il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
2987 il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
2989 memcpy(il->stations[sta_id].keyinfo.key,
2990 keyconf->key, keyconf->keylen);
2992 memcpy(&il->stations[sta_id].sta.key.key[3],
2993 keyconf->key, keyconf->keylen);
2995 if ((il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
2996 == STA_KEY_FLG_NO_ENC)
2997 il->stations[sta_id].sta.key.key_offset =
2998 il_get_free_ucode_key_idx(il);
2999 /* else, we are overriding an existing key => no need to allocated room
3002 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
3003 "no space for a new key");
3005 il->stations[sta_id].sta.key.key_flags = key_flags;
3006 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3007 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3009 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3010 sizeof(struct il_addsta_cmd));
3011 spin_unlock_irqrestore(&il->sta_lock, flags);
3013 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3016 static int il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
3017 struct il_rxon_context *ctx,
3018 struct ieee80211_key_conf *keyconf,
3021 unsigned long flags;
3022 __le16 key_flags = 0;
3023 struct il_addsta_cmd sta_cmd;
3025 lockdep_assert_held(&il->mutex);
3027 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
3028 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
3029 key_flags &= ~STA_KEY_FLG_INVALID;
3031 if (sta_id == ctx->bcast_sta_id)
3032 key_flags |= STA_KEY_MULTICAST_MSK;
3034 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3036 spin_lock_irqsave(&il->sta_lock, flags);
3037 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
3038 il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
3040 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key,
3043 memcpy(il->stations[sta_id].sta.key.key, keyconf->key,
3046 if ((il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
3047 == STA_KEY_FLG_NO_ENC)
3048 il->stations[sta_id].sta.key.key_offset =
3049 il_get_free_ucode_key_idx(il);
3050 /* else, we are overriding an existing key => no need to allocated room
3053 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
3054 "no space for a new key");
3056 il->stations[sta_id].sta.key.key_flags = key_flags;
3057 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3058 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3060 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3061 sizeof(struct il_addsta_cmd));
3062 spin_unlock_irqrestore(&il->sta_lock, flags);
3064 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3067 static int il4965_set_tkip_dynamic_key_info(struct il_priv *il,
3068 struct il_rxon_context *ctx,
3069 struct ieee80211_key_conf *keyconf,
3072 unsigned long flags;
3074 __le16 key_flags = 0;
3076 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
3077 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
3078 key_flags &= ~STA_KEY_FLG_INVALID;
3080 if (sta_id == ctx->bcast_sta_id)
3081 key_flags |= STA_KEY_MULTICAST_MSK;
3083 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3084 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3086 spin_lock_irqsave(&il->sta_lock, flags);
3088 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
3089 il->stations[sta_id].keyinfo.keylen = 16;
3091 if ((il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
3092 == STA_KEY_FLG_NO_ENC)
3093 il->stations[sta_id].sta.key.key_offset =
3094 il_get_free_ucode_key_idx(il);
3095 /* else, we are overriding an existing key => no need to allocated room
3098 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
3099 "no space for a new key");
3101 il->stations[sta_id].sta.key.key_flags = key_flags;
3104 /* This copy is acutally not needed: we get the key with each TX */
3105 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
3107 memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
3109 spin_unlock_irqrestore(&il->sta_lock, flags);
3114 void il4965_update_tkip_key(struct il_priv *il,
3115 struct il_rxon_context *ctx,
3116 struct ieee80211_key_conf *keyconf,
3117 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
3120 unsigned long flags;
3123 if (il_scan_cancel(il)) {
3124 /* cancel scan failed, just live w/ bad key and rely
3125 briefly on SW decryption */
3129 sta_id = il_sta_id_or_broadcast(il, ctx, sta);
3130 if (sta_id == IL_INVALID_STATION)
3133 spin_lock_irqsave(&il->sta_lock, flags);
3135 il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
3137 for (i = 0; i < 5; i++)
3138 il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
3139 cpu_to_le16(phase1key[i]);
3141 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3142 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3144 il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
3146 spin_unlock_irqrestore(&il->sta_lock, flags);
3150 int il4965_remove_dynamic_key(struct il_priv *il,
3151 struct il_rxon_context *ctx,
3152 struct ieee80211_key_conf *keyconf,
3155 unsigned long flags;
3158 struct il_addsta_cmd sta_cmd;
3160 lockdep_assert_held(&il->mutex);
3162 ctx->key_mapping_keys--;
3164 spin_lock_irqsave(&il->sta_lock, flags);
3165 key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
3166 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
3168 D_WEP("Remove dynamic key: idx=%d sta=%d\n",
3169 keyconf->keyidx, sta_id);
3171 if (keyconf->keyidx != keyidx) {
3172 /* We need to remove a key with idx different that the one
3173 * in the uCode. This means that the key we need to remove has
3174 * been replaced by another one with different idx.
3175 * Don't do anything and return ok
3177 spin_unlock_irqrestore(&il->sta_lock, flags);
3181 if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
3182 IL_WARN("Removing wrong key %d 0x%x\n",
3183 keyconf->keyidx, key_flags);
3184 spin_unlock_irqrestore(&il->sta_lock, flags);
3188 if (!test_and_clear_bit(il->stations[sta_id].sta.key.key_offset,
3189 &il->ucode_key_table))
3190 IL_ERR("idx %d not used in uCode key table.\n",
3191 il->stations[sta_id].sta.key.key_offset);
3192 memset(&il->stations[sta_id].keyinfo, 0,
3193 sizeof(struct il_hw_key));
3194 memset(&il->stations[sta_id].sta.key, 0,
3195 sizeof(struct il4965_keyinfo));
3196 il->stations[sta_id].sta.key.key_flags =
3197 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
3198 il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
3199 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3200 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3202 if (il_is_rfkill(il)) {
3204 "Not sending C_ADD_STA command because RFKILL enabled.\n");
3205 spin_unlock_irqrestore(&il->sta_lock, flags);
3208 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3209 sizeof(struct il_addsta_cmd));
3210 spin_unlock_irqrestore(&il->sta_lock, flags);
3212 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3215 int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
3216 struct ieee80211_key_conf *keyconf, u8 sta_id)
3220 lockdep_assert_held(&il->mutex);
3222 ctx->key_mapping_keys++;
3223 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
3225 switch (keyconf->cipher) {
3226 case WLAN_CIPHER_SUITE_CCMP:
3227 ret = il4965_set_ccmp_dynamic_key_info(il, ctx,
3230 case WLAN_CIPHER_SUITE_TKIP:
3231 ret = il4965_set_tkip_dynamic_key_info(il, ctx,
3234 case WLAN_CIPHER_SUITE_WEP40:
3235 case WLAN_CIPHER_SUITE_WEP104:
3236 ret = il4965_set_wep_dynamic_key_info(il, ctx,
3241 "Unknown alg: %s cipher = %x\n", __func__,
3247 "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
3248 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3255 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
3257 * This adds the broadcast station into the driver's station table
3258 * and marks it driver active, so that it will be restored to the
3259 * device at the next best time.
3261 int il4965_alloc_bcast_station(struct il_priv *il,
3262 struct il_rxon_context *ctx)
3264 struct il_link_quality_cmd *link_cmd;
3265 unsigned long flags;
3268 spin_lock_irqsave(&il->sta_lock, flags);
3269 sta_id = il_prep_station(il, ctx, il_bcast_addr,
3271 if (sta_id == IL_INVALID_STATION) {
3272 IL_ERR("Unable to prepare broadcast station\n");
3273 spin_unlock_irqrestore(&il->sta_lock, flags);
3278 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
3279 il->stations[sta_id].used |= IL_STA_BCAST;
3280 spin_unlock_irqrestore(&il->sta_lock, flags);
3282 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3285 "Unable to initialize rate scaling for bcast station.\n");
3289 spin_lock_irqsave(&il->sta_lock, flags);
3290 il->stations[sta_id].lq = link_cmd;
3291 spin_unlock_irqrestore(&il->sta_lock, flags);
3297 * il4965_update_bcast_station - update broadcast station's LQ command
3299 * Only used by iwl4965. Placed here to have all bcast station management
3302 static int il4965_update_bcast_station(struct il_priv *il,
3303 struct il_rxon_context *ctx)
3305 unsigned long flags;
3306 struct il_link_quality_cmd *link_cmd;
3307 u8 sta_id = ctx->bcast_sta_id;
3309 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3312 "Unable to initialize rate scaling for bcast station.\n");
3316 spin_lock_irqsave(&il->sta_lock, flags);
3317 if (il->stations[sta_id].lq)
3318 kfree(il->stations[sta_id].lq);
3321 "Bcast station rate scaling has not been initialized yet.\n");
3322 il->stations[sta_id].lq = link_cmd;
3323 spin_unlock_irqrestore(&il->sta_lock, flags);
3328 int il4965_update_bcast_stations(struct il_priv *il)
3330 return il4965_update_bcast_station(il, &il->ctx);
3334 * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
3336 int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
3338 unsigned long flags;
3339 struct il_addsta_cmd sta_cmd;
3341 lockdep_assert_held(&il->mutex);
3343 /* Remove "disable" flag, to enable Tx for this TID */
3344 spin_lock_irqsave(&il->sta_lock, flags);
3345 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
3346 il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
3347 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3348 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3349 sizeof(struct il_addsta_cmd));
3350 spin_unlock_irqrestore(&il->sta_lock, flags);
3352 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3355 int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
3358 unsigned long flags;
3360 struct il_addsta_cmd sta_cmd;
3362 lockdep_assert_held(&il->mutex);
3364 sta_id = il_sta_id(sta);
3365 if (sta_id == IL_INVALID_STATION)
3368 spin_lock_irqsave(&il->sta_lock, flags);
3369 il->stations[sta_id].sta.station_flags_msk = 0;
3370 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
3371 il->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
3372 il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
3373 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3374 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3375 sizeof(struct il_addsta_cmd));
3376 spin_unlock_irqrestore(&il->sta_lock, flags);
3378 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3381 int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
3384 unsigned long flags;
3386 struct il_addsta_cmd sta_cmd;
3388 lockdep_assert_held(&il->mutex);
3390 sta_id = il_sta_id(sta);
3391 if (sta_id == IL_INVALID_STATION) {
3392 IL_ERR("Invalid station for AGG tid %d\n", tid);
3396 spin_lock_irqsave(&il->sta_lock, flags);
3397 il->stations[sta_id].sta.station_flags_msk = 0;
3398 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
3399 il->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
3400 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3401 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3402 sizeof(struct il_addsta_cmd));
3403 spin_unlock_irqrestore(&il->sta_lock, flags);
3405 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3409 il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
3411 unsigned long flags;
3413 spin_lock_irqsave(&il->sta_lock, flags);
3414 il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
3415 il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3416 il->stations[sta_id].sta.sta.modify_mask =
3417 STA_MODIFY_SLEEP_TX_COUNT_MSK;
3418 il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
3419 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3421 &il->stations[sta_id].sta, CMD_ASYNC);
3422 spin_unlock_irqrestore(&il->sta_lock, flags);
3426 void il4965_update_chain_flags(struct il_priv *il)
3428 if (il->cfg->ops->hcmd->set_rxon_chain) {
3429 il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
3430 if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
3431 il_commit_rxon(il, &il->ctx);
3435 static void il4965_clear_free_frames(struct il_priv *il)
3437 struct list_head *element;
3439 D_INFO("%d frames on pre-allocated heap on clear.\n",
3442 while (!list_empty(&il->free_frames)) {
3443 element = il->free_frames.next;
3445 kfree(list_entry(element, struct il_frame, list));
3449 if (il->frames_count) {
3450 IL_WARN("%d frames still in use. Did we lose one?\n",
3452 il->frames_count = 0;
3456 static struct il_frame *il4965_get_free_frame(struct il_priv *il)
3458 struct il_frame *frame;
3459 struct list_head *element;
3460 if (list_empty(&il->free_frames)) {
3461 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3463 IL_ERR("Could not allocate frame!\n");
3471 element = il->free_frames.next;
3473 return list_entry(element, struct il_frame, list);
3476 static void il4965_free_frame(struct il_priv *il, struct il_frame *frame)
3478 memset(frame, 0, sizeof(*frame));
3479 list_add(&frame->list, &il->free_frames);
3482 static u32 il4965_fill_beacon_frame(struct il_priv *il,
3483 struct ieee80211_hdr *hdr,
3486 lockdep_assert_held(&il->mutex);
3488 if (!il->beacon_skb)
3491 if (il->beacon_skb->len > left)
3494 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
3496 return il->beacon_skb->len;
3499 /* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
3500 static void il4965_set_beacon_tim(struct il_priv *il,
3501 struct il_tx_beacon_cmd *tx_beacon_cmd,
3502 u8 *beacon, u32 frame_size)
3505 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
3508 * The idx is relative to frame start but we start looking at the
3509 * variable-length part of the beacon.
3511 tim_idx = mgmt->u.beacon.variable - beacon;
3513 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
3514 while ((tim_idx < (frame_size - 2)) &&
3515 (beacon[tim_idx] != WLAN_EID_TIM))
3516 tim_idx += beacon[tim_idx+1] + 2;
3518 /* If TIM field was found, set variables */
3519 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
3520 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
3521 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
3523 IL_WARN("Unable to find TIM Element in beacon\n");
/*
 * Build a C_TX_BEACON command inside @frame->u.beacon and return the
 * total command size (command struct + beacon frame), which the caller
 * (il4965_send_beacon_cmd()) uses as the PDU length.
 * Must be called with il->mutex held (see lockdep assert below).
 */
static unsigned int il4965_hw_get_beacon_cmd(struct il_priv *il,
				struct il_frame *frame)
	struct il_tx_beacon_cmd *tx_beacon_cmd;

	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 */

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_ctx) {
		IL_ERR("trying to build beacon w/o beacon context!\n");

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size = il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,

	/* Set up packet rate and flags */
	rate = il_get_lowest_plcp(il, il->beacon_ctx);
	/* Rotate the TX antenna used for management frames. */
	il->mgmt_tx_ant = il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
					il->hw_params.valid_tx_ant);
	rate_flags = il4965_ant_idx_to_flags(il->mgmt_tx_ant);
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = il4965_hw_set_rate_n_flags(rate,

	return sizeof(*tx_beacon_cmd) + frame_size;
/*
 * Obtain a free frame buffer, build the beacon command in it via
 * il4965_hw_get_beacon_cmd(), and send it as a C_TX_BEACON PDU.
 * The frame buffer is returned to the free pool on both the error
 * path and after the command is sent.
 */
int il4965_send_beacon_cmd(struct il_priv *il)
	struct il_frame *frame;
	unsigned int frame_size;

	frame = il4965_get_free_frame(il);
		IL_ERR("Could not obtain free frame buffer for beacon "

	frame_size = il4965_hw_get_beacon_cmd(il, frame);
		IL_ERR("Error configuring the beacon command\n");
		il4965_free_frame(il, frame);

	rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size,

	il4965_free_frame(il, frame);
3609 static inline dma_addr_t il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
3611 struct il_tfd_tb *tb = &tfd->tbs[idx];
3613 dma_addr_t addr = get_unaligned_le32(&tb->lo);
3614 if (sizeof(dma_addr_t) > sizeof(u32))
3616 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
3621 static inline u16 il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
3623 struct il_tfd_tb *tb = &tfd->tbs[idx];
3625 return le16_to_cpu(tb->hi_n_len) >> 4;
3628 static inline void il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx,
3629 dma_addr_t addr, u16 len)
3631 struct il_tfd_tb *tb = &tfd->tbs[idx];
3632 u16 hi_n_len = len << 4;
3634 put_unaligned_le32(addr, &tb->lo);
3635 if (sizeof(dma_addr_t) > sizeof(u32))
3636 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
3638 tb->hi_n_len = cpu_to_le16(hi_n_len);
3640 tfd->num_tbs = idx + 1;
3643 static inline u8 il4965_tfd_get_num_tbs(struct il_tfd *tfd)
3645 return tfd->num_tbs & 0x1f;
/**
 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @il - driver private data
 * @txq - tx queue whose read_ptr TFD is to be released
 *
 * Does NOT advance any TFD circular buffer read/write idxes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;

	tfd = &tfd_tmp[idx];

	/* Sanity check on number of chunks */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */

	/* First chunk's mapping is recorded in the meta entry, not the TB. */
	pci_unmap_single(dev,
			 dma_unmap_addr(&txq->meta[idx], mapping),
			 dma_unmap_len(&txq->meta[idx], len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
				il4965_tfd_tb_get_len(tfd, i),

		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
			dev_kfree_skb_any(skb);
		txq->txb[txq->q.read_ptr].skb = NULL;
/*
 * Attach one DMA buffer (@addr/@len) as the next transfer buffer of the
 * TFD at the queue's write pointer. The 4965 supports 36-bit DMA
 * addresses (see the BUG_ON below); addresses must also satisfy
 * IL_TX_DMA_MASK alignment, otherwise an error is logged.
 */
int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il,
				struct il_tx_queue *txq,
				dma_addr_t addr, u16 len,
	struct il_tfd *tfd, *tfd_tmp;

	tfd_tmp = (struct il_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	memset(tfd, 0, sizeof(*tfd));

	num_tbs = il4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Error can not send more than %d chunks\n",

	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IL_TX_DMA_MASK))
		IL_ERR("Unaligned address = %llx\n",
		       (unsigned long long)addr);

	il4965_tfd_set_tb(tfd, num_tbs, addr, len);
/**
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
int il4965_hw_tx_queue_init(struct il_priv *il,
				struct il_tx_queue *txq)
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
	/* Hardware takes the base address shifted right by 8 bits. */
	il_wr(il, FH_MEM_CBBC_QUEUE(txq_id),
	      txq->q.dma_addr >> 8);
3757 /******************************************************************************
3759 * Generic RX handler implementations
3761 ******************************************************************************/
/*
 * Handle the N_ALIVE notification: copy the uCode "alive" response into
 * card_alive_init (initialization ucode) or card_alive (runtime ucode),
 * then queue the matching delayed start work.
 */
static void il4965_hdl_alive(struct il_priv *il,
			     struct il_rx_buf *rxb)
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	D_INFO("Alive ucode status 0x%08X revision "
	       palive->is_valid, palive->ver_type,
	       palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		D_INFO("Initialization Alive received.\n");
		memcpy(&il->card_alive_init,
		       &pkt->u.alive_frame,
		       sizeof(struct il_init_alive_resp));
		pwork = &il->init_alive_start;
		D_INFO("Runtime Alive received.\n");
		memcpy(&il->card_alive, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->alive_start;

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(il->workqueue, pwork,
				   msecs_to_jiffies(5));
		IL_WARN("uCode did not respond OK.\n");
/**
 * il4965_bg_stats_periodic - Timer callback to queue stats
 *
 * This callback is provided in order to send a stats request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last N_STATS
 * was received. We need to ensure we receive the stats in order
 * to update the temperature used for calibrating the TXPOWER.
 */
static void il4965_bg_stats_periodic(unsigned long data)
	/* Timer callbacks get the il pointer via the opaque data word. */
	struct il_priv *il = (struct il_priv *)data;

	/* Nothing to do when the driver is shutting down. */
	if (test_bit(S_EXIT_PENDING, &il->status))

	/* dont send host command if rf-kill is on */
	if (!il_is_ready_rf(il))

	il_send_stats_request(il, CMD_ASYNC, false);
/*
 * Handle the N_BEACON notification: optionally log the TX status of our
 * own beacon (debug builds) and record the uCode-reported IBSS manager
 * status in il->ibss_manager.
 */
static void il4965_hdl_beacon(struct il_priv *il,
			      struct il_rx_buf *rxb)
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
		(struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d "
	     "tsf %d %d rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf),
	     le32_to_cpu(beacon->low_tsf), rate);

	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
/*
 * Critical-temperature (CT) kill handling: stop all mac80211 queues and
 * set the CT_KILL_EXIT bit in the driver GP1 scratch register.
 */
static void il4965_perform_ct_kill_task(struct il_priv *il)
	unsigned long flags;

	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	/* Read back — presumably to flush the posted write; confirm. */
	_il_rd(il, CSR_UCODE_DRV_GP1);

	spin_lock_irqsave(&il->reg_lock, flags);
	/* Grab-and-release of NIC access under reg_lock; the side effect
	 * of the grab/release cycle is relied upon here. */
	if (!_il_grab_nic_access(il))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void il4965_hdl_card_state(struct il_priv *il,
				  struct il_rx_buf *rxb)
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of il->status, used below to detect an rfkill change. */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ?
		  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* Block host commands while the card is disabled. */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C,
		      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RF_KILL_HW, &il->status);
		clear_bit(S_RF_KILL_HW, &il->status);

	if (!(flags & RXON_CARD_DISABLED))

	/* Notify rfkill core only when the HW rfkill state changed. */
	if ((test_bit(S_RF_KILL_HW, &status) !=
	     test_bit(S_RF_KILL_HW, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
			test_bit(S_RF_KILL_HW, &il->status));

		wake_up(&il->wait_command_queue);
/**
 * il4965_setup_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void il4965_setup_handlers(struct il_priv *il)
	il->handlers[N_ALIVE] = il4965_hdl_alive;
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] =
	    il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] =
	    il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il4965_hdl_beacon;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * stats request from the host as well as for the periodic
	 * stats notifications (after received beacons) from the uCode.
	 */
	il->handlers[C_STATS] = il4965_hdl_c_stats;
	il->handlers[N_STATS] = il4965_hdl_stats;

	il_setup_rx_scan_handlers(il);

	/* status change handler */
	il->handlers[N_CARD_STATE] =
	    il4965_hdl_card_state;

	il->handlers[N_MISSED_BEACONS] =
	    il4965_hdl_missed_beacon;

	/* Rx frame handlers: PHY info precedes the MPDU notification. */
	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
	/* Block-ack notifications */
	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
	/* Set up hardware specific Rx handlers */
	il->cfg->ops->lib->handler_setup(il);
/**
 * il4965_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the il->handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
void il4965_rx_handle(struct il_priv *il)
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	unsigned long flags;

	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;

	/* Rx interrupt, but nothing sent from uCode */

	D_RX("r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		/* Give the page back to the CPU before reading the packet. */
		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */

		/* Reclaim a command buffer only if this packet is a response
		 * to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 * there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 * but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != N_RX_PHY) &&
			(pkt->hdr.cmd != N_RX) &&
			(pkt->hdr.cmd != N_RX_MPDU) &&
			(pkt->hdr.cmd != N_COMPRESSED_BA) &&
			(pkt->hdr.cmd != N_STATS) &&
			(pkt->hdr.cmd != C_TX);

		/* Based on type of command response or notification,
		 * handle those that need handling via function in
		 * handlers table. See il4965_setup_handlers() */
		if (il->handlers[pkt->hdr.cmd]) {
			D_RX("r = %d, i = %d, %s, 0x%02x\n", r,
			     i, il_get_cmd_string(pkt->hdr.cmd),
			il->isr_stats.handlers[pkt->hdr.cmd]++;
			il->handlers[pkt->hdr.cmd] (il, rxb);
			/* No handling needed */
			    "r %d i %d No handler needed for %s, 0x%02x\n",
			    r, i, il_get_cmd_string(pkt->hdr.cmd),

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some handler might have
		 * already taken or freed the pages.
		 */

			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking il_send_cmd()
			 * as we reclaim the driver command queue */
				il_tx_cmd_complete(il, rxb);
				IL_WARN("Claim null rxb?\n");

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = pci_map_page(il->pci_dev, rxb->page,
				0, PAGE_SIZE << il->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			/* Page was consumed by a handler; recycle the rxb. */
			list_add_tail(&rxb->list, &rxq->rx_used);
		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
			il4965_rx_replenish_now(il);

	/* Backtrack one entry */
		il4965_rx_replenish_now(il);
		il4965_rx_queue_restock(il);
4099 /* call this function to flush any scheduled tasklet */
4100 static inline void il4965_synchronize_irq(struct il_priv *il)
4102 /* wait to make sure we flush pending tasklet*/
4103 synchronize_irq(il->pci_dev->irq);
4104 tasklet_kill(&il->irq_tasklet);
/*
 * Interrupt bottom half: ack CSR_INT and CSR_FH_INT_STATUS, then
 * dispatch each discovered interrupt cause (HW error, RF-kill, CT-kill,
 * SW error, wakeup, Rx, ucode-load Tx) and re-enable interrupts.
 */
static void il4965_irq_tasklet(struct il_priv *il)
	u32 inta, handled = 0;
	unsigned long flags;
#ifdef CONFIG_IWLEGACY_DEBUG

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		if (!(_il_rd(il, CSR_GP_CNTRL) &
		      CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(S_ALIVE, &il->status)) {
				set_bit(S_RF_KILL_HW, &il->status);
				clear_bit(S_RF_KILL_HW, &il->status);
			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);

		handled |= CSR_INT_BIT_RF_KILL;

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. "
		       " Restarting 0x%X.\n", inta);
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN(" with FH_INT = 0x%08x\n", inta_fh);

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		    "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		    "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
4293 /*****************************************************************************
4297 *****************************************************************************/
4299 #ifdef CONFIG_IWLEGACY_DEBUG
4302 * The following adds a new attribute to the sysfs representation
4303 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
4304 * used for controlling the debug level.
4306 * See the level definitions in iwl for details.
4308 * The debug_level being managed using sysfs below is a per device debug
4309 * level that is used instead of the global debug level if it (the per
4310 * device debug level) is set.
/* sysfs 'debug_level' read: print the effective debug bitmask in hex. */
static ssize_t il4965_show_debug_level(struct device *d,
				struct device_attribute *attr, char *buf)
	struct il_priv *il = dev_get_drvdata(d);
	return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
/* sysfs 'debug_level' write: parse a hex/decimal bitmask (base 0) and
 * apply it as the per-device debug level. */
static ssize_t il4965_store_debug_level(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
	struct il_priv *il = dev_get_drvdata(d);

	ret = strict_strtoul(buf, 0, &val);
		IL_ERR("%s is not in hex or decimal form.\n", buf);
		il->debug_level = val;
		if (il_alloc_traffic_mem(il))
			"Not enough memory to generate traffic log\n");
	return strnlen(buf, count);
4338 static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
4339 il4965_show_debug_level, il4965_store_debug_level);
4342 #endif /* CONFIG_IWLEGACY_DEBUG */
/* sysfs 'temperature' read: report the last measured temperature;
 * only meaningful once the uCode is alive. */
static ssize_t il4965_show_temperature(struct device *d,
				struct device_attribute *attr, char *buf)
	struct il_priv *il = dev_get_drvdata(d);

	if (!il_is_alive(il))

	return sprintf(buf, "%d\n", il->temperature);
4356 static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
/* sysfs 'tx_power' read: report the user tx-power limit, or "off"
 * while the radio is not ready. */
static ssize_t il4965_show_tx_power(struct device *d,
				struct device_attribute *attr, char *buf)
	struct il_priv *il = dev_get_drvdata(d);

	if (!il_is_ready_rf(il))
		return sprintf(buf, "off\n");

	return sprintf(buf, "%d\n", il->tx_power_user_lmt);
/* sysfs 'tx_power' write: parse a decimal value and apply it via
 * il_set_tx_power(). */
static ssize_t il4965_store_tx_power(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
	struct il_priv *il = dev_get_drvdata(d);

	ret = strict_strtoul(buf, 10, &val);
		IL_INFO("%s is not in decimal form.\n", buf);
		ret = il_set_tx_power(il, val, false);
			IL_ERR("failed setting tx power (0x%d).\n",

static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
		   il4965_show_tx_power, il4965_store_tx_power);
/* Attributes installed via il_attribute_group; debug_level is exposed
 * only on CONFIG_IWLEGACY_DEBUG builds. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
/* Sysfs attribute group registered against the PCI device directory. */
static struct attribute_group il_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il_sysfs_entries,
4408 /******************************************************************************
4410 * uCode download functions
4412 ******************************************************************************/
/* Free every DMA-coherent firmware image buffer: runtime code/data,
 * the data backup copy, init code/data, and the bootstrap image. */
static void il4965_dealloc_ucode_pci(struct il_priv *il)
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
/* Clear CSR_RESET so the NIC starts running. */
static void il4965_nic_start(struct il_priv *il)
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
4430 static void il4965_ucode_callback(const struct firmware *ucode_raw,
4432 static int il4965_mac_setup_register(struct il_priv *il,
4433 u32 max_probe_length);
/*
 * Asynchronously request a firmware file named
 * "<fw_name_pre><fw_idx>.ucode"; il4965_ucode_callback() runs when the
 * firmware loader finishes. On the @first call fw_idx starts at
 * ucode_api_max; an index below ucode_api_min is rejected as "no
 * suitable firmware".
 */
static int __must_check il4965_request_firmware(struct il_priv *il, bool first)
	const char *name_pre = il->cfg->fw_name_pre;

		il->fw_idx = il->cfg->ucode_api_max;
		sprintf(tag, "%d", il->fw_idx);
		sprintf(tag, "%d", il->fw_idx);

	if (il->fw_idx < il->cfg->ucode_api_min) {
		IL_ERR("no suitable firmware found!\n");

	sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");

	D_INFO("attempting to load firmware '%s'\n",

	return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
				       &il->pci_dev->dev, GFP_KERNEL, il,
				       il4965_ucode_callback);
/* Pointers into (and sizes of) the five sections of a raw ucode file,
 * filled in by il4965_load_firmware(): runtime inst/data, init
 * inst/data, and bootstrap inst. */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
/*
 * Parse a raw ucode file: read the v1 header (version plus the five
 * section sizes), verify the file size matches the sum of the sections,
 * and point @pieces at each section in file order
 * (inst, data, init, init_data, boot).
 */
static int il4965_load_firmware(struct il_priv *il,
				const struct firmware *ucode_raw,
				struct il4965_firmware_pieces *pieces)
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;

	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	if (ucode_raw->size < hdr_size) {
		IL_ERR("File size too small!\n");

	pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
	pieces->data_size = le32_to_cpu(ucode->v1.data_size);
	pieces->init_size = le32_to_cpu(ucode->v1.init_size);
	pieces->init_data_size =
	    le32_to_cpu(ucode->v1.init_data_size);
	pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
	src = ucode->v1.data;

	/* Verify size of file vs. image size info in file's header */
	if (ucode_raw->size != hdr_size + pieces->inst_size +
	    pieces->data_size + pieces->init_size +
	    pieces->init_data_size + pieces->boot_size) {
		"uCode file size %d does not match expected size\n",
		(int)ucode_raw->size);

	/* Walk the file: each section starts where the previous ended. */
	src += pieces->inst_size;
	src += pieces->data_size;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	src += pieces->boot_size;
/**
 * il4965_ucode_callback - callback when firmware was loaded
 *
 * If loaded successfully, copies the firmware into buffers
 * for the card to fetch (via DMA).
 */
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
	struct il_priv *il = context;
	struct il_ucode_header *ucode;
	struct il4965_firmware_pieces pieces;
	const unsigned int api_max = il->cfg->ucode_api_max;
	const unsigned int api_min = il->cfg->ucode_api_min;
	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
			IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	/* ucode_raw == NULL means this request failed; report only while
	 * there may still be lower-API versions to try. */
		if (il->fw_idx <= il->cfg->ucode_api_max)
			"request for firmware file '%s' failed.\n",

	D_INFO("Loaded firmware file '%s' (%zd bytes).\n",
	       il->firmware_name, ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IL_ERR("File size way too small!\n");

	/* Data from ucode file: header followed by uCode images */
	ucode = (struct il_ucode_header *)ucode_raw->data;

	err = il4965_load_firmware(il, ucode_raw, &pieces);

	api_ver = IL_UCODE_API(il->ucode_ver);

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	if (api_ver < api_min || api_ver > api_max) {
		"Driver unable to support your firmware API. "
		"Driver supports v%u, firmware is v%u.\n",

	if (api_ver != api_max)
		"Firmware has old API version. Expected v%u, "
		"got v%u. New firmware can be obtained "
		"from http://www.intellinuxwireless.org.\n",

	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
		IL_UCODE_MAJOR(il->ucode_ver),
		IL_UCODE_MINOR(il->ucode_ver),
		IL_UCODE_API(il->ucode_ver),
		IL_UCODE_SERIAL(il->ucode_ver));

	snprintf(il->hw->wiphy->fw_version,
		 sizeof(il->hw->wiphy->fw_version),
		 IL_UCODE_MAJOR(il->ucode_ver),
		 IL_UCODE_MINOR(il->ucode_ver),
		 IL_UCODE_API(il->ucode_ver),
		 IL_UCODE_SERIAL(il->ucode_ver));

	/*
	 * For any of the failures below (before allocating pci memory)
	 * we will try to load a version with a smaller API -- maybe the
	 * user just got a corrupted version of the latest API.
	 */

	D_INFO("f/w package hdr ucode version raw = 0x%x\n",
	D_INFO("f/w package hdr runtime inst size = %Zd\n",
	D_INFO("f/w package hdr runtime data size = %Zd\n",
	D_INFO("f/w package hdr init inst size = %Zd\n",
	D_INFO("f/w package hdr init data size = %Zd\n",
	       pieces.init_data_size);
	D_INFO("f/w package hdr boot inst size = %Zd\n",

	/* Verify that uCode images will fit in card's SRAM */
	if (pieces.inst_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode instr len %Zd too large to fit in\n",

	if (pieces.data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode data len %Zd too large to fit in\n",

	if (pieces.init_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode init instr len %Zd too large to fit in\n",

	if (pieces.init_data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode init data len %Zd too large to fit in\n",
		       pieces.init_data_size);

	if (pieces.boot_size > il->hw_params.max_bsm_size) {
		IL_ERR("uCode boot instr len %Zd too large to fit in\n",

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	il->ucode_code.len = pieces.inst_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);

	il->ucode_data.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);

	il->ucode_data_backup.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);

	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
	    !il->ucode_data_backup.v_addr)

	/* Initialization instructions and data */
	if (pieces.init_size && pieces.init_data_size) {
		il->ucode_init.len = pieces.init_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);

		il->ucode_init_data.len = pieces.init_data_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);

		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)

	/* Bootstrap (instructions only, no data) */
	if (pieces.boot_size) {
		il->ucode_boot.len = pieces.boot_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);

		if (!il->ucode_boot.v_addr)

	/* Now that we can no longer fail, copy information */

	il->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy images into buffers for card's bus-master reads ... */

	/* Runtime instructions (first block of data in file) */
	D_INFO("Copying (but not loading) uCode instr len %Zd\n",
	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
	       il->ucode_code.v_addr, (u32)il->ucode_code.p_addr);

	/*
	 * NOTE: Copy into backup buffer will be done in il_up()
	 */
	D_INFO("Copying (but not loading) uCode data len %Zd\n",
	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions */
	if (pieces.init_size) {
		"Copying (but not loading) init instr len %Zd\n",
		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);

	/* Initialization data */
	if (pieces.init_data_size) {
		"Copying (but not loading) init data len %Zd\n",
		pieces.init_data_size);
		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);

	/* Bootstrap instructions */
	D_INFO("Copying (but not loading) boot instr len %Zd\n",
	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/*
	 * figure out the offset of chain noise reset and gain commands
	 * base on the size of standard phy calibration commands table size
	 */
	il->_4965.phy_calib_chain_noise_reset_cmd =
	    standard_phy_calibration_size;
	il->_4965.phy_calib_chain_noise_gain_cmd =
	    standard_phy_calibration_size + 1;

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 9. Setup and register with mac80211 and debugfs
	 **************************************************/
	err = il4965_mac_setup_register(il, max_probe_length);

	err = il_dbgfs_register(il, DRV_NAME);
		/* debugfs failure is non-fatal. */
		"failed to create debugfs files. Ignoring error: %d\n", err);

	err = sysfs_create_group(&il->pci_dev->dev.kobj,
				 &il_attribute_group);
		IL_ERR("failed to create sysfs device attributes\n");

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);
	complete(&il->_4965.firmware_loading_complete);

	/* try next, if any */
	if (il4965_request_firmware(il, false))
	release_firmware(ucode_raw);

	IL_ERR("failed to allocate pci memory\n");
	il4965_dealloc_ucode_pci(il);
	complete(&il->_4965.firmware_loading_complete);
	device_release_driver(&il->pci_dev->dev);
	release_firmware(ucode_raw);
/* Human-readable names for legacy uCode SYSASSERT/error codes; indexed
 * directly by error number in il4965_desc_lookup().
 * NOTE(review): this extract appears to be missing several table entries
 * and the closing brace — confirm against the full file. */
4793 static const char * const desc_lookup_text[] = {
4798 "NMI_INTERRUPT_WDG",
4802 "HW_ERROR_TUNE_LOCK",
4803 "HW_ERROR_TEMPERATURE",
4804 "ILLEGAL_CHAN_FREQ",
4807 "NMI_INTERRUPT_HOST",
4808 "NMI_INTERRUPT_ACTION_PT",
4809 "NMI_INTERRUPT_UNKNOWN",
4810 "UCODE_VERSION_MISMATCH",
4811 "HW_ERROR_ABS_LOCK",
4812 "HW_ERROR_CAL_LOCK_FAIL",
4813 "NMI_INTERRUPT_INST_ACTION_PT",
4814 "NMI_INTERRUPT_DATA_ACTION_PT",
4816 "NMI_INTERRUPT_TRM",
4817 "NMI_INTERRUPT_BREAK_POINT",
/* Sparse error-number -> name mapping searched linearly by
 * il4965_desc_lookup().  The final "ADVANCED_SYSASSERT" entry (num == 0)
 * serves as the catch-all when no earlier entry matches. */
4824 static struct { char *name; u8 num; } advanced_lookup[] = {
4825 { "NMI_INTERRUPT_WDG", 0x34 },
4826 { "SYSASSERT", 0x35 },
4827 { "UCODE_VERSION_MISMATCH", 0x37 },
4828 { "BAD_COMMAND", 0x38 },
4829 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4830 { "FATAL_ERROR", 0x3D },
4831 { "NMI_TRM_HW_ERR", 0x46 },
4832 { "NMI_INTERRUPT_TRM", 0x4C },
4833 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4834 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4835 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4836 { "NMI_INTERRUPT_HOST", 0x66 },
4837 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4838 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4839 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4840 { "ADVANCED_SYSASSERT", 0 },
/*
 * il4965_desc_lookup - translate a uCode error number into a name string.
 *
 * Small numbers index desc_lookup_text[] directly; otherwise
 * advanced_lookup[] is scanned (its last entry is the fallback).
 * NOTE(review): bounds check, loop body and closing braces are not
 * visible in this extract — lines appear to have been dropped.
 */
4843 static const char *il4965_desc_lookup(u32 num)
4846 int max = ARRAY_SIZE(desc_lookup_text);
4849 return desc_lookup_text[num];
4851 max = ARRAY_SIZE(advanced_lookup) - 1;
4852 for (i = 0; i < max; i++) {
4853 if (advanced_lookup[i].num == num)
4856 return advanced_lookup[i].name;
/* Layout constants for the on-device error-event table. */
4859 #define ERROR_START_OFFSET (1 * sizeof(u32))
4860 #define ERROR_ELEM_SIZE (7 * sizeof(u32))
/*
 * il4965_dump_nic_error_log - read the uCode error-event table from
 * device (SRAM) memory and print it via IL_ERR.
 *
 * The table base pointer comes from the init- or runtime-alive response
 * depending on which uCode image is loaded; it is validated with the
 * per-device is_valid_rtc_data_addr() hook before any reads.
 */
4862 void il4965_dump_nic_error_log(struct il_priv *il)
4865 u32 desc, time, count, base, data1;
4866 u32 blink1, blink2, ilink1, ilink2;
4869 if (il->ucode_type == UCODE_INIT) {
4870 base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
4872 base = le32_to_cpu(il->card_alive.error_event_table_ptr);
4875 if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
4877 "Not valid error log pointer 0x%08X for %s uCode\n",
4878 base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
/* First word of the table is the event count. */
4882 count = il_read_targ_mem(il, base);
4884 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4885 IL_ERR("Start IWL Error Log Dump:\n");
4886 IL_ERR("Status: 0x%08lX, count: %d\n",
/* Fixed word offsets within the error table; offsets 10 and
 * 12-21 are skipped here — presumably unused fields, TODO confirm. */
4890 desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
4891 il->isr_stats.err_code = desc;
4892 pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
4893 blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
4894 blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
4895 ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
4896 ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
4897 data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
4898 data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
4899 line = il_read_targ_mem(il, base + 9 * sizeof(u32));
4900 time = il_read_targ_mem(il, base + 11 * sizeof(u32));
4901 hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));
4904 "data1 data2 line\n");
4905 IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
4906 il4965_desc_lookup(desc), desc, time, data1, data2, line);
4907 IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
4908 IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
4909 pc, blink1, blink2, ilink1, ilink2, hcmd);
/*
 * il4965_rf_kill_ct_config - configure the critical-temperature (CT)
 * kill threshold in the uCode.
 *
 * Clears the CT-kill-exit bit under il->lock, then sends a
 * C_CT_KILL_CONFIG host command carrying hw_params.ct_kill_threshold.
 */
4912 static void il4965_rf_kill_ct_config(struct il_priv *il)
4914 struct il_ct_kill_config cmd;
4915 unsigned long flags;
4918 spin_lock_irqsave(&il->lock, flags);
4919 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
4920 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
4921 spin_unlock_irqrestore(&il->lock, flags);
4923 cmd.critical_temperature_R =
4924 cpu_to_le32(il->hw_params.ct_kill_threshold);
4926 ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG,
4929 IL_ERR("C_CT_KILL_CONFIG failed\n");
4931 D_INFO("C_CT_KILL_CONFIG "
4933 "critical temperature is %d\n",
4934 il->hw_params.ct_kill_threshold);
/* Static TX-queue -> FIFO mapping applied in il4965_alive_notify();
 * initializer entries are not visible in this extract. */
4937 static const s8 default_queue_to_tx_fifo[] = {
/* IL_MASK(lo, hi): bitmask with bits lo..hi (inclusive) set. */
4947 #define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
/*
 * il4965_alive_notify - program the 4965 Tx scheduler after the uCode
 * reports ALIVE.
 *
 * Under il->lock: zeroes the scheduler context/translate SRAM areas,
 * points the scheduler at the byte-count tables, enables the Tx DMA
 * channels, configures per-queue window/frame-limit registers, and maps
 * each queue to its FIFO via default_queue_to_tx_fifo[].
 */
4949 static int il4965_alive_notify(struct il_priv *il)
4952 unsigned long flags;
4956 spin_lock_irqsave(&il->lock, flags);
4958 /* Clear 4965's internal Tx Scheduler data base */
4959 il->scd_base_addr = il_rd_prph(il,
4960 IL49_SCD_SRAM_BASE_ADDR);
4961 a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
4962 for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
4963 il_write_targ_mem(il, a, 0);
4964 for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
4965 il_write_targ_mem(il, a, 0);
4966 for (; a < il->scd_base_addr +
4967 IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num); a += 4)
4968 il_write_targ_mem(il, a, 0);
4970 /* Tel 4965 where to find Tx byte count tables */
4971 il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR,
4972 il->scd_bc_tbls.dma >> 10);
4974 /* Enable DMA channel */
4975 for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
4977 FH_TCSR_CHNL_TX_CONFIG_REG(chan),
4978 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4979 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
4981 /* Update FH chicken bits */
4982 reg_val = il_rd(il, FH_TX_CHICKEN_BITS_REG);
4983 il_wr(il, FH_TX_CHICKEN_BITS_REG,
4984 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
4986 /* Disable chain mode for all queues */
4987 il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);
4989 /* Initialize each Tx queue (including the command queue) */
4990 for (i = 0; i < il->hw_params.max_txq_num; i++) {
4992 /* TFD circular buffer read/write idxes */
4993 il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
4994 il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));
4996 /* Max Tx Window size for Scheduler-ACK mode */
4997 il_write_targ_mem(il, il->scd_base_addr +
4998 IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
5000 IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
5001 IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
5004 il_write_targ_mem(il, il->scd_base_addr +
5005 IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
5008 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
5009 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
5012 il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
5013 (1 << il->hw_params.max_txq_num) - 1);
5015 /* Activate all Tx DMA/FIFO channels */
5016 il4965_txq_set_sched(il, IL_MASK(0, 6));
5018 il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);
5020 /* make sure all queue are not stopped */
5021 memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
5022 for (i = 0; i < 4; i++)
5023 atomic_set(&il->queue_stop_count[i], 0);
5025 /* reset to 0 to enable all the queue first */
5026 il->txq_ctx_active_msk = 0;
5027 /* Map each Tx/cmd queue to its corresponding fifo */
5028 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
5030 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
5031 int ac = default_queue_to_tx_fifo[i];
5033 il_txq_ctx_activate(il, i);
/* Queues marked unused in the map get activated but no status. */
5035 if (ac == IL_TX_FIFO_UNUSED)
5038 il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
5041 spin_unlock_irqrestore(&il->lock, flags);
/*
5047 * il4965_alive_start - called after N_ALIVE notification received
5048 * from protocol/runtime uCode (initialization uCode's
5049 * Alive gets handled by il_init_alive_start()).
 *
 * Verifies the runtime image, programs the scheduler via
 * il4965_alive_notify(), then brings the stack up: sets S_ALIVE/S_READY,
 * (re)commits RXON, configures BT coex and CT-kill, and wakes waiters.
 * On any failure it falls through to queueing il->restart.
 */
5051 static void il4965_alive_start(struct il_priv *il)
5054 struct il_rxon_context *ctx = &il->ctx;
5056 D_INFO("Runtime Alive received.\n");
5058 if (il->card_alive.is_valid != UCODE_VALID_OK) {
5059 /* We had an error bringing up the hardware, so take it
5060 * all the way back down so we can try again */
5061 D_INFO("Alive failed.\n");
5065 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
5066 * This is a paranoid check, because we would not have gotten the
5067 * "runtime" alive if code weren't properly loaded. */
5068 if (il4965_verify_ucode(il)) {
5069 /* Runtime instruction load was bad;
5070 * take it all the way back down so we can try again */
5071 D_INFO("Bad runtime uCode load.\n");
5075 ret = il4965_alive_notify(il);
5078 "Could not complete ALIVE transition [ntf]: %d\n", ret);
5083 /* After the ALIVE response, we can send host commands to the uCode */
5084 set_bit(S_ALIVE, &il->status);
5086 /* Enable watchdog to monitor the driver tx queues */
5087 il_setup_watchdog(il);
5089 if (il_is_rfkill(il))
5092 ieee80211_wake_queues(il->hw);
5094 il->active_rate = RATES_MASK;
5096 if (il_is_associated_ctx(ctx)) {
5097 struct il_rxon_cmd *active_rxon =
5098 (struct il_rxon_cmd *)&ctx->active;
5099 /* apply any changes in staging */
5100 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
5101 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5103 /* Initialize our rx_config data */
5104 il_connection_init_rx_config(il, &il->ctx);
5106 if (il->cfg->ops->hcmd->set_rxon_chain)
5107 il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
5110 /* Configure bluetooth coexistence if enabled */
5111 il_send_bt_config(il);
5113 il4965_reset_run_time_calib(il);
5115 set_bit(S_READY, &il->status);
5117 /* Configure the adapter for unassociated operation */
5118 il_commit_rxon(il, ctx);
5120 /* At this point, the NIC is initialized and operational */
5121 il4965_rf_kill_ct_config(il);
5123 D_INFO("ALIVE processing complete.\n");
5124 wake_up(&il->wait_command_queue);
5126 il_power_update_mode(il, true);
5127 D_INFO("Updated power mode\n");
/* Error path (label not visible in this extract): restart the NIC. */
5132 queue_work(il->workqueue, &il->restart);
5135 static void il4965_cancel_deferred_work(struct il_priv *il);
/*
 * __il4965_down - tear the NIC down. Caller holds il->mutex.
 *
 * Cancels scans and the watchdog, clears station tables, resets the
 * on-board processor, disables interrupts, stops Tx/Rx machinery and
 * powers down DMA clocks.  Most status bits are cleared; the HW rfkill
 * (and related) bits are preserved so rfkill state survives the down.
 */
5137 static void __il4965_down(struct il_priv *il)
5139 unsigned long flags;
5142 D_INFO(DRV_NAME " is going down\n");
5144 il_scan_cancel_timeout(il, 200);
5146 exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
5148 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
5149 * to prevent rearm timer */
5150 del_timer_sync(&il->watchdog);
5152 il_clear_ucode_stations(il, NULL);
5153 il_dealloc_bcast_stations(il);
5154 il_clear_driver_stations(il);
5156 /* Unblock any waiting calls */
5157 wake_up_all(&il->wait_command_queue);
5159 /* Wipe out the EXIT_PENDING status bit if we are not actually
5160 * exiting the module */
5162 clear_bit(S_EXIT_PENDING, &il->status);
5164 /* stop and reset the on-board processor */
5165 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
5167 /* tell the device to stop sending interrupts */
5168 spin_lock_irqsave(&il->lock, flags);
5169 il_disable_interrupts(il);
5170 spin_unlock_irqrestore(&il->lock, flags);
5171 il4965_synchronize_irq(il);
5173 if (il->mac80211_registered)
5174 ieee80211_stop_queues(il->hw);
5176 /* If we have not previously called il_init() then
5177 * clear all bits but the RF Kill bit and return */
5178 if (!il_is_init(il)) {
5179 il->status = test_bit(S_RF_KILL_HW, &il->status) <<
5181 test_bit(S_GEO_CONFIGURED, &il->status) <<
5183 test_bit(S_EXIT_PENDING, &il->status) <<
5188 /* ...otherwise clear out all the status bits but the RF Kill
5189 * bit and continue taking the NIC down. */
5190 il->status &= test_bit(S_RF_KILL_HW, &il->status) <<
5192 test_bit(S_GEO_CONFIGURED, &il->status) <<
5194 test_bit(S_FW_ERROR, &il->status) <<
5196 test_bit(S_EXIT_PENDING, &il->status) <<
5199 il4965_txq_ctx_stop(il);
5200 il4965_rxq_stop(il);
5202 /* Power-down device's busmaster DMA clocks */
5203 il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
5206 /* Make sure (redundant) we've released our request to stay awake */
5207 il_clear_bit(il, CSR_GP_CNTRL,
5208 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5210 /* Stop the device, and put it in low power state */
5214 memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
5216 dev_kfree_skb(il->beacon_skb);
5217 il->beacon_skb = NULL;
5219 /* clear out any free frames */
5220 il4965_clear_free_frames(il);
/* il4965_down - mutex-holding wrapper around __il4965_down(), then
 * cancels deferred work outside the lock (workers take il->mutex). */
5223 static void il4965_down(struct il_priv *il)
5225 mutex_lock(&il->mutex);
5227 mutex_unlock(&il->mutex);
5229 il4965_cancel_deferred_work(il);
/* Poll timeout for the NIC_READY handshake (microsecond-scale poll arg). */
5232 #define HW_READY_TIMEOUT (50)
/*
 * il4965_set_hw_ready - request NIC readiness and poll for the
 * NIC_READY bit; records the result in il->hw_ready.
 */
5234 static int il4965_set_hw_ready(struct il_priv *il)
5238 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
5239 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
5241 /* See if we got it */
5242 ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5243 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5244 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5246 if (ret != -ETIMEDOUT)
5247 il->hw_ready = true;
5249 il->hw_ready = false;
5251 D_INFO("hardware %s\n",
5252 (il->hw_ready == 1) ? "ready" : "not ready");
/*
 * il4965_prepare_card_hw - bring the card to the "ready" state.
 *
 * Tries il4965_set_hw_ready() first; if the HW is not yet ready, asserts
 * the PREPARE bit, polls for PREPARE_DONE (150 ms budget), then rechecks
 * readiness.
 */
5256 static int il4965_prepare_card_hw(struct il_priv *il)
5260 D_INFO("il4965_prepare_card_hw enter\n");
5262 ret = il4965_set_hw_ready(il);
5266 /* If HW is not ready, prepare the conditions to check again */
5267 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
5268 CSR_HW_IF_CONFIG_REG_PREPARE);
5270 ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5271 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
5272 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
5274 /* HW should be ready by now, check again. */
5275 if (ret != -ETIMEDOUT)
5276 il4965_set_hw_ready(il);
/* How many times to retry the bootstrap-load/NIC-start sequence. */
5281 #define MAX_HW_RESTARTS 5
/*
 * __il4965_up - bring the NIC up. Caller holds il->mutex.
 *
 * Validates preconditions (no exit pending, ucode images present, HW
 * ready, rfkill switch), initializes the NIC, clears rfkill handshake
 * bits, restores the backup ucode data image, then loads the bootstrap
 * uCode and starts the card — retrying up to MAX_HW_RESTARTS times.
 */
5283 static int __il4965_up(struct il_priv *il)
5288 if (test_bit(S_EXIT_PENDING, &il->status)) {
5289 IL_WARN("Exit pending; will not bring the NIC up\n");
5293 if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
5294 IL_ERR("ucode not available for device bringup\n");
5298 ret = il4965_alloc_bcast_station(il, &il->ctx);
5300 il_dealloc_bcast_stations(il);
5304 il4965_prepare_card_hw(il);
5306 if (!il->hw_ready) {
5307 IL_WARN("Exit HW not ready\n");
5311 /* If platform's RF_KILL switch is NOT set to KILL */
5313 CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5314 clear_bit(S_RF_KILL_HW, &il->status);
5316 set_bit(S_RF_KILL_HW, &il->status);
5318 if (il_is_rfkill(il)) {
5319 wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
5321 il_enable_interrupts(il);
5322 IL_WARN("Radio disabled by HW RF Kill switch\n");
5326 _il_wr(il, CSR_INT, 0xFFFFFFFF);
5328 /* must be initialised before il_hw_nic_init */
5329 il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
5331 ret = il4965_hw_nic_init(il);
5333 IL_ERR("Unable to init nic\n");
5337 /* make sure rfkill handshake bits are cleared */
5338 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5339 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
5340 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5342 /* clear (again), then enable host interrupts */
5343 _il_wr(il, CSR_INT, 0xFFFFFFFF);
5344 il_enable_interrupts(il);
5346 /* really make sure rfkill handshake bits are cleared */
5347 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5348 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5350 /* Copy original ucode data image from disk into backup cache.
5351 * This will be used to initialize the on-board processor's
5352 * data SRAM for a clean start when the runtime program first loads. */
5353 memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
5354 il->ucode_data.len);
5356 for (i = 0; i < MAX_HW_RESTARTS; i++) {
5358 /* load bootstrap state machine,
5359 * load bootstrap program into processor's memory,
5360 * prepare to load the "initialize" uCode */
5361 ret = il->cfg->ops->lib->load_ucode(il);
5364 IL_ERR("Unable to set up bootstrap uCode: %d\n",
5369 /* start card; "initialize" will load runtime ucode */
5370 il4965_nic_start(il);
5372 D_INFO(DRV_NAME " is coming up\n");
/* All retries exhausted: force a down via S_EXIT_PENDING toggle. */
5377 set_bit(S_EXIT_PENDING, &il->status);
5379 clear_bit(S_EXIT_PENDING, &il->status);
5381 /* tried to restart and config the device for as long as our
5382 * patience could withstand */
5383 IL_ERR("Unable to initialize device after %d attempts.\n", i);
5388 /*****************************************************************************
5390 * Workqueue callbacks
5392 *****************************************************************************/
/* Delayed-work handler: run the per-device init_alive_start hook after
 * the init uCode's ALIVE, unless the driver is shutting down. */
5394 static void il4965_bg_init_alive_start(struct work_struct *data)
5396 struct il_priv *il =
5397 container_of(data, struct il_priv, init_alive_start.work);
5399 mutex_lock(&il->mutex);
5400 if (test_bit(S_EXIT_PENDING, &il->status))
5403 il->cfg->ops->lib->init_alive_start(il);
5405 mutex_unlock(&il->mutex);
/* Delayed-work handler: run il4965_alive_start() for the runtime uCode's
 * ALIVE notification, unless the driver is shutting down. */
5408 static void il4965_bg_alive_start(struct work_struct *data)
5410 struct il_priv *il =
5411 container_of(data, struct il_priv, alive_start.work);
5413 mutex_lock(&il->mutex);
5414 if (test_bit(S_EXIT_PENDING, &il->status))
5417 il4965_alive_start(il);
5419 mutex_unlock(&il->mutex);
/* Work handler: run chain-noise and sensitivity calibrations from the
 * latest stats snapshot; skipped while exiting or scanning, and gated
 * on il->start_calib. */
5422 static void il4965_bg_run_time_calib_work(struct work_struct *work)
5424 struct il_priv *il = container_of(work, struct il_priv,
5425 run_time_calib_work);
5427 mutex_lock(&il->mutex);
5429 if (test_bit(S_EXIT_PENDING, &il->status) ||
5430 test_bit(S_SCANNING, &il->status)) {
5431 mutex_unlock(&il->mutex);
5435 if (il->start_calib) {
5436 il4965_chain_noise_calibration(il,
5437 (void *)&il->_4965.stats);
5438 il4965_sensitivity_calibration(il,
5439 (void *)&il->_4965.stats);
5442 mutex_unlock(&il->mutex);
/*
 * Work handler: restart the device.  On a firmware error (S_FW_ERROR)
 * the deferred work is cancelled and mac80211 is asked to restart the
 * hw; otherwise the down/up path is taken directly under il->mutex.
 */
5445 static void il4965_bg_restart(struct work_struct *data)
5447 struct il_priv *il = container_of(data, struct il_priv, restart);
5449 if (test_bit(S_EXIT_PENDING, &il->status))
5452 if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
5453 mutex_lock(&il->mutex);
5459 mutex_unlock(&il->mutex);
5460 il4965_cancel_deferred_work(il);
5461 ieee80211_restart_hw(il->hw);
5465 mutex_lock(&il->mutex);
5466 if (test_bit(S_EXIT_PENDING, &il->status)) {
5467 mutex_unlock(&il->mutex);
5472 mutex_unlock(&il->mutex);
/* Work handler: refill the RX buffer queue (process context, may sleep),
 * skipped while the driver is exiting. */
5476 static void il4965_bg_rx_replenish(struct work_struct *data)
5478 struct il_priv *il =
5479 container_of(data, struct il_priv, rx_replenish);
5481 if (test_bit(S_EXIT_PENDING, &il->status))
5484 mutex_lock(&il->mutex);
5485 il4965_rx_replenish(il);
5486 mutex_unlock(&il->mutex);
5489 /*****************************************************************************
5491 * mac80211 entry point functions
5493 *****************************************************************************/
/* How long il4965_mac_start() waits for the runtime uCode's ALIVE. */
5495 #define UCODE_READY_TIMEOUT (4 * HZ)
5498 * Not a mac80211 entry point function, but it fits in with all the
5499 * other mac80211 functions grouped here.
/*
 * il4965_mac_setup_register - describe driver capabilities to mac80211
 * and register the hw.
 *
 * @max_probe_length: firmware probe-request limit; bounds
 *	wiphy->max_scan_ie_len after subtracting the 802.11 header (24)
 *	and a zero-length SSID element (2).
 */
5501 static int il4965_mac_setup_register(struct il_priv *il,
5502 u32 max_probe_length)
5505 struct ieee80211_hw *hw = il->hw;
5507 hw->rate_control_algorithm = "iwl-4965-rs";
5509 /* Tell mac80211 our characteristics */
5510 hw->flags = IEEE80211_HW_SIGNAL_DBM |
5511 IEEE80211_HW_AMPDU_AGGREGATION |
5512 IEEE80211_HW_NEED_DTIM_PERIOD |
5513 IEEE80211_HW_SPECTRUM_MGMT |
5514 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
5516 if (il->cfg->sku & IL_SKU_N)
5517 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
5518 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
5520 hw->sta_data_size = sizeof(struct il_station_priv);
5521 hw->vif_data_size = sizeof(struct il_vif_priv);
5523 hw->wiphy->interface_modes |= il->ctx.interface_modes;
5524 hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;
5526 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
5527 WIPHY_FLAG_DISABLE_BEACON_HINTS;
5530 * For now, disable PS by default because it affects
5531 * RX performance significantly.
5533 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
5535 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
5536 /* we create the 802.11 header and a zero-length SSID element */
5537 hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
5539 /* Default value; 4 EDCA QOS priorities */
5542 hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
5544 if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
5545 il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5546 &il->bands[IEEE80211_BAND_2GHZ];
5547 if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
5548 il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5549 &il->bands[IEEE80211_BAND_5GHZ];
5553 ret = ieee80211_register_hw(il->hw);
5555 IL_ERR("Failed to register hw (error %d)\n", ret);
5558 il->mac80211_registered = 1;
/*
 * il4965_mac_start - mac80211 start() callback.
 *
 * Brings the NIC up under il->mutex, then (unless rfkilled) waits up to
 * UCODE_READY_TIMEOUT for S_READY to be set by the ALIVE path before
 * enabling the LED.
 */
5564 int il4965_mac_start(struct ieee80211_hw *hw)
5566 struct il_priv *il = hw->priv;
5569 D_MAC80211("enter\n");
5571 /* we should be verifying the device is ready to be opened */
5572 mutex_lock(&il->mutex);
5573 ret = __il4965_up(il);
5574 mutex_unlock(&il->mutex);
5579 if (il_is_rfkill(il))
5582 D_INFO("Start UP work done.\n");
5584 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
5585 * mac80211 will not be run successfully. */
5586 ret = wait_event_timeout(il->wait_command_queue,
5587 test_bit(S_READY, &il->status),
5588 UCODE_READY_TIMEOUT);
5590 if (!test_bit(S_READY, &il->status)) {
5591 IL_ERR("START_ALIVE timeout after %dms.\n",
5592 jiffies_to_msecs(UCODE_READY_TIMEOUT));
5597 il4965_led_enable(il);
5601 D_MAC80211("leave\n");
/*
 * il4965_mac_stop - mac80211 stop() callback.
 *
 * Flushes the workqueue and re-arms the rfkill interrupt so userspace
 * still sees rfkill changes while the interface is down.
 */
5607 void il4965_mac_stop(struct ieee80211_hw *hw)
5609 struct il_priv *il = hw->priv;
5611 D_MAC80211("enter\n");
5618 flush_workqueue(il->workqueue);
5620 /* User space software may expect getting rfkill changes
5621 * even if interface is down */
5622 _il_wr(il, CSR_INT, 0xFFFFFFFF);
5623 il_enable_rfkill_int(il);
5625 D_MAC80211("leave\n");
/* mac80211 tx() callback: hand the skb to il4965_tx_skb(); on failure
 * the frame is dropped (freed) — mac80211 tx cannot return an error. */
5628 void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
5630 struct il_priv *il = hw->priv;
5632 D_MACDUMP("enter\n");
5634 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
5635 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
5637 if (il4965_tx_skb(il, skb))
5638 dev_kfree_skb_any(skb);
5640 D_MACDUMP("leave\n");
/* mac80211 update_tkip_key() callback: forward the new phase-1 key
 * material to the device via il4965_update_tkip_key(). */
5643 void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
5644 struct ieee80211_vif *vif,
5645 struct ieee80211_key_conf *keyconf,
5646 struct ieee80211_sta *sta,
5647 u32 iv32, u16 *phase1key)
5649 struct il_priv *il = hw->priv;
5650 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
5652 D_MAC80211("enter\n");
5654 il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta,
5657 D_MAC80211("leave\n");
/*
 * il4965_mac_set_key - mac80211 set_key() callback.
 *
 * Rejected when sw_crypto is forced.  Distinguishes "default" (legacy
 * group-only) WEP keys from dynamic per-station keys and dispatches to
 * the matching set/remove helper.  Any in-progress scan is cancelled
 * first since key programming uses host commands.
 */
5660 int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5661 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5662 struct ieee80211_key_conf *key)
5664 struct il_priv *il = hw->priv;
5665 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
5666 struct il_rxon_context *ctx = vif_priv->ctx;
5669 bool is_default_wep_key = false;
5671 D_MAC80211("enter\n");
5673 if (il->cfg->mod_params->sw_crypto) {
5674 D_MAC80211("leave - hwcrypto disabled\n");
5678 sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
5679 if (sta_id == IL_INVALID_STATION)
5682 mutex_lock(&il->mutex);
5683 il_scan_cancel_timeout(il, 100);
5686 * If we are getting WEP group key and we didn't receive any key mapping
5687 * so far, we are in legacy wep mode (group key only), otherwise we are
5689 * In legacy wep mode, we use another host command to the uCode.
5691 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5692 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
5695 is_default_wep_key = !ctx->key_mapping_keys;
5697 is_default_wep_key =
5698 (key->hw_key_idx == HW_KEY_DEFAULT);
/* SET_KEY branch (switch header not visible in this extract). */
5703 if (is_default_wep_key)
5704 ret = il4965_set_default_wep_key(il,
5705 vif_priv->ctx, key);
5707 ret = il4965_set_dynamic_key(il, vif_priv->ctx,
5710 D_MAC80211("enable hwcrypto key\n");
/* DISABLE_KEY branch. */
5713 if (is_default_wep_key)
5714 ret = il4965_remove_default_wep_key(il, ctx, key);
5716 ret = il4965_remove_dynamic_key(il, ctx,
5719 D_MAC80211("disable hwcrypto key\n");
5725 mutex_unlock(&il->mutex);
5726 D_MAC80211("leave\n");
/*
 * il4965_mac_ampdu_action - mac80211 ampdu_action() callback.
 *
 * Dispatches RX/TX aggregation start/stop to the il4965 agg helpers;
 * returns -EACCES-style failure early when the SKU has no 11n support
 * (exact error value not visible in this extract).
 */
5731 int il4965_mac_ampdu_action(struct ieee80211_hw *hw,
5732 struct ieee80211_vif *vif,
5733 enum ieee80211_ampdu_mlme_action action,
5734 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5737 struct il_priv *il = hw->priv;
5740 D_HT("A-MPDU action on addr %pM tid %d\n",
5743 if (!(il->cfg->sku & IL_SKU_N))
5746 mutex_lock(&il->mutex);
5749 case IEEE80211_AMPDU_RX_START:
5751 ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
5753 case IEEE80211_AMPDU_RX_STOP:
5755 ret = il4965_sta_rx_agg_stop(il, sta, tid);
/* During teardown report success regardless of the stop result. */
5756 if (test_bit(S_EXIT_PENDING, &il->status))
5759 case IEEE80211_AMPDU_TX_START:
5761 ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
5763 case IEEE80211_AMPDU_TX_STOP:
5765 ret = il4965_tx_agg_stop(il, vif, sta, tid);
5766 if (test_bit(S_EXIT_PENDING, &il->status))
5769 case IEEE80211_AMPDU_TX_OPERATIONAL:
5773 mutex_unlock(&il->mutex);
/*
 * il4965_mac_sta_add - mac80211 sta_add() callback.
 *
 * Adds the station to the uCode/driver tables via
 * il_add_station_common(), stores the assigned sta_id in the station's
 * drv_priv, and initializes rate scaling for it.
 */
5778 int il4965_mac_sta_add(struct ieee80211_hw *hw,
5779 struct ieee80211_vif *vif,
5780 struct ieee80211_sta *sta)
5782 struct il_priv *il = hw->priv;
5783 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
5784 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
/* On a STATION-type vif the peer being added is the AP. */
5785 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
5789 D_INFO("received request to add station %pM\n",
5791 mutex_lock(&il->mutex);
5792 D_INFO("proceeding to add station %pM\n",
5794 sta_priv->common.sta_id = IL_INVALID_STATION;
5796 atomic_set(&sta_priv->pending_frames, 0);
5798 ret = il_add_station_common(il, vif_priv->ctx, sta->addr,
5799 is_ap, sta, &sta_id);
5801 IL_ERR("Unable to add station %pM (%d)\n",
5803 /* Should we return success if return code is EEXIST ? */
5804 mutex_unlock(&il->mutex);
5808 sta_priv->common.sta_id = sta_id;
5810 /* Initialize rate scaling */
5811 D_INFO("Initializing rate scaling for station %pM\n",
5813 il4965_rs_rate_init(il, sta, sta_id);
5814 mutex_unlock(&il->mutex);
/*
 * il4965_mac_channel_switch - mac80211 channel_switch() callback.
 *
 * Validates state (not rfkilled/exiting/scanning, associated, target
 * channel differs and is valid), updates HT40 configuration and the
 * staging RXON under il->lock, then asks the device to perform the
 * switch.  On device failure the pending state is rolled back and
 * mac80211 is notified via ieee80211_chswitch_done(..., false).
 */
5819 void il4965_mac_channel_switch(struct ieee80211_hw *hw,
5820 struct ieee80211_channel_switch *ch_switch)
5822 struct il_priv *il = hw->priv;
5823 const struct il_channel_info *ch_info;
5824 struct ieee80211_conf *conf = &hw->conf;
5825 struct ieee80211_channel *channel = ch_switch->channel;
5826 struct il_ht_config *ht_conf = &il->current_ht_config;
5828 struct il_rxon_context *ctx = &il->ctx;
5831 D_MAC80211("enter\n");
5833 mutex_lock(&il->mutex);
5835 if (il_is_rfkill(il))
5838 if (test_bit(S_EXIT_PENDING, &il->status) ||
5839 test_bit(S_SCANNING, &il->status) ||
5840 test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
5843 if (!il_is_associated_ctx(ctx))
5846 if (!il->cfg->ops->lib->set_channel_switch)
5849 ch = channel->hw_value;
/* Already on the requested channel: nothing to do. */
5850 if (le16_to_cpu(ctx->active.channel) == ch)
5853 ch_info = il_get_channel_info(il, channel->band, ch);
5854 if (!il_is_channel_valid(ch_info)) {
5855 D_MAC80211("invalid channel\n");
5859 spin_lock_irq(&il->lock);
5861 il->current_ht_config.smps = conf->smps_mode;
5863 /* Configure HT40 channels */
5864 ctx->ht.enabled = conf_is_ht(conf);
5865 if (ctx->ht.enabled) {
5866 if (conf_is_ht40_minus(conf)) {
5867 ctx->ht.extension_chan_offset =
5868 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
5869 ctx->ht.is_40mhz = true;
5870 } else if (conf_is_ht40_plus(conf)) {
5871 ctx->ht.extension_chan_offset =
5872 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
5873 ctx->ht.is_40mhz = true;
5875 ctx->ht.extension_chan_offset =
5876 IEEE80211_HT_PARAM_CHA_SEC_NONE;
5877 ctx->ht.is_40mhz = false;
5880 ctx->ht.is_40mhz = false;
/* Channel change invalidates the staged flags. */
5882 if ((le16_to_cpu(ctx->staging.channel) != ch))
5883 ctx->staging.flags = 0;
5885 il_set_rxon_channel(il, channel, ctx);
5886 il_set_rxon_ht(il, ht_conf);
5887 il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
5889 spin_unlock_irq(&il->lock);
5893 * at this point, staging_rxon has the
5894 * configuration for channel switch
5896 set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
5897 il->switch_channel = cpu_to_le16(ch);
5898 if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
5899 clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
5900 il->switch_channel = 0;
5901 ieee80211_chswitch_done(ctx->vif, false);
5905 mutex_unlock(&il->mutex);
5906 D_MAC80211("leave\n");
/*
 * il4965_configure_filter - mac80211 configure_filter() callback.
 *
 * Translates FIF_* filter flags into RXON filter_flags bits (set via
 * filter_or, cleared via filter_nand) and applies them to the staging
 * RXON; the change is committed later rather than immediately.
 */
5909 void il4965_configure_filter(struct ieee80211_hw *hw,
5910 unsigned int changed_flags,
5911 unsigned int *total_flags,
5914 struct il_priv *il = hw->priv;
5915 __le32 filter_or = 0, filter_nand = 0;
5917 #define CHK(test, flag) do { \
5918 if (*total_flags & (test)) \
5919 filter_or |= (flag); \
5921 filter_nand |= (flag); \
5924 D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
5925 changed_flags, *total_flags);
5927 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
5928 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
5929 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
5930 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
5934 mutex_lock(&il->mutex);
5936 il->ctx.staging.filter_flags &= ~filter_nand;
5937 il->ctx.staging.filter_flags |= filter_or;
5940 * Not committing directly because hardware can perform a scan,
5941 * but we'll eventually commit the filter flags change anyway.
5944 mutex_unlock(&il->mutex);
5947 * Receiving all multicast frames is always enabled by the
5948 * default flags setup in il_connection_init_rx_config()
5949 * since we currently do not support programming multicast
5950 * filters into the device.
5952 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
5953 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
5956 /*****************************************************************************
5958 * driver setup and teardown
5960 *****************************************************************************/
/* Work handler: resend TX power settings after a temperature change and
 * record the temperature used, so is_calib_needed stops re-triggering.
 * Skipped while exiting or scanning. */
5962 static void il4965_bg_txpower_work(struct work_struct *work)
5964 struct il_priv *il = container_of(work, struct il_priv,
5967 mutex_lock(&il->mutex);
5969 /* If a scan happened to start before we got here
5970 * then just return; the stats notification will
5971 * kick off another scheduled work to compensate for
5972 * any temperature delta we missed here. */
5973 if (test_bit(S_EXIT_PENDING, &il->status) ||
5974 test_bit(S_SCANNING, &il->status))
5977 /* Regardless of if we are associated, we must reconfigure the
5978 * TX power since frames can be sent on non-radar channels while
5980 il->cfg->ops->lib->send_tx_power(il);
5982 /* Update last_temperature to keep is_calib_needed from running
5983 * when it isn't needed... */
5984 il->last_temperature = il->temperature;
5986 mutex_unlock(&il->mutex);
/*
 * il4965_setup_deferred_work - create the driver workqueue and register
 * all work items, timers (stats, watchdog) and the IRQ tasklet.
 */
5989 static void il4965_setup_deferred_work(struct il_priv *il)
5991 il->workqueue = create_singlethread_workqueue(DRV_NAME);
5993 init_waitqueue_head(&il->wait_command_queue);
5995 INIT_WORK(&il->restart, il4965_bg_restart);
5996 INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
5997 INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
5998 INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
5999 INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);
6001 il_setup_scan_deferred_work(il);
6003 INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
6005 init_timer(&il->stats_periodic);
6006 il->stats_periodic.data = (unsigned long)il;
6007 il->stats_periodic.function = il4965_bg_stats_periodic;
6009 init_timer(&il->watchdog);
6010 il->watchdog.data = (unsigned long)il;
6011 il->watchdog.function = il_bg_watchdog;
6013 tasklet_init(&il->irq_tasklet, (void (*)(unsigned long))
6014 il4965_irq_tasklet, (unsigned long)il);
/* Cancel the work items/timers registered in
 * il4965_setup_deferred_work() (alive_start intentionally not _sync —
 * presumably to avoid deadlock with a running handler; TODO confirm). */
6017 static void il4965_cancel_deferred_work(struct il_priv *il)
6019 cancel_work_sync(&il->txpower_work);
6020 cancel_delayed_work_sync(&il->init_alive_start);
6021 cancel_delayed_work(&il->alive_start);
6022 cancel_work_sync(&il->run_time_calib_work);
6024 il_cancel_scan_deferred_work(il);
6026 del_timer_sync(&il->stats_periodic);
/*
 * il4965_init_hw_rates - fill mac80211 rate entries from il_rates[].
 *
 * bitrate is in 100 kbps units (ieee value * 5); hw_value(_short) is the
 * rate index; CCK rates other than 1M get the short-preamble flag.
 */
6029 static void il4965_init_hw_rates(struct il_priv *il,
6030 struct ieee80211_rate *rates)
6034 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
6035 rates[i].bitrate = il_rates[i].ieee * 5;
6036 rates[i].hw_value = i; /* Rate scaling will work on idxes */
6037 rates[i].hw_value_short = i;
6039 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
6041 * If CCK != 1M then set short preamble rate flag.
6044 (il_rates[i].plcp == RATE_1M_PLCP) ?
6045 0 : IEEE80211_RATE_SHORT_PREAMBLE;
6050 * Acquire il->lock before calling this function !
/* Set both the host write pointer and the scheduler read pointer of
 * TX queue txq_id to idx. */
6052 void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
6054 il_wr(il, HBUS_TARG_WRPTR,
6055 (idx & 0xff) | (txq_id << 8));
6056 il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
/*
 * Program the scheduler status register for one TX queue: bind it to
 * a TX FIFO, set its active state (taken from il->txq_ctx_active_msk),
 * and configure scheduler-retry (block-ack aggregation) mode.
 */
6059 void il4965_tx_queue_set_status(struct il_priv *il,
6060 struct il_tx_queue *txq,
6061 int tx_fifo_id, int scd_retry)
6063 int txq_id = txq->q.id;
6065 /* Find out whether to activate Tx queue */
6066 int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
6068 /* Set up and activate */
6069 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
6070 (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
6071 (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
6072 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
6073 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
6074 IL49_SCD_QUEUE_STTS_REG_MSK);
/* Remember the mode so the TX path knows this queue does
 * scheduler-retry (aggregation) handling. */
6076 txq->sched_retry = scd_retry;
6078 D_INFO("%s %s Queue %d on AC %d\n",
6079 active ? "Activate" : "Deactivate",
6080 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
/*
 * Initialize driver-internal (software-only) state: locks, lists,
 * defaults, the regulatory channel map and the geography/band tables.
 * Returns 0 on success; on failure the channel map is freed before
 * returning the error.
 */
6084 static int il4965_init_drv(struct il_priv *il)
6088 spin_lock_init(&il->sta_lock);
6089 spin_lock_init(&il->hcmd_lock);
6091 INIT_LIST_HEAD(&il->free_frames);
6093 mutex_init(&il->mutex);
/* Defaults: 2.4 GHz band, station mode, static SMPS, standard
 * missed-beacon threshold.  Channel/rate tables filled in below. */
6095 il->ieee_channels = NULL;
6096 il->ieee_rates = NULL;
6097 il->band = IEEE80211_BAND_2GHZ;
6099 il->iw_mode = NL80211_IFTYPE_STATION;
6100 il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
6101 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
6103 /* initialize force reset */
6104 il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
6106 /* Choose which receivers/antennas to use */
6107 if (il->cfg->ops->hcmd->set_rxon_chain)
6108 il->cfg->ops->hcmd->set_rxon_chain(il,
6111 il_init_scan_params(il);
/* Build the regulatory channel map from EEPROM data. */
6113 ret = il_init_channel_map(il);
6115 IL_ERR("initializing regulatory failed: %d\n", ret);
6119 ret = il_init_geos(il);
6121 IL_ERR("initializing geos failed: %d\n", ret);
6122 goto err_free_channel_map;
6124 il4965_init_hw_rates(il, il->ieee_rates);
6128 err_free_channel_map:
6129 il_free_channel_map(il);
/*
 * Release the driver-internal state allocated by il4965_init_drv()
 * (calibration results, channel map) plus the scan command buffer.
 */
6134 static void il4965_uninit_drv(struct il_priv *il)
6136 il4965_calib_free_results(il);
6138 il_free_channel_map(il);
6139 kfree(il->scan_cmd);
/*
 * Read hardware identification: the CSR hardware revision and
 * workaround-revision registers, plus the PCI revision ID.
 */
6142 static void il4965_hw_detect(struct il_priv *il)
6144 il->hw_rev = _il_rd(il, CSR_HW_REV);
6145 il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
6146 il->rev_id = il->pci_dev->revision;
6147 D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
/*
 * Fill in hardware parameters that depend on module options (A-MSDU
 * RX buffer size, 11n enablement) and then delegate the remaining
 * device-specific setup to the config ops.  Returns the ops result.
 */
6150 static int il4965_set_hw_params(struct il_priv *il)
6152 il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
6153 il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
/* RX buffer page order: 8K pages when amsdu_size_8K is set,
 * otherwise 4K. */
6154 if (il->cfg->mod_params->amsdu_size_8K)
6155 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
6157 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
6159 il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
/* Honour the 11n_disable module parameter by stripping the N SKU. */
6161 if (il->cfg->mod_params->disable_11n)
6162 il->cfg->sku &= ~IL_SKU_N;
6164 /* Device-specific setup */
6165 return il->cfg->ops->lib->set_hw_params(il);
/* Per-AC mapping tables for the BSS context: access category index ->
 * TX FIFO, and access category index -> TX queue.  Wired into
 * il->ctx in il4965_pci_probe(). */
6168 static const u8 il4965_bss_ac_to_fifo[] = {
6175 static const u8 il4965_bss_ac_to_queue[] = {
/*
 * PCI probe entry point.  Runs the staged bring-up for a 4965 device:
 * 1) allocate ieee80211_hw/il_priv, 2) enable the PCI device and set
 * 36-bit (fallback 32-bit) DMA masks, 3) map BAR0 and detect the HW
 * revision, 4) read the EEPROM and MAC address, 5) set HW params,
 * init driver state, install the MSI/shared IRQ handler, deferred
 * work and handlers, enable the RFKILL interrupt, and finally kick
 * off asynchronous firmware loading.  On failure, unwinds through
 * the out_* labels in reverse acquisition order.
 */
6180 il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6184 struct ieee80211_hw *hw;
6185 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
6186 unsigned long flags;
6189 /************************
6190 * 1. Allocating HW data
6191 ************************/
6193 hw = il_alloc_all(cfg);
6199 /* At this point both hw and il are allocated. */
/* Configure the single (BSS) context: uCode command IDs, AC->FIFO
 * and AC->queue maps, and permitted interface modes. */
6203 il->ctx.always_active = true;
6204 il->ctx.is_active = true;
6205 il->ctx.rxon_cmd = C_RXON;
6206 il->ctx.rxon_timing_cmd = C_RXON_TIMING;
6207 il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
6208 il->ctx.qos_cmd = C_QOS_PARAM;
6209 il->ctx.ap_sta_id = IL_AP_ID;
6210 il->ctx.wep_key_cmd = C_WEPKEY;
6211 il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
6212 il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
6213 il->ctx.exclusive_interface_modes =
6214 BIT(NL80211_IFTYPE_ADHOC);
6215 il->ctx.interface_modes =
6216 BIT(NL80211_IFTYPE_STATION);
6217 il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
6218 il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
6219 il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
6220 il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
6222 SET_IEEE80211_DEV(hw, &pdev->dev);
6224 D_INFO("*** LOAD DRIVER ***\n");
6227 il->inta_mask = CSR_INI_SET_MASK;
/* Traffic log is diagnostic-only, so failure is non-fatal. */
6229 if (il_alloc_traffic_mem(il))
6230 IL_ERR("Not enough memory to generate traffic log\n");
6232 /**************************
6233 * 2. Initializing PCI bus
6234 **************************/
/* ASPM power states are disabled — presumably a HW erratum
 * workaround; TODO confirm against errata list. */
6235 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6236 PCIE_LINK_STATE_CLKPM);
6238 if (pci_enable_device(pdev)) {
6240 goto out_ieee80211_free_hw;
6243 pci_set_master(pdev);
/* Prefer a 36-bit DMA mask; fall back to 32-bit before giving up. */
6245 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
6247 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
6249 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6251 err = pci_set_consistent_dma_mask(pdev,
6253 /* both attempts failed: */
6255 IL_WARN("No suitable DMA available.\n");
6256 goto out_pci_disable_device;
6260 err = pci_request_regions(pdev, DRV_NAME);
6262 goto out_pci_disable_device;
6264 pci_set_drvdata(pdev, il);
6267 /***********************
6268 * 3. Read REV register
6269 ***********************/
/* Map BAR 0 (whole region, length 0 = full BAR). */
6270 il->hw_base = pci_iomap(pdev, 0, 0);
6273 goto out_pci_release_regions;
6276 D_INFO("pci_resource_len = 0x%08llx\n",
6277 (unsigned long long) pci_resource_len(pdev, 0));
6278 D_INFO("pci_resource_base = %p\n", il->hw_base);
6280 /* these spin locks will be used in apm_ops.init and EEPROM access
6281 * we should init now
6283 spin_lock_init(&il->reg_lock);
6284 spin_lock_init(&il->lock);
6287 * stop and reset the on-board processor just in case it is in a
6288 * strange state ... like being left stranded by a primary kernel
6289 * and this is now the kdump kernel trying to start up
6291 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
6293 il4965_hw_detect(il);
6294 IL_INFO("Detected %s, REV=0x%X\n",
6295 il->cfg->name, il->hw_rev);
6297 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6298 * PCI Tx retries from interfering with C3 CPU state */
6299 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
6301 il4965_prepare_card_hw(il);
6302 if (!il->hw_ready) {
6303 IL_WARN("Failed, HW not ready\n");
6310 /* Read the EEPROM */
6311 err = il_eeprom_init(il);
6313 IL_ERR("Unable to init EEPROM\n");
6316 err = il4965_eeprom_check_version(il);
6318 goto out_free_eeprom;
6321 goto out_free_eeprom;
6323 /* extract MAC Address */
6324 il4965_eeprom_get_mac(il, il->addresses[0].addr);
6325 D_INFO("MAC address: %pM\n", il->addresses[0].addr);
6326 il->hw->wiphy->addresses = il->addresses;
6327 il->hw->wiphy->n_addresses = 1;
6329 /************************
6330 * 5. Setup HW constants
6331 ************************/
6332 if (il4965_set_hw_params(il)) {
6333 IL_ERR("failed to set hw parameters\n");
6334 goto out_free_eeprom;
6337 /*******************
6339 *******************/
6341 err = il4965_init_drv(il);
6343 goto out_free_eeprom;
6344 /* At this point both hw and il are initialized. */
6346 /********************
6348 ********************/
/* Keep interrupts off until the handlers are fully installed. */
6349 spin_lock_irqsave(&il->lock, flags);
6350 il_disable_interrupts(il);
6351 spin_unlock_irqrestore(&il->lock, flags);
6353 pci_enable_msi(il->pci_dev);
6355 err = request_irq(il->pci_dev->irq, il_isr,
6356 IRQF_SHARED, DRV_NAME, il);
6358 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
6359 goto out_disable_msi;
6362 il4965_setup_deferred_work(il);
6363 il4965_setup_handlers(il);
6365 /*********************************************
6366 * 8. Enable interrupts and read RFKILL state
6367 *********************************************/
6369 /* enable rfkill interrupt: hw bug w/a */
6370 pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
6371 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
6372 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
6373 pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
6376 il_enable_rfkill_int(il);
6378 /* If platform's RF_KILL switch is NOT set to KILL */
6379 if (_il_rd(il, CSR_GP_CNTRL) &
6380 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
6381 clear_bit(S_RF_KILL_HW, &il->status);
6383 set_bit(S_RF_KILL_HW, &il->status);
6385 wiphy_rfkill_set_hw_state(il->hw->wiphy,
6386 test_bit(S_RF_KILL_HW, &il->status));
6388 il_power_initialize(il);
/* Firmware load completes asynchronously; il4965_pci_remove() waits
 * on this completion before tearing anything down. */
6390 init_completion(&il->_4965.firmware_loading_complete);
6392 err = il4965_request_firmware(il, true);
6394 goto out_destroy_workqueue;
/* Error unwind: release resources in reverse acquisition order. */
6398 out_destroy_workqueue:
6399 destroy_workqueue(il->workqueue);
6400 il->workqueue = NULL;
6401 free_irq(il->pci_dev->irq, il);
6403 pci_disable_msi(il->pci_dev);
6404 il4965_uninit_drv(il);
6408 pci_iounmap(pdev, il->hw_base);
6409 out_pci_release_regions:
6410 pci_set_drvdata(pdev, NULL);
6411 pci_release_regions(pdev);
6412 out_pci_disable_device:
6413 pci_disable_device(pdev);
6414 out_ieee80211_free_hw:
6415 il_free_traffic_mem(il);
6416 ieee80211_free_hw(il->hw);
/*
 * PCI remove entry point: waits for the async firmware load to
 * finish, unregisters debugfs/sysfs/mac80211, quiesces interrupts
 * and the IRQ tasklet, frees uCode/RX/TX resources, tears down the
 * workqueue, and releases all PCI resources — roughly the reverse of
 * il4965_pci_probe().
 */
6421 static void __devexit il4965_pci_remove(struct pci_dev *pdev)
6423 struct il_priv *il = pci_get_drvdata(pdev);
6424 unsigned long flags;
/* Do not race with il4965_ucode_callback() still running. */
6429 wait_for_completion(&il->_4965.firmware_loading_complete);
6431 D_INFO("*** UNLOAD DRIVER ***\n");
6433 il_dbgfs_unregister(il);
6434 sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
6436 /* The ieee80211_unregister_hw() call will cause il_mac_stop()
6437 * and il4965_down() to be called; since we are removing the
6438 * device we need to set the S_EXIT_PENDING bit first.
6440 set_bit(S_EXIT_PENDING, &il->status);
6444 if (il->mac80211_registered) {
6445 ieee80211_unregister_hw(il->hw);
6446 il->mac80211_registered = 0;
6452 * Make sure device is reset to low power before unloading driver.
6453 * This may be redundant with il4965_down(), but there are paths to
6454 * run il4965_down() without calling apm_ops.stop(), and there are
6455 * paths to avoid running il4965_down() at all before leaving driver.
6456 * This (inexpensive) call *makes sure* device is reset.
6460 /* make sure we flush any pending irq or
6461 * tasklet for the driver
6463 spin_lock_irqsave(&il->lock, flags);
6464 il_disable_interrupts(il);
6465 spin_unlock_irqrestore(&il->lock, flags);
6467 il4965_synchronize_irq(il);
6469 il4965_dealloc_ucode_pci(il);
6472 il4965_rx_queue_free(il, &il->rxq);
6473 il4965_hw_txq_ctx_free(il);
6478 /*netif_stop_queue(dev); */
6479 flush_workqueue(il->workqueue);
6481 /* ieee80211_unregister_hw calls il_mac_stop, which flushes
6482 * il->workqueue... so we can't take down the workqueue
6484 destroy_workqueue(il->workqueue);
6485 il->workqueue = NULL;
6486 il_free_traffic_mem(il);
/* Release interrupt, MSI, MMIO mapping and PCI resources. */
6488 free_irq(il->pci_dev->irq, il);
6489 pci_disable_msi(il->pci_dev);
6490 pci_iounmap(pdev, il->hw_base);
6491 pci_release_regions(pdev);
6492 pci_disable_device(pdev);
6493 pci_set_drvdata(pdev, NULL);
6495 il4965_uninit_drv(il);
/* dev_kfree_skb() is NULL-safe, so no guard is needed here. */
6497 dev_kfree_skb(il->beacon_skb);
6499 ieee80211_free_hw(il->hw);
/*
 * Write the scheduler TX FIFO activation mask (one bit per FIFO
 * channel) to the IL49_SCD_TXFACT periphery register.
 */
6503 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
6504 * must be called under il->lock and mac access
6506 void il4965_txq_set_sched(struct il_priv *il, u32 mask)
6508 il_wr_prph(il, IL49_SCD_TXFACT, mask);
6511 /*****************************************************************************
6513 * driver and module entry point
6515 *****************************************************************************/
6517 /* Hardware specific file defines the PCI IDs table for that hardware module */
/* PCI IDs claimed by this driver: Intel 4965AGN devices 0x4229 and
 * 0x4230, any subsystem ID, both bound to il4965_cfg. */
6518 static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
6519 {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
6520 {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
6523 MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
/* PCI driver descriptor wiring the ID table, probe/remove callbacks
 * and the shared legacy power-management ops into the PCI core. */
6525 static struct pci_driver il4965_driver = {
6527 .id_table = il4965_hw_card_ids,
6528 .probe = il4965_pci_probe,
6529 .remove = __devexit_p(il4965_pci_remove),
6530 .driver.pm = IL_LEGACY_PM_OPS,
/*
 * Module init: register the rate-control algorithm with mac80211
 * first, then the PCI driver.  On PCI registration failure the
 * rate-control registration is rolled back.
 */
6533 static int __init il4965_init(void)
6537 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6538 pr_info(DRV_COPYRIGHT "\n");
6540 ret = il4965_rate_control_register();
6542 pr_err("Unable to register rate control algorithm: %d\n", ret);
6546 ret = pci_register_driver(&il4965_driver);
6548 pr_err("Unable to initialize PCI module\n");
6549 goto error_register;
/* Unwind: undo rate-control registration on PCI failure. */
6555 il4965_rate_control_unregister();
/* Module exit: unregister in reverse order of il4965_init(). */
6559 static void __exit il4965_exit(void)
6561 pci_unregister_driver(&il4965_driver);
6562 il4965_rate_control_unregister();
/* Module entry/exit hooks (declaration order is irrelevant here). */
6565 module_exit(il4965_exit);
6566 module_init(il4965_init);
/* Module parameters: debug mask (debug builds only), software crypto,
 * number of HW queues, 11n disable, 8K A-MSDU RX buffers, and
 * firmware auto-restart on error. */
6568 #ifdef CONFIG_IWLEGACY_DEBUG
6569 module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
6570 MODULE_PARM_DESC(debug, "debug output mask");
6573 module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
6574 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
6575 module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
6576 MODULE_PARM_DESC(queues_num, "number of hw queues.");
6577 module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
6578 MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
6579 module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K,
6581 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
6582 module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
6583 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");