1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 *****************************************************************************/
63 #include <linux/pci.h>
64 #include <linux/pci-aspm.h>
65 #include <linux/interrupt.h>
66 #include <linux/debugfs.h>
67 #include <linux/sched.h>
68 #include <linux/bitops.h>
69 #include <linux/gfp.h>
70 #include <linux/vmalloc.h>
73 #include "iwl-trans.h"
76 #include "iwl-agn-hw.h"
77 #include "iwl-fw-error-dump.h"
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys = 0;
	u32 size = 0;
	u8 power;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	/* try the largest possible DMA buffer first, 64MB down to 2KB */
	for (power = 26; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (!page)
		return;

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}
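/*
 * The "shared" (SHR) register space below is reached indirectly: the target
 * offset plus an op-code in the top nibble (2 = read, 3 = write) is written
 * to HEEP_CTRL_WRD_PCIEX_CTRL_REG, and the data moves through
 * HEEP_CTRL_WRD_PCIEX_DATA_REG.
 */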
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}
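/*
 * Select the APMG power source: VAUX if requested and the PCI device can
 * signal PME# from D3cold, otherwise VMAIN.
 */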
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
169 #define PCI_CFG_RETRY_TIMEOUT 0x041
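/* PCI config-space offset of the retry timeout register, cleared at probe
 * time to keep PCI Tx retries from interfering with C3 CPU state */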
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */
212 /* Disable L0S exit timer (platform NMI Work/Around) */
213 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
214 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
215 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}
	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}
	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}
/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);
333 * Set "initialization complete" bit to move adapter from
334 * D0U* --> D0A* (powered-up active) state.
336 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
339 * Wait for clock stabilization; once stabilized, access to
340 * device-internal resources is possible.
342 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
343 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
344 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
346 if (WARN_ON(ret < 0)) {
347 IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
348 /* Release XTAL ON request */
349 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
350 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
355 * Clear "disable persistence" to avoid LP XTAL resetting when
356 * SHRD_HW_RST is applied in S3.
358 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
359 APMG_PCIDEV_STT_VAL_PERSIST_DIS);
362 * Force APMG XTAL to be active to prevent its disabling by HW
363 * caused by APMG idle state.
365 apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
366 SHR_APMG_XTAL_CFG_REG);
367 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
369 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);
379 /* Enable LP XTAL by indirect access through CSR */
380 apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
381 iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
382 SHR_APMG_GP1_WF_XTAL_LP_EN |
383 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
385 /* Clear delay line clock power up */
386 dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
387 iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
388 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
404 /* Activates XTAL resources monitor */
405 __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
406 CSR_MONITOR_XTAL_RESOURCES);
408 /* Release XTAL ON request */
409 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
410 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
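/* Bring up the NIC: APM init, power source selection, op-mode specific
 * configuration, then RX and TX/command queue allocation */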
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
495 #define HW_READY_TIMEOUT (50)
/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");

	return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);

	return ret;
}
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;
561 iwl_write_direct32(trans,
562 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
563 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);
569 iwl_write_direct32(trans,
570 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
571 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
573 iwl_write_direct32(trans,
574 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
575 (iwl_get_dma_hi_addr(phy_addr)
576 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
578 iwl_write_direct32(trans,
579 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
580 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
581 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
582 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
584 iwl_write_direct32(trans,
585 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
586 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
587 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
588 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
600 static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
601 const struct fw_desc *section)
605 u32 offset, chunk_sz = section->len;
608 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
611 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
612 GFP_KERNEL | __GFP_NOWARN);
614 IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
615 chunk_sz = PAGE_SIZE;
616 v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
617 &p_addr, GFP_KERNEL);
622 for (offset = 0; offset < section->len; offset += chunk_sz) {
625 copy_size = min_t(u32, chunk_sz, section->len - offset);
627 memcpy(v_addr, (u8 *)section->data + offset, copy_size);
628 ret = iwl_pcie_load_firmware_chunk(trans,
629 section->offset + offset,
633 "Could not load the [%d] uCode section\n",
	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
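/*
 * The firmware image holds the sections of CPU1 and CPU2 back to back,
 * separated by CPU1_CPU2_SEPARATOR_SECTION markers; the helpers below load
 * one CPU's sections and advance *first_ucode_section so the caller can
 * continue with the next CPU.
 */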
643 static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
644 const struct fw_img *image,
646 int *first_ucode_section)
650 u32 last_read_idx = 0;
654 *first_ucode_section = 0;
657 (*first_ucode_section)++;
660 for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
663 if (!image->sec[i].data ||
664 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
666 "Break since Data not valid or Empty section, sec = %d\n",
671 if (i == (*first_ucode_section) + 1)
672 /* set CPU to started */
673 iwl_set_bits_prph(trans,
674 CSR_UCODE_LOAD_STATUS_ADDR,
675 LMPM_CPU_HDRS_LOADING_COMPLETED
678 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
682 /* image loading complete */
683 iwl_set_bits_prph(trans,
684 CSR_UCODE_LOAD_STATUS_ADDR,
685 LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
687 *first_ucode_section = last_read_idx;
692 static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
693 const struct fw_img *image,
695 int *first_ucode_section)
699 u32 last_read_idx = 0;
703 *first_ucode_section = 0;
706 (*first_ucode_section)++;
709 for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
712 if (!image->sec[i].data ||
713 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
715 "Break since Data not valid or Empty section, sec = %d\n",
720 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
725 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
726 iwl_set_bits_prph(trans,
727 CSR_UCODE_LOAD_STATUS_ADDR,
728 (LMPM_CPU_UCODE_LOADING_COMPLETED |
729 LMPM_CPU_HDRS_LOADING_COMPLETED |
730 LMPM_CPU_UCODE_LOADING_STARTED) <<
733 *first_ucode_section = last_read_idx;
738 static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
739 const struct fw_img *image)
741 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
743 int first_ucode_section;
746 "working with %s image\n",
747 image->is_secure ? "Secured" : "Non Secured");
749 "working with %s CPU\n",
750 image->is_dual_cpus ? "Dual" : "Single");
752 /* configure the ucode to be ready to get the secured image */
753 if (image->is_secure) {
754 /* set secure boot inspector addresses */
755 iwl_write_prph(trans,
756 LMPM_SECURE_INSPECTOR_CODE_ADDR,
757 LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
759 iwl_write_prph(trans,
760 LMPM_SECURE_INSPECTOR_DATA_ADDR,
761 LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
763 /* set CPU1 header address */
764 iwl_write_prph(trans,
765 LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
766 LMPM_SECURE_CPU1_HDR_MEM_SPACE);
768 /* load to FW the binary Secured sections of CPU1 */
769 ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
770 &first_ucode_section);
775 /* load to FW the binary Non secured sections of CPU1 */
776 ret = iwl_pcie_load_cpu_sections(trans, image, 1,
777 &first_ucode_section);
782 if (image->is_dual_cpus) {
783 /* set CPU2 header address */
784 iwl_write_prph(trans,
785 LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
786 LMPM_SECURE_CPU2_HDR_MEM_SPACE);
788 /* load to FW the binary sections of CPU2 */
789 if (image->is_secure)
790 ret = iwl_pcie_load_cpu_secured_sections(
792 &first_ucode_section);
794 ret = iwl_pcie_load_cpu_sections(trans, image, 2,
795 &first_ucode_section);
800 /* supported for 7000 only for the moment */
801 if (iwlwifi_mod_params.fw_monitor &&
802 trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
803 iwl_pcie_alloc_fw_monitor(trans);
805 if (trans_pcie->fw_mon_size) {
806 iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
807 trans_pcie->fw_mon_phys >> 4);
808 iwl_write_prph(trans, MON_BUFF_END_ADDR,
809 (trans_pcie->fw_mon_phys +
810 trans_pcie->fw_mon_size) >> 4);
814 /* release CPU reset */
815 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
816 iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
818 iwl_write32(trans, CSR_RESET, 0);
820 if (image->is_secure) {
821 /* wait for image verification to complete */
822 ret = iwl_poll_prph_bit(trans,
823 LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
824 LMPM_SECURE_BOOT_STATUS_SUCCESS,
825 LMPM_SECURE_BOOT_STATUS_SUCCESS,
826 LMPM_SECURE_TIME_OUT);
829 IWL_ERR(trans, "Time out on secure boot process\n");
837 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
838 const struct fw_img *fw, bool run_in_rfkill)
843 /* This may fail if AMT took ownership of the device */
844 if (iwl_pcie_prepare_card_hw(trans)) {
845 IWL_WARN(trans, "Exit HW not ready\n");
849 iwl_enable_rfkill_int(trans);
851 /* If platform's RF_KILL switch is NOT set to KILL */
852 hw_rfkill = iwl_is_rfkill_set(trans);
854 set_bit(STATUS_RFKILL, &trans->status);
856 clear_bit(STATUS_RFKILL, &trans->status);
857 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
858 if (hw_rfkill && !run_in_rfkill)
861 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
863 ret = iwl_pcie_nic_init(trans);
865 IWL_ERR(trans, "Unable to init nic\n");
869 /* make sure rfkill handshake bits are cleared */
870 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
871 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
872 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
874 /* clear (again), then enable host interrupts */
875 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
876 iwl_enable_interrupts(trans);
878 /* really make sure rfkill handshake bits are cleared */
879 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
880 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
882 /* Load the given image to the HW */
	return iwl_pcie_load_given_ucode(trans, fw);
}
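/* Firmware reported ALIVE: re-arm the ICT interrupt table and start the
 * TX scheduler at the address the firmware gave us */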
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
892 static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
894 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
895 bool hw_rfkill, was_hw_rfkill;
897 was_hw_rfkill = iwl_is_rfkill_set(trans);
899 /* tell the device to stop sending interrupts */
900 spin_lock(&trans_pcie->irq_lock);
901 iwl_disable_interrupts(trans);
902 spin_unlock(&trans_pcie->irq_lock);
904 /* device going down, Stop using ICT table */
905 iwl_pcie_disable_ict(trans);
908 * If a HW restart happens during firmware loading,
909 * then the firmware loading might call this function
910 * and later it might be called again due to the
911 * restart. So don't process again if the device is
914 if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
915 iwl_pcie_tx_stop(trans);
916 iwl_pcie_rx_stop(trans);
918 /* Power-down device's busmaster DMA clocks */
919 iwl_write_prph(trans, APMG_CLK_DIS_REG,
920 APMG_CLK_VAL_DMA_CLK_RQT);
924 /* Make sure (redundant) we've released our request to stay awake */
925 iwl_clear_bit(trans, CSR_GP_CNTRL,
926 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
928 /* Stop the device, and put it in low power state */
929 iwl_pcie_apm_stop(trans);
931 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
932 * Clean again the interrupt here
934 spin_lock(&trans_pcie->irq_lock);
935 iwl_disable_interrupts(trans);
936 spin_unlock(&trans_pcie->irq_lock);
938 /* stop and reset the on-board processor */
939 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
941 /* clear all status bits */
942 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
943 clear_bit(STATUS_INT_ENABLED, &trans->status);
944 clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
945 clear_bit(STATUS_TPOWER_PMI, &trans->status);
946 clear_bit(STATUS_RFKILL, &trans->status);
949 * Even if we stop the HW, we still want the RF kill
952 iwl_enable_rfkill_int(trans);
955 * Check again since the RF kill state may have changed while
956 * all the interrupts were disabled, in this case we couldn't
957 * receive the RF kill interrupt and update the state in the
959 * Don't call the op_mode if the rkfill state hasn't changed.
960 * This allows the op_mode to call stop_device from the rfkill
961 * notification without endless recursion. Under very rare
962 * circumstances, we might have a small recursion if the rfkill
963 * state changed exactly now while we were called from stop_device.
964 * This is very unlikely but can happen and is supported.
966 hw_rfkill = iwl_is_rfkill_set(trans);
968 set_bit(STATUS_RFKILL, &trans->status);
970 clear_bit(STATUS_RFKILL, &trans->status);
971 if (hw_rfkill != was_hw_rfkill)
972 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		iwl_trans_pcie_stop_device(trans);
}
981 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
983 iwl_disable_interrupts(trans);
986 * in testing mode, the host stays awake and the
987 * hardware won't be reset (not even partially)
992 iwl_pcie_disable_ict(trans);
994 iwl_clear_bit(trans, CSR_GP_CNTRL,
995 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
996 iwl_clear_bit(trans, CSR_GP_CNTRL,
997 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1000 * reset TX queues -- some of their registers reset during S3
1001 * so if we don't reset everything here the D3 image would try
1002 * to execute some invalid memory upon resume
1004 iwl_trans_pcie_tx_reset(trans);
1006 iwl_pcie_set_pwr(trans, true);
1009 static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1010 enum iwl_d3_status *status,
1017 iwl_enable_interrupts(trans);
1018 *status = IWL_D3_STATUS_ALIVE;
1022 iwl_pcie_set_pwr(trans, false);
1024 val = iwl_read32(trans, CSR_RESET);
1025 if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
1026 *status = IWL_D3_STATUS_RESET;
1031 * Also enables interrupts - none will happen as the device doesn't
1032 * know we're waking it up, only when the opmode actually tells it
1035 iwl_pcie_reset_ict(trans);
1037 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1038 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1040 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
1041 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1042 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1045 IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
1049 iwl_trans_pcie_tx_reset(trans);
1051 ret = iwl_pcie_rx_init(trans);
1053 IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
1057 *status = IWL_D3_STATUS_ALIVE;
1061 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1066 err = iwl_pcie_prepare_card_hw(trans);
1068 IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1072 /* Reset the entire device */
1073 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1075 usleep_range(10, 15);
1077 iwl_pcie_apm_init(trans);
1079 /* From now on, the op_mode will be kept updated about RF kill state */
1080 iwl_enable_rfkill_int(trans);
1082 hw_rfkill = iwl_is_rfkill_set(trans);
1084 set_bit(STATUS_RFKILL, &trans->status);
1086 clear_bit(STATUS_RFKILL, &trans->status);
1087 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1092 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1094 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1096 /* disable interrupts - don't enable HW RF kill interrupt */
1097 spin_lock(&trans_pcie->irq_lock);
1098 iwl_disable_interrupts(trans);
1099 spin_unlock(&trans_pcie->irq_lock);
1101 iwl_pcie_apm_stop(trans);
1103 spin_lock(&trans_pcie->irq_lock);
1104 iwl_disable_interrupts(trans);
1105 spin_unlock(&trans_pcie->irq_lock);
1107 iwl_pcie_disable_ict(trans);
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
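/*
 * Periphery (PRPH) registers are also reached indirectly: the address, with
 * an access-enable code in bits 24-25, goes through HBUS_TARG_PRPH_RADDR or
 * HBUS_TARG_PRPH_WADDR and the data through HBUS_TARG_PRPH_RDAT/WDAT.
 */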
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}
1146 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1147 const struct iwl_trans_config *trans_cfg)
1149 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1151 trans_pcie->cmd_queue = trans_cfg->cmd_queue;
1152 trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
1153 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1154 trans_pcie->n_no_reclaim_cmds = 0;
1156 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
1157 if (trans_pcie->n_no_reclaim_cmds)
1158 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1159 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1161 trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
1162 if (trans_pcie->rx_buf_size_8k)
1163 trans_pcie->rx_page_order = get_order(8 * 1024);
1165 trans_pcie->rx_page_order = get_order(4 * 1024);
1167 trans_pcie->wd_timeout =
1168 msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
1170 trans_pcie->command_names = trans_cfg->command_names;
1171 trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
1173 /* Initialize NAPI here - it should be before registering to mac80211
1174 * in the opmode but after the HW struct is allocated.
1175 * As this function may be called again in some corner cases don't
1176 * do anything if NAPI was already initialized.
1178 if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
1179 init_dummy_netdev(&trans_pcie->napi_dev);
1180 iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
1181 &trans_pcie->napi_dev,
1182 iwl_pcie_dummy_napi_poll, 64);
1186 void iwl_trans_pcie_free(struct iwl_trans *trans)
1188 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1190 synchronize_irq(trans_pcie->pci_dev->irq);
1192 iwl_pcie_tx_free(trans);
1193 iwl_pcie_rx_free(trans);
1195 free_irq(trans_pcie->pci_dev->irq, trans);
1196 iwl_pcie_free_ict(trans);
1198 pci_disable_msi(trans_pcie->pci_dev);
1199 iounmap(trans_pcie->hw_base);
1200 pci_release_regions(trans_pcie->pci_dev);
1201 pci_disable_device(trans_pcie->pci_dev);
1202 kmem_cache_destroy(trans->dev_cmd_pool);
	if (trans_pcie->napi.poll)
		netif_napi_del(&trans_pcie->napi);

	iwl_pcie_free_fw_monitor(trans);

	kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
1220 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
1221 unsigned long *flags)
1224 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1226 spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
1228 if (trans_pcie->cmd_in_flight)
1231 /* this bit wakes up the NIC */
1232 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1233 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1236 * These bits say the device is running, and should keep running for
1237 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
1238 * but they do not indicate that embedded SRAM is restored yet;
1239 * 3945 and 4965 have volatile SRAM, and must save/restore contents
1240 * to/from host DRAM when sleeping/waking for power-saving.
1241 * Each direction takes approximately 1/4 millisecond; with this
1242 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
1243 * series of register accesses are expected (e.g. reading Event Log),
1244 * to keep device from sleeping.
1246 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
1247 * SRAM is okay/restored. We don't check that here because this call
1248 * is just for hardware register access; but GP1 MAC_SLEEP check is a
1249 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
1251 * 5000 series and later (including 1000 series) have non-volatile SRAM,
1252 * and do not save/restore SRAM when power cycling.
1254 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
1255 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1256 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1257 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
1258 if (unlikely(ret < 0)) {
1259 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
1261 u32 val = iwl_read32(trans, CSR_GP_CNTRL);
1263 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
1265 spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
1272 * Fool sparse by faking we release the lock - sparse will
1273 * track nic_access anyway.
1275 __release(&trans_pcie->reg_lock);
1279 static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
1280 unsigned long *flags)
1282 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1284 lockdep_assert_held(&trans_pcie->reg_lock);
1287 * Fool sparse by faking we acquiring the lock - sparse will
1288 * track nic_access anyway.
1290 __acquire(&trans_pcie->reg_lock);
1292 if (trans_pcie->cmd_in_flight)
1295 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1296 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1298 * Above we read the CSR_GP_CNTRL register, which will flush
1299 * any previous writes, but we need the write that clears the
1300 * MAC_ACCESS_REQ bit to be performed before any other writes
1301 * scheduled on different CPUs (after we drop reg_lock).
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
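/*
 * Rough usage sketch for the two helpers above (this is how the mem
 * accessors below use them):
 *
 *	unsigned long flags;
 *
 *	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
 *		... read/write device registers while the NIC is awake ...
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 */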
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
1345 #define IWL_FLUSH_WAIT_MS 2000
1347 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
1349 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1350 struct iwl_txq *txq;
1351 struct iwl_queue *q;
1353 unsigned long now = jiffies;
1358 /* waiting for all the tx frames complete might take a while */
1359 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1362 if (cnt == trans_pcie->cmd_queue)
1364 if (!test_bit(cnt, trans_pcie->queue_used))
1366 if (!(BIT(cnt) & txq_bm))
1369 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
1370 txq = &trans_pcie->txq[cnt];
1372 wr_ptr = ACCESS_ONCE(q->write_ptr);
1374 while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
1375 !time_after(jiffies,
1376 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
1377 u8 write_ptr = ACCESS_ONCE(q->write_ptr);
1379 if (WARN_ONCE(wr_ptr != write_ptr,
1380 "WR pointer moved while flushing %d -> %d\n",
1386 if (q->read_ptr != q->write_ptr) {
1388 "fail to flush all tx fifo queues Q %d\n", cnt);
1392 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
1398 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
1399 txq->q.read_ptr, txq->q.write_ptr);
1401 scd_sram_addr = trans_pcie->scd_base_addr +
1402 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
1403 iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
1405 iwl_print_hex_error(trans, buf, sizeof(buf));
1407 for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
1408 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
1409 iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
1411 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1412 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
1413 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
1414 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
1416 iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
1417 SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
1420 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
1422 tbl_dw = tbl_dw & 0x0000FFFF;
1425 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
1426 cnt, active ? "" : "in", fifo, tbl_dw,
1427 iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
1428 (TFD_QUEUE_SIZE_MAX - 1),
1429 iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
1446 static const char *get_csr_string(int cmd)
1448 #define IWL_CMD(x) case x: return #x
1450 IWL_CMD(CSR_HW_IF_CONFIG_REG);
1451 IWL_CMD(CSR_INT_COALESCING);
1453 IWL_CMD(CSR_INT_MASK);
1454 IWL_CMD(CSR_FH_INT_STATUS);
1455 IWL_CMD(CSR_GPIO_IN);
1457 IWL_CMD(CSR_GP_CNTRL);
1458 IWL_CMD(CSR_HW_REV);
1459 IWL_CMD(CSR_EEPROM_REG);
1460 IWL_CMD(CSR_EEPROM_GP);
1461 IWL_CMD(CSR_OTP_GP_REG);
1462 IWL_CMD(CSR_GIO_REG);
1463 IWL_CMD(CSR_GP_UCODE_REG);
1464 IWL_CMD(CSR_GP_DRIVER_REG);
1465 IWL_CMD(CSR_UCODE_DRV_GP1);
1466 IWL_CMD(CSR_UCODE_DRV_GP2);
1467 IWL_CMD(CSR_LED_REG);
1468 IWL_CMD(CSR_DRAM_INT_TBL_REG);
1469 IWL_CMD(CSR_GIO_CHICKEN_BITS);
1470 IWL_CMD(CSR_ANA_PLL_CFG);
1471 IWL_CMD(CSR_HW_REV_WA_REG);
1472 IWL_CMD(CSR_MONITOR_STATUS_REG);
1473 IWL_CMD(CSR_DBG_HPET_MEM_REG);
1480 void iwl_pcie_dump_csr(struct iwl_trans *trans)
1483 static const u32 csr_tbl[] = {
1484 CSR_HW_IF_CONFIG_REG,
1502 CSR_DRAM_INT_TBL_REG,
1503 CSR_GIO_CHICKEN_BITS,
1505 CSR_MONITOR_STATUS_REG,
1507 CSR_DBG_HPET_MEM_REG
1509 IWL_ERR(trans, "CSR values:\n");
1510 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
1511 "CSR_INT_PERIODIC_REG)\n");
1512 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1513 IWL_ERR(trans, " %25s: 0X%08x\n",
1514 get_csr_string(csr_tbl[i]),
1515 iwl_read32(trans, csr_tbl[i]));
1519 #ifdef CONFIG_IWLWIFI_DEBUGFS
1520 /* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)
1527 /* file operation */
1528 #define DEBUGFS_READ_FILE_OPS(name) \
1529 static const struct file_operations iwl_dbgfs_##name##_ops = { \
1530 .read = iwl_dbgfs_##name##_read, \
1531 .open = simple_open, \
1532 .llseek = generic_file_llseek, \
1535 #define DEBUGFS_WRITE_FILE_OPS(name) \
1536 static const struct file_operations iwl_dbgfs_##name##_ops = { \
1537 .write = iwl_dbgfs_##name##_write, \
1538 .open = simple_open, \
1539 .llseek = generic_file_llseek, \
1542 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \
1543 static const struct file_operations iwl_dbgfs_##name##_ops = { \
1544 .write = iwl_dbgfs_##name##_write, \
1545 .read = iwl_dbgfs_##name##_read, \
1546 .open = simple_open, \
1547 .llseek = generic_file_llseek, \
1550 static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1551 char __user *user_buf,
1552 size_t count, loff_t *ppos)
1554 struct iwl_trans *trans = file->private_data;
1555 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1556 struct iwl_txq *txq;
1557 struct iwl_queue *q;
1564 bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
1566 if (!trans_pcie->txq)
1569 buf = kzalloc(bufsz, GFP_KERNEL);
1573 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1574 txq = &trans_pcie->txq[cnt];
1576 pos += scnprintf(buf + pos, bufsz - pos,
1577 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d%s\n",
1578 cnt, q->read_ptr, q->write_ptr,
1579 !!test_bit(cnt, trans_pcie->queue_used),
1580 !!test_bit(cnt, trans_pcie->queue_stopped),
1582 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
1584 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1589 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1590 char __user *user_buf,
1591 size_t count, loff_t *ppos)
1593 struct iwl_trans *trans = file->private_data;
1594 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1595 struct iwl_rxq *rxq = &trans_pcie->rxq;
1598 const size_t bufsz = sizeof(buf);
1600 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1602 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1604 pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
1606 pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
1608 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1611 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1612 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1614 pos += scnprintf(buf + pos, bufsz - pos,
1615 "closed_rb_num: Not Allocated\n");
1617 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1620 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
1621 char __user *user_buf,
1622 size_t count, loff_t *ppos)
1624 struct iwl_trans *trans = file->private_data;
1625 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1626 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1630 int bufsz = 24 * 64; /* 24 items * 64 char per item */
1633 buf = kzalloc(bufsz, GFP_KERNEL);
1637 pos += scnprintf(buf + pos, bufsz - pos,
1638 "Interrupt Statistics Report:\n");
1640 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
1642 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
1644 if (isr_stats->sw || isr_stats->hw) {
1645 pos += scnprintf(buf + pos, bufsz - pos,
1646 "\tLast Restarting Code: 0x%X\n",
1647 isr_stats->err_code);
1649 #ifdef CONFIG_IWLWIFI_DEBUG
1650 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
1652 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
1655 pos += scnprintf(buf + pos, bufsz - pos,
1656 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
1658 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
1661 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
1664 pos += scnprintf(buf + pos, bufsz - pos,
1665 "Rx command responses:\t\t %u\n", isr_stats->rx);
1667 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
1670 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
1671 isr_stats->unhandled);
1673 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1678 static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1679 const char __user *user_buf,
1680 size_t count, loff_t *ppos)
1682 struct iwl_trans *trans = file->private_data;
1683 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1684 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1690 memset(buf, 0, sizeof(buf));
1691 buf_size = min(count, sizeof(buf) - 1);
1692 if (copy_from_user(buf, user_buf, buf_size))
1694 if (sscanf(buf, "%x", &reset_flag) != 1)
1696 if (reset_flag == 0)
1697 memset(isr_stats, 0, sizeof(*isr_stats));
1702 static ssize_t iwl_dbgfs_csr_write(struct file *file,
1703 const char __user *user_buf,
1704 size_t count, loff_t *ppos)
1706 struct iwl_trans *trans = file->private_data;
1711 memset(buf, 0, sizeof(buf));
1712 buf_size = min(count, sizeof(buf) - 1);
1713 if (copy_from_user(buf, user_buf, buf_size))
1715 if (sscanf(buf, "%d", &csr) != 1)
1718 iwl_pcie_dump_csr(trans);
1723 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1724 char __user *user_buf,
1725 size_t count, loff_t *ppos)
1727 struct iwl_trans *trans = file->private_data;
1731 ret = iwl_dump_fh(trans, &buf);
1736 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
1741 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
1742 DEBUGFS_READ_FILE_OPS(fh_reg);
1743 DEBUGFS_READ_FILE_OPS(rx_queue);
1744 DEBUGFS_READ_FILE_OPS(tx_queue);
1745 DEBUGFS_WRITE_FILE_OPS(csr);
/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
1766 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
1771 for (i = 0; i < IWL_NUM_OF_TBS; i++)
1772 cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
1778 struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
1780 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1781 struct iwl_fw_error_dump_data *data;
1782 struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
1783 struct iwl_fw_error_dump_txcmd *txcmd;
1784 struct iwl_trans_dump_data *dump_data;
1788 len = sizeof(*dump_data) + sizeof(*data) +
1789 cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
1791 if (trans_pcie->fw_mon_page)
1792 len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
1793 trans_pcie->fw_mon_size;
1795 dump_data = vzalloc(len);
1800 data = (void *)dump_data->data;
1801 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
1802 txcmd = (void *)data->data;
1803 spin_lock_bh(&cmdq->lock);
1804 ptr = cmdq->q.write_ptr;
1805 for (i = 0; i < cmdq->q.n_window; i++) {
1806 u8 idx = get_cmd_index(&cmdq->q, ptr);
1809 cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
1810 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
1813 len += sizeof(*txcmd) + caplen;
1814 txcmd->cmdlen = cpu_to_le32(cmdlen);
1815 txcmd->caplen = cpu_to_le32(caplen);
1816 memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
1817 txcmd = (void *)((u8 *)txcmd->data + caplen);
1820 ptr = iwl_queue_dec_wrap(ptr);
1822 spin_unlock_bh(&cmdq->lock);
1824 data->len = cpu_to_le32(len);
1825 len += sizeof(*data);
1827 if (trans_pcie->fw_mon_page) {
1828 struct iwl_fw_error_dump_fw_mon *fw_mon_data;
1830 data = iwl_fw_error_next_data(data);
1831 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
1832 data->len = cpu_to_le32(trans_pcie->fw_mon_size +
1833 sizeof(*fw_mon_data));
1834 fw_mon_data = (void *)data->data;
1835 fw_mon_data->fw_mon_wr_ptr =
1836 cpu_to_le32(iwl_read_prph(trans, MON_BUFF_WRPTR));
1837 fw_mon_data->fw_mon_cycle_cnt =
1838 cpu_to_le32(iwl_read_prph(trans, MON_BUFF_CYCLE_CNT));
1839 fw_mon_data->fw_mon_base_ptr =
1840 cpu_to_le32(iwl_read_prph(trans, MON_BUFF_BASE_ADDR));
1843 * The firmware is now asserted, it won't write anything to
1844 * the buffer. CPU can take ownership to fetch the data.
1845 * The buffer will be handed back to the device before the
1846 * firmware will be restarted.
1848 dma_sync_single_for_cpu(trans->dev, trans_pcie->fw_mon_phys,
1849 trans_pcie->fw_mon_size,
1851 memcpy(fw_mon_data->data, page_address(trans_pcie->fw_mon_page),
1852 trans_pcie->fw_mon_size);
1854 len += sizeof(*data) + sizeof(*fw_mon_data) +
1855 trans_pcie->fw_mon_size;
	dump_data->len = len;

	return dump_data;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
1870 static const struct iwl_trans_ops trans_ops_pcie = {
1871 .start_hw = iwl_trans_pcie_start_hw,
1872 .op_mode_leave = iwl_trans_pcie_op_mode_leave,
1873 .fw_alive = iwl_trans_pcie_fw_alive,
1874 .start_fw = iwl_trans_pcie_start_fw,
1875 .stop_device = iwl_trans_pcie_stop_device,
1877 .d3_suspend = iwl_trans_pcie_d3_suspend,
1878 .d3_resume = iwl_trans_pcie_d3_resume,
1880 .send_cmd = iwl_trans_pcie_send_hcmd,
1882 .tx = iwl_trans_pcie_tx,
1883 .reclaim = iwl_trans_pcie_reclaim,
1885 .txq_disable = iwl_trans_pcie_txq_disable,
1886 .txq_enable = iwl_trans_pcie_txq_enable,
1888 .dbgfs_register = iwl_trans_pcie_dbgfs_register,
1890 .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
1892 .write8 = iwl_trans_pcie_write8,
1893 .write32 = iwl_trans_pcie_write32,
1894 .read32 = iwl_trans_pcie_read32,
1895 .read_prph = iwl_trans_pcie_read_prph,
1896 .write_prph = iwl_trans_pcie_write_prph,
1897 .read_mem = iwl_trans_pcie_read_mem,
1898 .write_mem = iwl_trans_pcie_write_mem,
1899 .configure = iwl_trans_pcie_configure,
1900 .set_pmi = iwl_trans_pcie_set_pmi,
1901 .grab_nic_access = iwl_trans_pcie_grab_nic_access,
1902 .release_nic_access = iwl_trans_pcie_release_nic_access,
1903 .set_bits_mask = iwl_trans_pcie_set_bits_mask,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.dump_data = iwl_trans_pcie_dump_data,
#endif
};
1910 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1911 const struct pci_device_id *ent,
1912 const struct iwl_cfg *cfg)
1914 struct iwl_trans_pcie *trans_pcie;
1915 struct iwl_trans *trans;
1919 trans = kzalloc(sizeof(struct iwl_trans) +
1920 sizeof(struct iwl_trans_pcie), GFP_KERNEL);
1926 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1928 trans->ops = &trans_ops_pcie;
1930 trans_lockdep_init(trans);
1931 trans_pcie->trans = trans;
1932 spin_lock_init(&trans_pcie->irq_lock);
1933 spin_lock_init(&trans_pcie->reg_lock);
1934 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	err = pci_enable_device(pdev);
	if (err)
		goto out_no_pci;
1940 if (!cfg->base_params->pcie_l1_allowed) {
1942 * W/A - seems to solve weird behavior. We need to remove this
1943 * if we don't want to stay in L1 all the time. This wastes a
1946 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
1947 PCIE_LINK_STATE_L1 |
1948 PCIE_LINK_STATE_CLKPM);
1951 pci_set_master(pdev);
1953 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
1955 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
1957 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1959 err = pci_set_consistent_dma_mask(pdev,
1961 /* both attempts failed: */
1963 dev_err(&pdev->dev, "No suitable DMA available\n");
1964 goto out_pci_disable_device;
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}
1974 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
1975 if (!trans_pcie->hw_base) {
1976 dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
1978 goto out_pci_release_regions;
1981 /* We disable the RETRY_TIMEOUT register (0x41) to keep
1982 * PCI Tx retries from interfering with C3 CPU state */
1983 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
1985 trans->dev = &pdev->dev;
1986 trans_pcie->pci_dev = pdev;
1987 iwl_disable_interrupts(trans);
1989 err = pci_enable_msi(pdev);
1991 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
1992 /* enable rfkill interrupt: hw bug w/a */
1993 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1994 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1995 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1996 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
2000 trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
2007 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
2008 trans->hw_rev = (trans->hw_rev & 0xfff0) |
2009 ((trans->hw_rev << 2) & 0xc);
2011 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
2012 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
2013 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
2015 /* Initialize the wait queue for commands */
2016 init_waitqueue_head(&trans_pcie->wait_command_queue);
2018 snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
2019 "iwl_cmd_pool:%s", dev_name(trans->dev));
2021 trans->dev_cmd_headroom = 0;
2022 trans->dev_cmd_pool =
2023 kmem_cache_create(trans->dev_cmd_pool_name,
2024 sizeof(struct iwl_device_cmd)
2025 + trans->dev_cmd_headroom,
2030 if (!trans->dev_cmd_pool) {
2032 goto out_pci_disable_msi;
2035 if (iwl_pcie_alloc_ict(trans))
2036 goto out_free_cmd_pool;
2038 err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
2039 iwl_pcie_irq_handler,
				   IRQF_SHARED, DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}
	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_free_cmd_pool:
	kmem_cache_destroy(trans->dev_cmd_pool);
2054 out_pci_disable_msi:
2055 pci_disable_msi(pdev);
2056 out_pci_release_regions:
2057 pci_release_regions(pdev);
2058 out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return ERR_PTR(err);
}