/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF
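
/*
 * Sections whose load address falls inside this window need the
 * LMPM_CHICK extended-address-space bit set for the duration of the DMA;
 * a condensed sketch (hypothetical helper name) of the check used in
 * iwl_pcie_load_section() below:
 *
 *	bool is_extended(u32 dst_addr)
 *	{
 *		return dst_addr >= IWL_FW_MEM_EXTENDED_START &&
 *		       dst_addr <= IWL_FW_MEM_EXTENDED_END;
 *	}
 */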
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys;
	u32 size;
	u8 power;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	phys = 0;
	for (power = 26; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (WARN_ON_ONCE(!page))
		return;

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}
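
/*
 * Sizing sketch: the loop above walks the allocation order down from 2^26
 * (64 MB) to 2^11 (2 KB) and keeps the first size that both alloc_pages()
 * and dma_map_page() accept, i.e. the largest physically contiguous,
 * DMA-able buffer currently available for the monitor.
 */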
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}
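
/*
 * The two helpers above implement the indirect shared-register (SHR)
 * access protocol: the control word carries the target address in its low
 * 16 bits and an opcode in the top nibble (2 = read, 3 = write); the data
 * itself moves through HEEP_CTRL_WRD_PCIEX_DATA_REG. Note the ordering:
 * a write latches the data register first, a read fetches it after the
 * control write.
 */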
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}
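
/*
 * Decision summary: L1 enabled by BIOS/OS -> L0S forced off (the device
 * then transitions L0 -> L1 directly); L1 disabled -> L0S left on as the
 * only remaining ASPM power-saving state. pm_support ends up true exactly
 * when L0S is disabled.
 */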
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse: this is needed for 7260 / 3160
		 * only, so we check host_interrupt_operation_mode even though
		 * what follows is not directly related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}
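
/*
 * iwl_pcie_apm_init() is invoked from iwl_trans_pcie_start_hw() right
 * after the SW reset and, under irq_lock, from iwl_pcie_nic_init(); both
 * paths rely on the MAC clock being ready before any PRPH access.
 */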
/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}
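
/*
 * Timing sketch (assuming the card never reports ready): each of the 10
 * outer iterations polls for up to ~150 ms in 200-1000 us steps, so AMT/ME
 * gets on the order of a couple of seconds to release the device before
 * we give up.
 */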
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
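
/*
 * The writes above drive one Flow Handler service-channel transfer: pause
 * the channel, point it at the SRAM destination and the DMA source buffer
 * (low address bits first, then high bits combined with the byte count),
 * mark the single TB valid, re-enable the channel, and sleep until the
 * FH-TX interrupt sets ucode_write_complete.
 */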
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
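
/*
 * Sections are streamed through one coherent bounce buffer of at most
 * FH_MEM_TB_MAX_LENGTH bytes (falling back to PAGE_SIZE if the large
 * allocation fails), so a section of length L takes roughly
 * L / chunk_sz service-channel transfers, rounded up.
 */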
/*
 * The driver takes ownership of the secure machine before FW load to
 * prevent a race with the BT load.
 * W/A for a ROM bug (should be removed in the next Si step).
 */
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
	u32 val, loop = 1000;

	/* Check the RSA semaphore is accessible - if not, we are in trouble */
	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
	if (val & (BIT(1) | BIT(17))) {
		IWL_INFO(trans,
			 "can't access the RSA semaphore it is write protected\n");
		return 0;
	}

	/* take ownership on the AUX IF */
	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);

	do {
		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
		if (val == 0x1) {
			iwl_write_prph(trans, RSA_ENABLE, 0);
			return 0;
		}

		udelay(10);
		loop--;
	} while (loop > 0);

	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
	return -EIO;
}
static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
					    const struct fw_img *image,
					    int cpu,
					    int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	if (cpu == 1)
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
	else
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);

	return 0;
}
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_set_bits_prph(trans,
				  CSR_UCODE_LOAD_STATUS_ADDR,
				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
				   LMPM_CPU_HDRS_LOADING_COMPLETED |
				   LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;
}
static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
			       (trans_pcie->fw_mon_phys +
				trans_pcie->fw_mon_size) >> dest->end_shift);
	}
}
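
/*
 * The dest TLV acts as a small interpreter: each reg_op is one of six
 * primitive CSR/PRPH assignments or bit flips, applied in order, after
 * which the monitor base/end registers are pointed at the DMA buffer
 * (external monitor mode only).
 */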
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	/* release CPU reset */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
	else
		iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
					   const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;
	u32 reg;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 1,
					       &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 2,
					       &first_ucode_section);
	if (ret)
		return ret;

	/* wait for image verification to complete */
	ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
				LMPM_SECURE_BOOT_STATUS_SUCCESS,
				LMPM_SECURE_BOOT_STATUS_SUCCESS,
				LMPM_SECURE_TIME_OUT);
	if (ret < 0) {
		reg = iwl_read_prph(trans,
				    LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0);

		IWL_ERR(trans, "Timeout on secure boot process, reg = %x\n",
			reg);
		return ret;
	}

	return 0;
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* init ref_count to 1 (should be cleared when ucode is loaded) */
	trans_pcie->ref_count = 1;

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
	    (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP))
		return iwl_pcie_load_given_ucode_8000b(trans, fw);
	else
		return iwl_pcie_load_given_ucode(trans, fw);
}
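
/*
 * Flow summary for iwl_trans_pcie_start_fw(): prepare the card (it may be
 * owned by AMT), publish the current RF-kill state, init the NIC, clear
 * and re-enable interrupts, then hand the image to the family-specific
 * loader above; only 8000-family parts past A-step take the _8000b path.
 */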
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	udelay(20);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		iwl_trans_pcie_stop_device(trans);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * reset TX queues -- some of their registers reset during S3
	 * so if we don't reset everything here the D3 image would try
	 * to execute some invalid memory upon resume
	 */
	iwl_trans_pcie_tx_reset(trans);

	iwl_pcie_set_pwr(trans, true);
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this call.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	iwl_trans_pcie_tx_reset(trans);

	ret = iwl_pcie_rx_init(trans);
	if (ret) {
		IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
		return ret;
	}

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}
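
/*
 * Resume mirrors iwl_trans_pcie_d3_suspend() above: re-arm ICT, re-assert
 * MAC access / init-done, wait for the MAC clock, switch back to VMAIN
 * power and re-init the TX/RX machinery; CSR_RESET_REG_FLAG_NEVO_RESET
 * then tells the op_mode whether the device lost its state while in S3.
 */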
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	bool hw_rfkill;
	int err;

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	usleep_range(10, 15);

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	return 0;
}
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* disable interrupts - don't enable HW RF kill interrupt */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_apm_stop(trans, true);

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_disable_ict(trans);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
		init_dummy_netdev(&trans_pcie->napi_dev);
		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
				     &trans_pcie->napi_dev,
				     iwl_pcie_dummy_napi_poll, 64);
	}
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	if (trans_pcie->napi.poll)
		netif_napi_del(&trans_pcie->napi);

	iwl_pcie_free_fw_monitor(trans);

	kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_in_flight)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);
			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
			return false;
		}
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we are acquiring the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_in_flight)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
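
/*
 * Caller sketch for the two helpers above (the address below is purely
 * illustrative; real callers pass op-mode-defined SRAM offsets):
 *
 *	u32 log[4];
 *
 *	if (!iwl_trans_pcie_read_mem(trans, 0x800000, log, 4))
 *		;	// log[] now holds four SRAM words
 *
 * Both helpers return -EBUSY when the NIC cannot be woken for access.
 */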
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->q.read_ptr == txq->q.write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		wr_ptr = ACCESS_ONCE(q->write_ptr);

		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(q->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			msleep(1);
		}

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}
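
/*
 * On flush failure the function above dumps, in order: the SW pointers of
 * the stuck queue, the scheduler status bytes from SRAM, the FH TRB
 * registers, and one line per HW queue with its fifo/ra_tid mapping -
 * enough to tell a stuck scheduler from a stuck DMA channel.
 */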
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	trans_pcie->ref_count++;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
		return;
	}
	trans_pcie->ref_count--;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};

	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->need_update, txq->frozen,
				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
			 rxq->write_actual);
	pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
			 rxq->need_update);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code:  0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < IWL_NUM_OF_TBS; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);

	return cmdlen;
}
static const struct {
	u32 start, end;
} iwl_prph_dump_addr[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a004c0, .end = 0x00a004cc },
	{ .start = 0x00a004d8, .end = 0x00a004d8 },
	{ .start = 0x00a004e0, .end = 0x00a004f0 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01094 },
	{ .start = 0x00a01c00, .end = 0x00a01c20 },
	{ .start = 0x00a01c58, .end = 0x00a01c58 },
	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
	{ .start = 0x00a01c28, .end = 0x00a01c54 },
	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
	{ .start = 0x00a01c60, .end = 0x00a01cdc },
	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
	{ .start = 0x00a01d18, .end = 0x00a01d20 },
	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
	{ .start = 0x00a01d40, .end = 0x00a01d5c },
	{ .start = 0x00a01d80, .end = 0x00a01d80 },
	{ .start = 0x00a01d98, .end = 0x00a01d9c },
	{ .start = 0x00a01da8, .end = 0x00a01da8 },
	{ .start = 0x00a01db8, .end = 0x00a01df4 },
	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
	{ .start = 0x00a01e00, .end = 0x00a01e2c },
	{ .start = 0x00a01e40, .end = 0x00a01e60 },
	{ .start = 0x00a01e68, .end = 0x00a01e6c },
	{ .start = 0x00a01e74, .end = 0x00a01e74 },
	{ .start = 0x00a01e84, .end = 0x00a01e90 },
	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
	{ .start = 0x00a01f00, .end = 0x00a01f1c },
	{ .start = 0x00a01f44, .end = 0x00a01ffc },
	{ .start = 0x00a02000, .end = 0x00a02048 },
	{ .start = 0x00a02068, .end = 0x00a020f0 },
	{ .start = 0x00a02100, .end = 0x00a02118 },
	{ .start = 0x00a02140, .end = 0x00a0214c },
	{ .start = 0x00a02168, .end = 0x00a0218c },
	{ .start = 0x00a021c0, .end = 0x00a021c0 },
	{ .start = 0x00a02400, .end = 0x00a02410 },
	{ .start = 0x00a02418, .end = 0x00a02420 },
	{ .start = 0x00a02428, .end = 0x00a0242c },
	{ .start = 0x00a02434, .end = 0x00a02434 },
	{ .start = 0x00a02440, .end = 0x00a02460 },
	{ .start = 0x00a02468, .end = 0x00a024b0 },
	{ .start = 0x00a024c8, .end = 0x00a024cc },
	{ .start = 0x00a02500, .end = 0x00a02504 },
	{ .start = 0x00a0250c, .end = 0x00a02510 },
	{ .start = 0x00a02540, .end = 0x00a02554 },
	{ .start = 0x00a02580, .end = 0x00a025f4 },
	{ .start = 0x00a02600, .end = 0x00a0260c },
	{ .start = 0x00a02648, .end = 0x00a02650 },
	{ .start = 0x00a02680, .end = 0x00a02680 },
	{ .start = 0x00a026c0, .end = 0x00a026d0 },
	{ .start = 0x00a02700, .end = 0x00a0270c },
	{ .start = 0x00a02804, .end = 0x00a02804 },
	{ .start = 0x00a02818, .end = 0x00a0281c },
	{ .start = 0x00a02c00, .end = 0x00a02db4 },
	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
	{ .start = 0x00a03000, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03048 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03074 },
	{ .start = 0x00a0307c, .end = 0x00a0307c },
	{ .start = 0x00a03080, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030bc },
	{ .start = 0x00a030c0, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
};
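/*
 * Read every range in iwl_prph_dump_addr into a PRPH dump section and
 * advance *data past it. Returns the number of bytes added to the dump,
 * or 0 if NIC access could not be acquired.
 */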
static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
				    struct iwl_fw_error_dump_data **data)
{
	struct iwl_fw_error_dump_prph *prph;
	unsigned long flags;
	u32 prph_len = 0, i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;
		int reg;
		__le32 *val;

		prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
		(*data)->len = cpu_to_le32(sizeof(*prph) +
					   num_bytes_in_chunk);
		prph = (void *)(*data)->data;
		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
		val = (void *)prph->data;

		for (reg = iwl_prph_dump_addr[i].start;
		     reg <= iwl_prph_dump_addr[i].end;
		     reg += 4)
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      reg));

		*data = iwl_fw_error_next_data(*data);
	}

	iwl_trans_release_nic_access(trans, &flags);

	return prph_len;
}
#define IWL_CSR_TO_DUMP (0x250)
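/* Dump the first IWL_CSR_TO_DUMP bytes of the CSR (host interface) space. */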
static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}
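/* Dump the flow handler (FH/DMA) register range; needs NIC access. */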
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	unsigned long flags;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	iwl_trans_release_nic_access(trans, &flags);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}
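/*
 * Top-level error dump: a first pass computes the worst-case buffer size
 * (header, host commands, CSR, PRPH, FH registers and, when available,
 * the firmware monitor), then a second pass fills each section in place.
 * The caller (via trans->ops->dump_data) owns the vzalloc'd buffer and is
 * expected to vfree() it once the dump has been consumed.
 */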
struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len;
	u32 monitor_len;
	int i, ptr;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* PRPH registers */
	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;

		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
		       num_bytes_in_chunk;
	}

	/* FH registers */
	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       trans_pcie->fw_mon_size;
		monitor_len = trans_pcie->fw_mon_size;
	} else if (trans->dbg_dest_tlv) {
		u32 base, end;

		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);

		base = iwl_read_prph(trans, base) <<
		       trans->dbg_dest_tlv->base_shift;
		end = iwl_read_prph(trans, end) <<
		      trans->dbg_dest_tlv->end_shift;

		/* Make "end" point to the actual end */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			end += (1 << trans->dbg_dest_tlv->end_shift);
		monitor_len = end - base;
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       monitor_len;
	} else {
		monitor_len = 0;
	}

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	/* second pass: len now tracks the bytes actually written */
	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->q.write_ptr;
	/* walk the command queue backwards from the write pointer */
	for (i = 0; i < cmdq->q.n_window; i++) {
		u8 idx = get_cmd_index(&cmdq->q, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_prph(trans, &data);
	len += iwl_trans_pcie_dump_csr(trans, &data);
	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	/* data is already pointing to the next section */
	if ((trans_pcie->fw_mon_page &&
	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
	    trans->dbg_dest_tlv) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
		u32 base, write_ptr, wrap_cnt;

		/* If there was a dest TLV - use the values from there */
		if (trans->dbg_dest_tlv) {
			write_ptr =
				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		} else {
			base = MON_BUFF_BASE_ADDR;
			write_ptr = MON_BUFF_WRPTR;
			wrap_cnt = MON_BUFF_CYCLE_CNT;
		}
		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)data->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, write_ptr));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, base));

		len += sizeof(*data) + sizeof(*fw_mon_data);
		if (trans_pcie->fw_mon_page) {
			data->len = cpu_to_le32(trans_pcie->fw_mon_size +
						sizeof(*fw_mon_data));
			/*
			 * The firmware is now asserted, it won't write anything
			 * to the buffer. CPU can take ownership to fetch the
			 * data. The buffer will be handed back to the device
			 * before the firmware will be restarted.
			 */
			dma_sync_single_for_cpu(trans->dev,
						trans_pcie->fw_mon_phys,
						trans_pcie->fw_mon_size,
						DMA_FROM_DEVICE);
			memcpy(fw_mon_data->data,
			       page_address(trans_pcie->fw_mon_page),
			       trans_pcie->fw_mon_size);

			len += trans_pcie->fw_mon_size;
		} else {
			/* If we are here then the buffer is internal */

			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			base = iwl_read_prph(trans, base) <<
			       trans->dbg_dest_tlv->base_shift;
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
			data->len = cpu_to_le32(sizeof(*fw_mon_data) +
						monitor_len);
			len += monitor_len;
		}
	}

	dump_data->len = len;

	return dump_data;
}
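/* PCIe implementation of the iwl_trans transport API */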
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,

	.ref = iwl_trans_pcie_ref,
	.unref = iwl_trans_pcie_unref,

	.dump_data = iwl_trans_pcie_dump_data,
};
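/*
 * Allocate and set up the PCIe transport: enable the PCI device, map
 * BAR0, configure the DMA masks and MSI, detect the HW revision, and
 * create the host command pool, ICT table and IRQ handler. Everything
 * is undone in reverse order on the error paths below.
 */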
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_lockdep_init(trans);
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->ref_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	err = pci_enable_device(pdev);
	if (err)
		goto out_no_pci;
	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}
	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);
	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		unsigned long flags;
		int ret;

		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

		/*
		 * in-order to recognize C step driver should read chip version
		 * id located at the AUX bus MISC address space.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (ret < 0) {
			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
			goto out_pci_disable_msi;
		}

		if (iwl_trans_grab_nic_access(trans, false, &flags)) {
			u32 hw_step;

			hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
			hw_step |= ENABLE_WFPM;
			__iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
			hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
						(SILICON_C_STEP << 2);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	if (!trans->dev_cmd_pool) {
		err = -ENOMEM;
		goto out_pci_disable_msi;
	}

	if (iwl_pcie_alloc_ict(trans)) {
		err = -ENOMEM;
		goto out_free_cmd_pool;
	}
	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
				   iwl_pcie_irq_handler,
				   IRQF_SHARED, DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND;

	return trans;
out_free_ict:
	iwl_pcie_free_ict(trans);
out_free_cmd_pool:
	kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return ERR_PTR(err);
}