/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "iwl-trans.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes once tx_free lives in the transport layer */
#include "iwl-shared.h"
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = bus(trans)->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__iwl_free_pages(priv(trans), rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
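
/*
 * Sketch of what the register writes in iwl_trans_rx_hw_init() below
 * assume (inferred from the FH_RSCSR register definitions): the RBD
 * circular-buffer base is programmed in 256-byte units (bd_dma >> 8)
 * and the status write-back address in 16-byte units (rb_stts_dma >> 4),
 * so both coherent allocations must be at least that aligned --
 * dma_alloc_coherent() gives us more than enough.
 */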
static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(priv(trans), rxq);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}
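
/*
 * Rx bookkeeping sketch for the fields initialized above: 'read' is the
 * driver's next index to process, 'write' the last slot handed back to
 * the device, and 'write_actual' the value last pushed to the hardware
 * write pointer (the device is fed in bursts, so write_actual appears
 * to trail 'write' rounded down to a multiple of 8 -- an assumption
 * based on how iwl_rx_queue_update_write_ptr() uses these fields).
 */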
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(bus(trans)->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}
static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
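
/*
 * Typical pairing of the two helpers above (illustrative only):
 *
 *	if (iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE))
 *		goto error;
 *	...
 *	iwlagn_free_dma_ptr(priv, &priv->kw);
 *
 * Keeping the CPU address, bus address and size together in one struct
 * means a free can never be issued with a stale size.
 */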
static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num, GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num, GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
				      GFP_KERNEL);
		if (!txq->cmd[i])
			goto error;
	}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != priv->shrd->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->txb);
	txq->txb = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}
static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
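	/*
	 * Why power of two: the wrap helpers can then reduce to a mask,
	 * e.g. with TFD_QUEUE_SIZE_MAX == 256, incrementing an index is
	 * effectively (index + 1) & 255, which wraps 255 -> 0 for free
	 * (illustrative; see iwl_queue_inc_wrap for the real helper).
	 */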
	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address
	 */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = priv->bus->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < txq->q.n_window; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, hw_params(priv).tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(priv).max_txq_num; txq_id++)
			iwl_tx_queue_free(priv, txq_id);
	}

	kfree(priv->txq);
	priv->txq = NULL;

	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(priv->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				hw_params(priv).scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error;
	}

	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			priv->cfg->base_params->num_of_queues, GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
				       txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_tx_free(trans(priv));

	return ret;
}
static int iwl_tx_init(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!priv->txq) {
		ret = iwl_trans_tx_alloc(priv);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&priv->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(priv, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
					 txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_tx_free(trans(priv));
	return ret;
}
static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
static int iwl_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&priv->shrd->lock, flags);
	iwl_apm_init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	iwl_set_pwr_vmain(priv);

	priv->cfg->lib->nic_config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans(priv));

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(priv))
		return -ENOMEM;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
			0x800FFFFF);
	}

	set_bit(STATUS_INIT, &priv->shrd->status);

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
	int ret;

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
{
	int ret;

	IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(priv);

	return ret;
}
static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
{
	int ret;

	priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

	if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_pcie_prepare_card_hw(priv)) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);

	if (iwl_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
		iwl_enable_interrupts(trans(priv));
		return -ERFKILL;
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans(priv));

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask;
 * must be called under priv->shrd->lock and with MAC access held
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, SCD_TXFACT, mask);
}
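
/*
 * Static maps from the first hardware Tx queues to flow-handler FIFOs
 * and mac80211 ACs.  Entries carrying IWL_AC_UNSET have no mac80211
 * queue behind them (the command FIFO, unused FIFOs and the aux FIFO);
 * the IPAN table below reuses the *_IPAN FIFOs for the PAN context.
 */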
#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};
static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->shrd->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* reset tx status memory */
	for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
		SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(priv).max_txq_num; i++) {
		iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, SCD_INTERRUPT_MASK,
			IWL_MASK(0, hw_params(priv).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) !=
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) !=
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
/**
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->shrd->lock, flags);

	iwl_trans_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	if (!priv->txq) {
		IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(priv, txq_id);

	return 0;
}
static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
{
	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	iwl_trans_disable_sync_irq(trans(priv));

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans(priv));

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
		iwl_trans_tx_stop(priv);
		iwl_trans_rx_stop(priv);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(priv, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv);
}
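
/*
 * uCode echoes hdr.sequence back in its Tx response, so the driver packs
 * the hardware queue number and the TFD index into it before the frame
 * goes out.  Given the QUEUE_TO_SEQ/INDEX_TO_SEQ macro names, the
 * plausible layout is seq = (txq_id << 8) | write_ptr, which is what
 * lets the response be matched back to the right queue entry afterwards.
 */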
static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_priv *priv,
						int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		return NULL;

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	dev_cmd = txq->cmd[q->write_ptr];
	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));
	return &dev_cmd->cmd.tx;
}
static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
		struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
		struct iwl_rxon_context *ctx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
	struct iwl_cmd_meta *out_meta;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	u8 hdr_len = ieee80211_hdrlen(fc);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;
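	/*
	 * Worked example (illustrative): with a 26-byte QoS header, len is
	 * two short of a dword multiple, so (len + 3) & ~3 rounds firstlen
	 * up to the next multiple of 4, leaving 2 pad bytes the device
	 * must be told to skip.
	 */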
	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(priv->bus->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
			dma_unmap_single(priv->bus->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (ampdu)
		iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
					       le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
		} else {
			iwl_stop_queue(priv, txq);
		}
	}
	return 0;
}
static void iwl_trans_pcie_kick_nic(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
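
/*
 * Interrupt handling below uses ICT (interrupt cause table) mode: the
 * device DMAs interrupt causes into a host-resident table that
 * iwl_isr_ict() walks, sparing a register read in the hot path.  This
 * is a summary of the iwl_alloc_isr_ict()/iwl_isr_ict() contract as
 * used here, not a full description of the mechanism.
 */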
static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)trans);

	iwl_alloc_isr_ict(trans);

	err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
		DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
		iwl_free_isr_ict(trans);
		return err;
	}

	INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
	return 0;
}
static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* wait to make sure we flush any pending tasklet */
	synchronize_irq(bus(trans)->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);
}
static void iwl_trans_pcie_free(struct iwl_priv *priv)
{
	free_irq(priv->bus->irq, trans(priv));
	iwl_free_isr_ict(trans(priv));
	kfree(trans(priv));
}
#ifdef CONFIG_PM

static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend
	 * function first, but since iwl_mac_stop() has no knowledge of who
	 * the caller is, it will not call apm_ops.stop() to stop the DMA
	 * operation. Call apm_ops.stop here to make sure we stop the DMA.
	 *
	 * But of course ... if we have configured WoWLAN then we did other
	 * things already :-)
	 */
	if (!trans->shrd->wowlan)
		iwl_apm_stop(priv(trans));

	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill = false;

	iwl_enable_interrupts(trans);

	if (!(iwl_read32(priv(trans), CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	wiphy_rfkill_set_hw_state(priv(trans)->hw->wiphy, hw_rfkill);

	return 0;
}

#else /* CONFIG_PM */
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{ return 0; }

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{ return 0; }
#endif /* CONFIG_PM */
const struct iwl_trans_ops trans_ops_pcie;

static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
{
	struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
					      sizeof(struct iwl_trans_pcie),
					      GFP_KERNEL);
	if (iwl_trans) {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
		iwl_trans->ops = &trans_ops_pcie;
		iwl_trans->shrd = shrd;
		trans_pcie->trans = iwl_trans;
	}

	return iwl_trans;
}
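
/*
 * Note on the allocation above: the generic iwl_trans and the PCIe
 * private state share one kzalloc'd block, and IWL_TRANS_GET_PCIE_TRANS
 * presumably just offsets past struct iwl_trans to reach the
 * struct iwl_trans_pcie that follows it in memory.
 */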
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
					char __user *user_buf,		\
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};
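
/*
 * Example expansion (illustrative): DEBUGFS_READ_WRITE_FILE_OPS(traffic_log)
 * declares iwl_dbgfs_traffic_log_read()/_write() and defines the matching
 * iwl_dbgfs_traffic_log_ops, which DEBUGFS_ADD_FILE(traffic_log, ...) later
 * binds to a "traffic_log" entry in the transport's debugfs directory.
 */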
static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_priv *priv = priv(trans);
	int pos = 0, ofs = 0;
	int cnt = 0, entry;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char *buf;
	int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
		(priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
	const u8 *ptr;
	ssize_t ret;

	if (!priv->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Can not allocate buffer\n");
		return -ENOMEM;
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		txq = &priv->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"q[%d]: read_ptr: %u, write_ptr: %u\n",
				cnt, q->read_ptr, q->write_ptr);
	}
	if (priv->tx_traffic &&
		(iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
		ptr = priv->tx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Tx Traffic idx: %u\n", priv->tx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++,  ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
	pos += scnprintf(buf + pos, bufsz - pos,
			"read: %u, write: %u\n",
			 rxq->read, rxq->write);

	if (priv->rx_traffic &&
		(iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
		ptr = priv->rx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Rx Traffic idx: %u\n", priv->rx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++,  ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int traffic_log;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &traffic_log) != 1)
		return -EFAULT;
	if (traffic_log == 0)
		iwl_reset_traffic_log(priv(trans));

	return count;
}
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				char __user *user_buf,
				size_t count, loff_t *ppos) {

	struct iwl_trans *trans = file->private_data;
	struct iwl_priv *priv = priv(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	const size_t bufsz = sizeof(char) * 64 *
				priv->cfg->base_params->num_of_queues;

	if (!priv->txq) {
		IWL_ERR(priv, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		txq = &priv->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u stop=%d"
				" swq_id=%#.2x (ac %d/hwq %d)\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, priv->queue_stopped),
				txq->swq_id, txq->swq_id & 3,
				(txq->swq_id >> 2) & 0x1f);
		if (cnt >= 4)
			continue;
		/* for the ACs, display the stop count too */
		pos += scnprintf(buf + pos, bufsz - pos,
				"        stop-count: %d\n",
				atomic_read(&priv->queue_stop_count[cnt]));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				char __user *user_buf,
				size_t count, loff_t *ppos) {
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					struct dentry *dir)
{
	DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					struct dentry *dir)
{ return 0; }

#endif /*CONFIG_IWLWIFI_DEBUGFS */
const struct iwl_trans_ops trans_ops_pcie = {
	.alloc = iwl_trans_pcie_alloc,
	.request_irq = iwl_trans_pcie_request_irq,
	.start_device = iwl_trans_pcie_start_device,
	.prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
	.stop_device = iwl_trans_pcie_stop_device,

	.tx_start = iwl_trans_pcie_tx_start,

	.rx_free = iwl_trans_pcie_rx_free,
	.tx_free = iwl_trans_pcie_tx_free,

	.send_cmd = iwl_trans_pcie_send_cmd,
	.send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,

	.get_tx_cmd = iwl_trans_pcie_get_tx_cmd,
	.tx = iwl_trans_pcie_tx,

	.txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
	.txq_agg_setup = iwl_trans_pcie_txq_agg_setup,

	.kick_nic = iwl_trans_pcie_kick_nic,

	.disable_sync_irq = iwl_trans_pcie_disable_sync_irq,
	.free = iwl_trans_pcie_free,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
};