/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>
#include <linux/debugfs.h>

#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes once the transport layer tx_free lives here */
#include "iwl-agn.h"
#include "iwl-shared.h"
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        struct device *dev = bus(trans)->dev;

        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

        spin_lock_init(&rxq->lock);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;

        /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                                     &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;
        memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

        /* Allocate the driver's pointer to receive buffer status */
        rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
                                          &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb_stts;
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

        return 0;

err_rb_stts:
        dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                        rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;
err_bd:
        return -ENOMEM;
}
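
/*
 * Editor's sizing note (illustrative, assuming RX_QUEUE_SIZE == 256): the
 * RBD ring allocated above is 256 * sizeof(__le32) = 1 KiB of coherent DMA
 * memory; each __le32 entry is sized to hold a receive buffer bus address
 * in the same right-shifted-by-8 form used when the ring base itself is
 * programmed in iwl_trans_rx_hw_init() below.
 */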

static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        int i;

        /* Fill the rx_used queue with _all_ of the Rx buffers */
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                /* In the reset function, these buffers may have been allocated
                 * to an SKB, so we need to unmap and free potential storage */
                if (rxq->pool[i].page != NULL) {
                        dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
                                PAGE_SIZE << hw_params(trans).rx_page_order,
                                DMA_FROM_DEVICE);
                        __iwl_free_pages(priv(trans), rxq->pool[i].page);
                        rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
        }
}

static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
                                 struct iwl_rx_queue *rxq)
{
        u32 rb_size;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
        u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

        if (iwlagn_mod_params.amsdu_size_8K)
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
        else
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

        /* Stop Rx DMA */
        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

        /* Reset driver's Rx queue write index */
        iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                           (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                           rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
         *      the credit mechanism of the 5000-series HW RX FIFO
         * Direct rx interrupts to host
         * Rx buffer size 4 or 8k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                           FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                           FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                           FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                           FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
                           rb_size |
                           (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                           (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
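
/*
 * Worked example (editor's illustration): with amsdu_size_8K unset, the
 * value written to FH_MEM_RCSR_CHNL0_CONFIG_REG above is the OR of the
 * DMA-enable bit, the "ignore RXF empty" workaround bit, the IRQ-to-host
 * destination, single-frame mode, the 4K buffer-size code, rb_timeout
 * (0x10) shifted into the RB-threshold field, and rfdnlog (8, i.e. log2
 * of the 256 RBDs) shifted into the RBDCB-size field.
 */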

static int iwl_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;

        int i, err;
        unsigned long flags;

        if (!rxq->bd) {
                err = iwl_trans_rx_alloc(trans);
                if (err)
                        return err;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        iwl_trans_rxq_free_rx_bufs(trans);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                rxq->queue[i] = NULL;

        /* Set us up so that we have processed and used all buffers, but
         * have not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        rxq->free_count = 0;
        spin_unlock_irqrestore(&rxq->lock, flags);

        iwlagn_rx_replenish(trans);

        iwl_trans_rx_hw_init(priv(trans), rxq);

        spin_lock_irqsave(&trans->shrd->lock, flags);
        rxq->need_update = 1;
        iwl_rx_queue_update_write_ptr(trans, rxq);
        spin_unlock_irqrestore(&trans->shrd->lock, flags);

        return 0;
}
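
/*
 * Editor's note: zeroing read, write and free_count above encodes
 * "all buffers processed, none restocked"; it is iwlagn_rx_replenish()
 * plus the write-pointer update at the end of iwl_rx_init() that
 * actually hands fresh buffers back to the device.
 */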

static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;

        unsigned long flags;

        /* If rxq->bd is NULL, nothing has been allocated;
         * exit now */
        if (!rxq->bd) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        iwl_trans_rxq_free_rx_bufs(trans);
        spin_unlock_irqrestore(&rxq->lock, flags);

        dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;

        if (rxq->rb_stts)
                dma_free_coherent(bus(trans)->dev,
                                  sizeof(struct iwl_rb_status),
                                  rxq->rb_stts, rxq->rb_stts_dma);
        else
                IWL_DEBUG_INFO(trans, "rxq->rb_stts is NULL, nothing to free\n");
        memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
        rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
        /* stop Rx DMA */
        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
                            FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
                                    struct iwl_dma_ptr *ptr, size_t size)
{
        if (WARN_ON(ptr->addr))
                return -EINVAL;

        ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
                                       &ptr->dma, GFP_KERNEL);
        if (!ptr->addr)
                return -ENOMEM;
        ptr->size = size;
        return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
                                    struct iwl_dma_ptr *ptr)
{
        if (unlikely(!ptr->addr))
                return;

        dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
        memset(ptr, 0, sizeof(*ptr));
}
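
/*
 * Usage sketch (editor's illustration; see iwl_trans_tx_alloc()/_free()
 * below for the real call sites):
 *
 *      ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
 *      ...
 *      iwlagn_free_dma_ptr(priv, &priv->kw);
 *
 * The free side is safe to call on a zeroed, never-allocated iwl_dma_ptr
 * because it checks ptr->addr first and re-zeroes the struct when done.
 */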

static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id)
{
        size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
        int i;

        if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
                return -EINVAL;

        txq->q.n_window = slots_num;

        txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
                            GFP_KERNEL);
        txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
                           GFP_KERNEL);

        if (!txq->meta || !txq->cmd)
                goto error;

        for (i = 0; i < slots_num; i++) {
                txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
                                        GFP_KERNEL);
                if (!txq->cmd[i])
                        goto error;
        }

        /* Alloc driver data array and TFD circular buffer */
        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (txq_id != priv->shrd->cmd_queue) {
                txq->txb = kzalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(priv, "Allocation of auxiliary BD "
                                  "structures failed\n");
                        goto error;
                }
        } else {
                txq->txb = NULL;
        }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
                                       GFP_KERNEL);
        if (!txq->tfds) {
                IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
                goto error;
        }
        txq->q.id = txq_id;

        return 0;
error:
        kfree(txq->txb);
        txq->txb = NULL;
        /* since txq->cmd was kzalloc'ed,
         * all non-allocated cmd[i] will be NULL */
        if (txq->cmd)
                for (i = 0; i < slots_num; i++)
                        kfree(txq->cmd[i]);
        kfree(txq->meta);
        kfree(txq->cmd);
        txq->meta = NULL;
        txq->cmd = NULL;

        return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id)
{
        int ret;

        txq->need_update = 0;
        memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

        /*
         * For the default queues 0-3, set up the swq_id
         * already -- all others need to get one later
         * (if they need one at all).
         */
        if (txq_id < 4)
                iwl_set_swq_id(txq, txq_id, txq_id);

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
                        txq_id);
        if (ret)
                return ret;

        /*
         * Tell the NIC where to find the circular buffer of Tx Frame
         * Descriptors for the given Tx queue, and enable the DMA channel
         * used for that queue.
         * Circular buffer (TFD queue in DRAM) physical base address */
        iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                             txq->q.dma_addr >> 8);

        return 0;
}
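
/*
 * Editor's note (assumption about alignment, not verified against the HW
 * spec): as with the Rx ring, the TFD base is programmed as a bus address
 * shifted right by 8 bits, so the TFD buffer must be at least 256-byte
 * aligned; dma_alloc_coherent() returns page-aligned memory in practice,
 * which satisfies this.
 */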

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;

        if (!q->n_bd)
                return;

        while (q->write_ptr != q->read_ptr) {
                /* The read_ptr needs to be bounded by q->n_window */
                iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}
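
/*
 * Example (editor's illustration, assuming TFD_QUEUE_SIZE_MAX == 256):
 * iwl_queue_inc_wrap(255, 256) yields 0, i.e. the read pointer wraps at
 * the ring size. The power-of-two size is enforced by the BUILD_BUG_ON
 * in iwl_trans_txq_init() precisely so this wrap stays a cheap operation.
 */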

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct device *dev = priv->bus->dev;
        int i;

        if (WARN_ON(!txq))
                return;

        iwl_tx_queue_unmap(priv, txq_id);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < txq->q.n_window; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd) {
                dma_free_coherent(dev, hw_params(priv).tfd_size *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
                memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
        }

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
{
        int txq_id;

        /* Tx queues */
        if (priv->txq) {
                for (txq_id = 0;
                     txq_id < hw_params(priv).max_txq_num; txq_id++)
                        iwl_tx_queue_free(priv, txq_id);
        }

        kfree(priv->txq);
        priv->txq = NULL;

        iwlagn_free_dma_ptr(priv, &priv->kw);

        iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 *
 * Allocate all Tx DMA structures and initialize them.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
        int ret;
        int txq_id, slots_num;

        /* It is not allowed to alloc twice, so warn when this happens.
         * We cannot rely on the previous allocation, so free and fail */
        if (WARN_ON(priv->txq)) {
                ret = -EINVAL;
                goto error;
        }

        ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
                                hw_params(priv).scd_bc_tbls_size);
        if (ret) {
                IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
                goto error;
        }

        /* Alloc keep-warm buffer */
        ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
        if (ret) {
                IWL_ERR(priv, "Keep Warm allocation failed\n");
                goto error;
        }

        priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
                        priv->cfg->base_params->num_of_queues, GFP_KERNEL);
        if (!priv->txq) {
                IWL_ERR(priv, "Not enough memory for txq\n");
                ret = -ENOMEM;
                goto error;
        }

        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
                slots_num = (txq_id == priv->shrd->cmd_queue) ?
                                        TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
                                       txq_id);
                if (ret) {
                        IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
                        goto error;
                }
        }

        return 0;

error:
        iwl_trans_tx_free(trans(priv));

        return ret;
}

static int iwl_tx_init(struct iwl_priv *priv)
{
        int ret;
        int txq_id, slots_num;
        unsigned long flags;
        bool alloc = false;

        if (!priv->txq) {
                ret = iwl_trans_tx_alloc(priv);
                if (ret)
                        goto error;
                alloc = true;
        }

        spin_lock_irqsave(&priv->shrd->lock, flags);

        /* Turn off all Tx DMA fifos */
        iwl_write_prph(priv, SCD_TXFACT, 0);

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
                slots_num = (txq_id == priv->shrd->cmd_queue) ?
                                        TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
                                       txq_id);
                if (ret) {
                        IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
                        goto error;
                }
        }

        return 0;

error:
        /* Upon error, free only if we allocated something */
        if (alloc)
                iwl_trans_tx_free(trans(priv));
        return ret;
}
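
/*
 * Editor's note: iwl_tx_init() is written to be callable both at first
 * bring-up and across a firmware restart; the "alloc" flag records
 * whether this invocation created priv->txq, so the error path frees
 * only what it allocated itself rather than tearing down live queues.
 */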

static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

                if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
                        iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
                                               APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
                                               ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

        iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
                               APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                               ~APMG_PS_CTRL_MSK_PWR_SRC);
}

static int iwl_nic_init(struct iwl_priv *priv)
{
        unsigned long flags;

        /* nic_init */
        spin_lock_irqsave(&priv->shrd->lock, flags);
        iwl_apm_init(priv);

        /* Set interrupt coalescing calibration timer to default (512 usecs) */
        iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        iwl_set_pwr_vmain(priv);

        priv->cfg->lib->nic_config(priv);

        /* Allocate the RX queue, or reset if it is already allocated */
        iwl_rx_init(trans(priv));

        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_tx_init(priv))
                return -ENOMEM;

        if (priv->cfg->base_params->shadow_reg_enable) {
                /* enable shadow regs in HW */
                iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
                        0x800FFFFF);
        }

        set_bit(STATUS_INIT, &priv->shrd->status);

        return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 on success */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
        int ret;

        iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        /* See if we got it */
        ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
                                CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                                CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                                HW_READY_TIMEOUT);

        IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
        return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
{
        int ret;

        IWL_DEBUG_INFO(priv, "iwl_trans_pcie_prepare_card_hw enter\n");

        ret = iwl_set_hw_ready(priv);
        if (ret >= 0)
                return 0;

        /* If HW is not ready, prepare the conditions to check again */
        iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                        CSR_HW_IF_CONFIG_REG_PREPARE);

        ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
                        ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
                        CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

        if (ret < 0)
                return ret;

        /* HW should be ready by now, check again. */
        ret = iwl_set_hw_ready(priv);
        if (ret >= 0)
                return 0;
        return ret;
}

static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
{
        int ret;

        priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

        if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
             iwl_trans_pcie_prepare_card_hw(priv)) {
                IWL_WARN(priv, "Exit HW not ready\n");
                return -EIO;
        }

        /* If platform's RF_KILL switch is NOT set to KILL */
        if (iwl_read32(priv, CSR_GP_CNTRL) &
                        CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
                clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
        else
                set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);

        if (iwl_is_rfkill(priv)) {
                wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
                iwl_enable_interrupts(trans(priv));
                return -ERFKILL;
        }

        iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

        ret = iwl_nic_init(priv);
        if (ret) {
                IWL_ERR(priv, "Unable to init nic\n");
                return ret;
        }

        /* make sure rfkill handshake bits are cleared */
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
        iwl_enable_interrupts(trans(priv));

        /* really make sure rfkill handshake bits are cleared */
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

        return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask.
 * Must be called under priv->shrd->lock and with MAC access held.
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
        iwl_write_prph(priv, SCD_TXFACT, mask);
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
        s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
        { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
        { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
        { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
        { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
        { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
        { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
        { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};
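
/*
 * Editor's note: these tables are indexed by Tx queue number for queues
 * 0..IWLAGN_FIRST_AMPDU_QUEUE-1, which is why the BUILD_BUG_ONs in
 * iwl_trans_pcie_tx_start() below check both array sizes against
 * IWLAGN_FIRST_AMPDU_QUEUE. Entries with IWL_AC_UNSET get no software
 * queue id; entries with IWL_TX_FIFO_UNUSED are skipped entirely.
 */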
static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
{
        const struct queue_to_fifo_ac *queue_to_fifo;
        struct iwl_rxon_context *ctx;
        u32 a;
        unsigned long flags;
        int i, chan;
        u32 reg_val;

        spin_lock_irqsave(&priv->shrd->lock, flags);

        priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
        a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
        /* reset context data memory */
        for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
                a += 4)
                iwl_write_targ_mem(priv, a, 0);
        /* reset tx status memory */
        for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
                a += 4)
                iwl_write_targ_mem(priv, a, 0);
        for (; a < priv->scd_base_addr +
               SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
               a += 4)
                iwl_write_targ_mem(priv, a, 0);

        iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
                       priv->scd_bc_tbls.dma >> 10);

        /* Enable DMA channel */
        for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
                iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
                                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

        /* Update FH chicken bits */
        reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
        iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
                           reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
                SCD_QUEUECHAIN_SEL_ALL(priv));
        iwl_write_prph(priv, SCD_AGGR_SEL, 0);

        /* initialize the queues */
        for (i = 0; i < hw_params(priv).max_txq_num; i++) {
                iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
                iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
                iwl_write_targ_mem(priv, priv->scd_base_addr +
                                SCD_CONTEXT_QUEUE_OFFSET(i), 0);
                iwl_write_targ_mem(priv, priv->scd_base_addr +
                                SCD_CONTEXT_QUEUE_OFFSET(i) +
                                sizeof(u32),
                                ((SCD_WIN_SIZE <<
                                SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                                SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                                ((SCD_FRAME_LIMIT <<
                                SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                                SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
        }

        iwl_write_prph(priv, SCD_INTERRUPT_MASK,
                        IWL_MASK(0, hw_params(priv).max_txq_num));

        /* Activate all Tx DMA/FIFO channels */
        iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

        /* map queues to FIFOs */
        if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
                queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
        else
                queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

        iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);

        /* make sure all queues are not stopped */
        memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
        for (i = 0; i < 4; i++)
                atomic_set(&priv->queue_stop_count[i], 0);
        for_each_context(priv, ctx)
                ctx->last_tx_rejected = false;

        /* reset to 0 to enable all the queues first */
        priv->txq_ctx_active_msk = 0;

        BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) !=
                                                IWLAGN_FIRST_AMPDU_QUEUE);
        BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) !=
                                                IWLAGN_FIRST_AMPDU_QUEUE);

        for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
                int fifo = queue_to_fifo[i].fifo;
                int ac = queue_to_fifo[i].ac;

                iwl_txq_ctx_activate(priv, i);

                if (fifo == IWL_TX_FIFO_UNUSED)
                        continue;

                if (ac != IWL_AC_UNSET)
                        iwl_set_swq_id(&priv->txq[i], ac, i);
                iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
        }

        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        /* Enable L1-Active */
        iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
                          APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
        int ch, txq_id;
        unsigned long flags;

        /* Turn off all Tx DMA fifos */
        spin_lock_irqsave(&priv->shrd->lock, flags);

        iwl_trans_txq_set_sched(priv, 0);

        /* Stop each Tx DMA channel, and wait for it to be idle */
        for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
                iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
                if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
                                    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
                                    1000))
                        IWL_ERR(priv, "Failing on timeout while stopping"
                            " DMA channel %d [0x%08x]\n", ch,
                            iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
        }
        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        if (!priv->txq) {
                IWL_WARN(priv, "Stopping tx queues that aren't allocated...\n");
                return 0;
        }

        /* Unmap DMA from host system and free skb's */
        for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
                iwl_tx_queue_unmap(priv, txq_id);

        return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
{
        /* stop and reset the on-board processor */
        iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

        /* tell the device to stop sending interrupts */
        iwl_trans_disable_sync_irq(trans(priv));

        /* device going down, stop using ICT table */
        iwl_disable_ict(trans(priv));

        /*
         * If a HW restart happens during firmware loading,
         * then the firmware loading might call this function
         * and later it might be called again due to the
         * restart. So don't process again if the device is
         * already dead.
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
                iwl_trans_tx_stop(priv);
                iwl_trans_rx_stop(priv);

                /* Power-down device's busmaster DMA clocks */
                iwl_write_prph(priv, APMG_CLK_DIS_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
        iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwl_apm_stop(priv);
}

static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_priv *priv,
                                                int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *dev_cmd;

        if (unlikely(iwl_queue_space(q) < q->high_mark))
                return NULL;

        /*
         * Set up the Tx-command (not MAC!) header.
         * Store the chosen Tx queue and TFD index within the sequence field;
         * after Tx, uCode's Tx response will return this value so driver can
         * locate the frame within the tx queue and do post-tx processing.
         */
        dev_cmd = txq->cmd[q->write_ptr];
        memset(dev_cmd, 0, sizeof(*dev_cmd));
        dev_cmd->hdr.cmd = REPLY_TX;
        dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                                INDEX_TO_SEQ(q->write_ptr)));
        return &dev_cmd->cmd.tx;
}
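
/*
 * Illustrative example (editor's): for txq_id 2 and write_ptr 5 the
 * header sequence becomes QUEUE_TO_SEQ(2) | INDEX_TO_SEQ(5); the uCode
 * echoes this value in its Tx response, which is how the driver later
 * locates the right queue slot for post-Tx processing.
 */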

static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
                struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
                struct iwl_rxon_context *ctx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
        struct iwl_cmd_meta *out_meta;

        dma_addr_t phys_addr = 0;
        dma_addr_t txcmd_phys;
        dma_addr_t scratch_phys;
        u16 len, firstlen, secondlen;
        u8 wait_write_ptr = 0;
        u8 hdr_len = ieee80211_hdrlen(fc);

        /* Set up driver data for this TFD */
        memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
        txq->txb[q->write_ptr].skb = skb;
        txq->txb[q->write_ptr].ctx = ctx;

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->meta[q->write_ptr];

        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
         * (payload data will be in another buffer).
         * Size of this varies, due to varying MAC header length.
         * If end is not dword aligned, we'll have 2 extra bytes at the end
         * of the MAC header (device reads on dword boundaries).
         * We'll tell device about this padding later.
         */
        len = sizeof(struct iwl_tx_cmd) +
                sizeof(struct iwl_cmd_header) + hdr_len;
        firstlen = (len + 3) & ~3;

        /* Tell NIC about any 2-byte padding after MAC header */
        if (firstlen != len)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys = dma_map_single(priv->bus->dev,
                                    &dev_cmd->hdr, firstlen,
                                    DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
                return -1;
        dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
        dma_unmap_len_set(out_meta, len, firstlen);

        if (!ieee80211_has_morefrags(fc)) {
                txq->need_update = 1;
        } else {
                wait_write_ptr = 1;
                txq->need_update = 0;
        }

        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
        secondlen = skb->len - hdr_len;
        if (secondlen > 0) {
                phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
                                           secondlen, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
                        dma_unmap_single(priv->bus->dev,
                                         dma_unmap_addr(out_meta, mapping),
                                         dma_unmap_len(out_meta, len),
                                         DMA_BIDIRECTIONAL);
                        return -1;
                }
        }

        /* Attach buffers to TFD */
        iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
        if (secondlen > 0)
                iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
                                             secondlen, 0);

        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                                offsetof(struct iwl_tx_cmd, scratch);

        /* take back ownership of DMA buffer to enable update */
        dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
                        DMA_BIDIRECTIONAL);
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

        IWL_DEBUG_TX(priv, "sequence nr = 0x%x\n",
                     le16_to_cpu(dev_cmd->hdr.sequence));
        IWL_DEBUG_TX(priv, "tx_flags = 0x%x\n", le32_to_cpu(tx_cmd->tx_flags));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

        /* Set up entry for this TFD in Tx byte-count array */
        if (ampdu)
                iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
                                               le16_to_cpu(tx_cmd->len));

        dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
                        DMA_BIDIRECTIONAL);

        trace_iwlwifi_dev_tx(priv,
                             &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
                             sizeof(struct iwl_tfd),
                             &dev_cmd->hdr, firstlen,
                             skb->data + hdr_len, secondlen);

        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_txq_update_write_ptr(priv, txq);

        /*
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually.
         * What follows only decides whether the write pointer
         * needs another update and whether the queue must be
         * stopped until space frees up.
         */
        if (iwl_queue_space(q) < q->high_mark) {
                if (wait_write_ptr) {
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(priv, txq);
                } else {
                        iwl_stop_queue(priv, txq);
                }
        }
        return 0;
}
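
/*
 * Worked padding example (editor's illustration, sizes hypothetical):
 * if sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) plus a
 * 26-byte 802.11 header came to 58 bytes, firstlen = (58 + 3) & ~3 = 60,
 * leaving 2 pad bytes after the MAC header; TX_CMD_FLG_MH_PAD_MSK then
 * tells the device to skip them when it reads on dword boundaries.
 */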

static void iwl_trans_pcie_kick_nic(struct iwl_priv *priv)
{
        /* Remove all resets to allow NIC to operate */
        iwl_write32(priv, CSR_RESET, 0);
}

static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        int err;

        trans_pcie->inta_mask = CSR_INI_SET_MASK;

        tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
                iwl_irq_tasklet, (unsigned long)trans);

        iwl_alloc_isr_ict(trans);

        err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
                DRV_NAME, trans);
        if (err) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
                iwl_free_isr_ict(trans);
                return err;
        }

        INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
        return 0;
}

static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id,
                      int ssn, u32 status, struct sk_buff_head *skbs)
{
        struct iwl_priv *priv = priv(trans);
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        /* n_bd is usually 256 => n_bd - 1 = 0xff */
        int tfd_num = ssn & (txq->q.n_bd - 1);
        u8 agg_state;
        bool cond;

        if (txq->sched_retry) {
                agg_state =
                        priv->stations[txq->sta_id].tid[txq->tid].agg.state;
                cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
        } else {
                cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
        }

        if (txq->q.read_ptr != tfd_num) {
                IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
                                "scd_ssn=%d idx=%d txq=%d swq=%d\n",
                                ssn, tfd_num, txq_id, txq->swq_id);
                iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
                if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
                        iwl_wake_queue(priv, txq);
        }
}
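
/*
 * Example (editor's illustration): with n_bd == 256, an ssn of 0x107
 * masks to tfd_num = 0x107 & 0xff = 7, i.e. the scheduler sequence
 * number is reduced to a ring index before reclaiming everything up to,
 * but not including, that entry.
 */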

static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
{
        unsigned long flags;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock_irqsave(&trans->shrd->lock, flags);
        iwl_disable_interrupts(trans);
        spin_unlock_irqrestore(&trans->shrd->lock, flags);

        /* wait to make sure we flush any pending tasklet */
        synchronize_irq(bus(trans)->irq);
        tasklet_kill(&trans_pcie->irq_tasklet);
}

static void iwl_trans_pcie_free(struct iwl_priv *priv)
{
        free_irq(priv->bus->irq, trans(priv));
        iwl_free_isr_ict(trans(priv));
        kfree(trans(priv));
        trans(priv) = NULL;
}

#ifdef CONFIG_PM

static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
        /*
         * This function is called when the system goes into suspend state.
         * mac80211 will call iwl_mac_stop() from the mac80211 suspend
         * function first, but since iwl_mac_stop() has no knowledge of
         * who the caller is, it will not call apm_ops.stop() to stop the
         * DMA operation. Call apm_ops.stop() here to make sure the DMA
         * is stopped.
         *
         * But of course ... if we have configured WoWLAN then we did other
         * things already :-)
         */
        if (!trans->shrd->wowlan)
                iwl_apm_stop(priv(trans));

        return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
        bool hw_rfkill = false;

        iwl_enable_interrupts(trans);

        if (!(iwl_read32(priv(trans), CSR_GP_CNTRL) &
                                CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
                hw_rfkill = true;

        if (hw_rfkill)
                set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
        else
                clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

        wiphy_rfkill_set_hw_state(priv(trans)->hw->wiphy, hw_rfkill);

        return 0;
}

#else /* CONFIG_PM */

static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{ return 0; }

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{ return 0; }

#endif /* CONFIG_PM */

const struct iwl_trans_ops trans_ops_pcie;

static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
{
        struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
                                              sizeof(struct iwl_trans_pcie),
                                              GFP_KERNEL);
        if (iwl_trans) {
                struct iwl_trans_pcie *trans_pcie =
                        IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
                iwl_trans->ops = &trans_ops_pcie;
                iwl_trans->shrd = shrd;
                trans_pcie->trans = iwl_trans;
        }

        return iwl_trans;
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {                       \
        if (!debugfs_create_file(#name, mode, parent, trans,            \
                                 &iwl_dbgfs_##name##_ops))              \
                return -ENOMEM;                                         \
} while (0)
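
/*
 * Usage sketch (editor's illustration; "dir" is a hypothetical parent
 * dentry, and the real call sites live in the debugfs registration code):
 *
 *      DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
 *
 * This expands to a guarded debugfs_create_file("traffic_log", ...) call
 * that makes the enclosing function return -ENOMEM on failure. Note the
 * macro assumes a variable named "trans" is in scope for the file's
 * private data.
 */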

/* file operations */
#define DEBUGFS_READ_FUNC(name)                                         \
static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
                                        char __user *user_buf,          \
                                        size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)                                        \
static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
                                        const char __user *user_buf,    \
                                        size_t count, loff_t *ppos);


static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
        file->private_data = inode->i_private;
        return 0;
}

#define DEBUGFS_READ_FILE_OPS(name)                                     \
        DEBUGFS_READ_FUNC(name);                                        \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .read = iwl_dbgfs_##name##_read,                                \
        .open = iwl_dbgfs_open_file_generic,                            \
        .llseek = generic_file_llseek,                                  \
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
        DEBUGFS_READ_FUNC(name);                                        \
        DEBUGFS_WRITE_FUNC(name);                                       \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .write = iwl_dbgfs_##name##_write,                              \
        .read = iwl_dbgfs_##name##_read,                                \
        .open = iwl_dbgfs_open_file_generic,                            \
        .llseek = generic_file_llseek,                                  \
};

static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_priv *priv = priv(trans);
        int pos = 0, ofs = 0;
        int cnt = 0, entry;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_queue *txq;
        struct iwl_queue *q;
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        char *buf;
        int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
                (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
        const u8 *ptr;
        ssize_t ret;

        if (!priv->txq) {
                IWL_ERR(trans, "txq not ready\n");
                return -EAGAIN;
        }
        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf) {
                IWL_ERR(trans, "Cannot allocate buffer\n");
                return -ENOMEM;
        }
        pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
        for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
                txq = &priv->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
                                "q[%d]: read_ptr: %u, write_ptr: %u\n",
                                cnt, q->read_ptr, q->write_ptr);
        }
        if (priv->tx_traffic &&
                (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
                ptr = priv->tx_traffic;
                pos += scnprintf(buf + pos, bufsz - pos,
                                "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
                for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
                        for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
                             entry++, ofs += 16) {
                                pos += scnprintf(buf + pos, bufsz - pos,
                                                "0x%.4x ", ofs);
                                hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
                                                   buf + pos, bufsz - pos, 0);
                                pos += strlen(buf + pos);
                                if (bufsz - pos > 0)
                                        buf[pos++] = '\n';
                        }
                }
        }

        pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
        pos += scnprintf(buf + pos, bufsz - pos,
                        "read: %u, write: %u\n",
                         rxq->read, rxq->write);

        if (priv->rx_traffic &&
                (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
                ptr = priv->rx_traffic;
                pos += scnprintf(buf + pos, bufsz - pos,
                                "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
                for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
                        for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
                             entry++, ofs += 16) {
                                pos += scnprintf(buf + pos, bufsz - pos,
                                                "0x%.4x ", ofs);
                                hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
                                                   buf + pos, bufsz - pos, 0);
                                pos += strlen(buf + pos);
                                if (bufsz - pos > 0)
                                        buf[pos++] = '\n';
                        }
                }
        }

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}
1395
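/*
 * Debugfs write handler for the traffic_log file: writing "0" resets
 * the recorded tx/rx traffic buffers; any other value is ignored.
 */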
static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int traffic_log;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	/* a parse failure is invalid input, not a faulting user pointer */
	if (sscanf(buf, "%d", &traffic_log) != 1)
		return -EINVAL;
	if (traffic_log == 0)
		iwl_reset_traffic_log(priv(trans));

	return count;
}

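/*
 * Debugfs read handler for the tx_queue file: prints the read/write
 * pointers and stop state of every hw tx queue, plus the stop count
 * for the first four (AC) queues.
 */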
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_priv *priv = priv(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	ssize_t ret;
	const size_t bufsz = 64 * priv->cfg->base_params->num_of_queues;

	if (!priv->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		txq = &priv->txq[cnt];
		q = &txq->q;
		/* swq_id encodes the AC in its low two bits */
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u stop=%d"
				" swq_id=%#.2x (ac %d/hwq %d)\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, priv->queue_stopped),
				txq->swq_id, txq->swq_id & 3,
				(txq->swq_id >> 2) & 0x1f);
		if (cnt >= 4)
			continue;
		/* for the ACs, display the stop count too */
		pos += scnprintf(buf + pos, bufsz - pos,
				"        stop-count: %d\n",
				atomic_read(&priv->queue_stop_count[cnt]));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

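/*
 * Debugfs read handler for the rx_queue file: prints the rx queue
 * read/write pointers, the free buffer count and the last receive
 * buffer the device reported as closed.
 */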
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		/* only the low 12 bits of closed_rb_num are valid */
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

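/*
 * Debugfs read handler for the log_event file: dumps the firmware
 * event log into a freshly allocated buffer and copies it out to
 * user space.
 */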
static ssize_t iwl_dbgfs_log_event_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL; /* allocated by iwl_dump_nic_event_log() */
	int pos = 0;
	ssize_t ret;

	ret = pos = iwl_dump_nic_event_log(priv(trans), true, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
		kfree(buf);
	}
	return ret;
}

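/*
 * Debugfs write handler for the log_event file: writing "1" dumps the
 * firmware event log to the kernel log.
 */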
static ssize_t iwl_dbgfs_log_event_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	u32 event_log_flag;
	char buf[8];
	int buf_size;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	/* event_log_flag is a u32, so scan it with %u, not %d */
	if (sscanf(buf, "%u", &event_log_flag) != 1)
		return -EINVAL;
	if (event_log_flag == 1)
		iwl_dump_nic_event_log(priv(trans), true, NULL, false);

	return count;
}

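/*
 * Debugfs read handler for the interrupt file: formats the interrupt
 * statistics gathered by the ISR into a human readable report.
 */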
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Cannot allocate buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

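/*
 * Debugfs write handler for the interrupt file: writing "0" clears
 * the accumulated interrupt statistics.
 */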
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EINVAL;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

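/*
 * Instantiate the file_operations for each debugfs file from the
 * read/write handlers above.
 */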
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);

/* Create the debugfs files and directories */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					struct dentry *dir)
{
	DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	return 0;
}
#else
/* debugfs support is compiled out: register nothing, report success */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					struct dentry *dir)
{ return 0; }

#endif /* CONFIG_IWLWIFI_DEBUGFS */

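/*
 * The PCIe implementation of the transport ops; the upper layers
 * reach the hardware through this vtable.
 */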
const struct iwl_trans_ops trans_ops_pcie = {
	.alloc = iwl_trans_pcie_alloc,
	.request_irq = iwl_trans_pcie_request_irq,
	.start_device = iwl_trans_pcie_start_device,
	.prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
	.stop_device = iwl_trans_pcie_stop_device,

	.tx_start = iwl_trans_pcie_tx_start,

	.rx_free = iwl_trans_pcie_rx_free,
	.tx_free = iwl_trans_pcie_tx_free,

	.send_cmd = iwl_trans_pcie_send_cmd,
	.send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,

	.get_tx_cmd = iwl_trans_pcie_get_tx_cmd,
	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
	.txq_agg_setup = iwl_trans_pcie_txq_agg_setup,

	.kick_nic = iwl_trans_pcie_kick_nic,

	.disable_sync_irq = iwl_trans_pcie_disable_sync_irq,
	.free = iwl_trans_pcie_free,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
};