1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of version 2 of the GNU General Public License as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22  * USA
23  *
24  * The full GNU General Public License is included in this distribution
25  * in the file called LICENSE.GPL.
26  *
27  * Contact Information:
28  *  Intel Linux Wireless <ilw@linux.intel.com>
29  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30  *
31  * BSD LICENSE
32  *
33  * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  *
40  *  * Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  *  * Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in
44  *    the documentation and/or other materials provided with the
45  *    distribution.
46  *  * Neither the name Intel Corporation nor the names of its
47  *    contributors may be used to endorse or promote products derived
48  *    from this software without specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61  *
62  *****************************************************************************/
63 #include <linux/interrupt.h>
64 #include <linux/debugfs.h>
65
66 #include "iwl-dev.h"
67 #include "iwl-trans.h"
68 #include "iwl-core.h"
69 #include "iwl-helpers.h"
70 #include "iwl-trans-int-pcie.h"
71 /* TODO: remove unneeded includes when the transport layer tx_free is moved here */
72 #include "iwl-agn.h"
73 #include "iwl-core.h"
74 #include "iwl-shared.h"
75
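/**
 * iwl_trans_rx_alloc - Allocate the Rx queue DMA structures
 *
 * Allocates the circular buffer of Read Buffer Descriptors (RBDs) and the
 * receive-buffer status area as coherent DMA memory, and initializes the
 * rx_free/rx_used lists and the queue lock.
 */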
76 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
77 {
78         struct iwl_trans_pcie *trans_pcie =
79                 IWL_TRANS_GET_PCIE_TRANS(trans);
80         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
81         struct device *dev = bus(trans)->dev;
82
83         memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
84
85         spin_lock_init(&rxq->lock);
86         INIT_LIST_HEAD(&rxq->rx_free);
87         INIT_LIST_HEAD(&rxq->rx_used);
88
89         if (WARN_ON(rxq->bd || rxq->rb_stts))
90                 return -EINVAL;
91
92         /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
93         rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
94                                      &rxq->bd_dma, GFP_KERNEL);
95         if (!rxq->bd)
96                 goto err_bd;
97         memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
98
99         /* Allocate the driver's pointer to receive buffer status */
100         rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
101                                           &rxq->rb_stts_dma, GFP_KERNEL);
102         if (!rxq->rb_stts)
103                 goto err_rb_stts;
104         memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
105
106         return 0;
107
108 err_rb_stts:
109         dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
110                         rxq->bd, rxq->bd_dma);
111         memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
112         rxq->bd = NULL;
113 err_bd:
114         return -ENOMEM;
115 }
116
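/**
 * iwl_trans_rxq_free_rx_bufs - Return all Rx buffers to the rx_used list
 *
 * Walks the whole pool of receive buffers; any buffer still mapped to a
 * page is DMA-unmapped and its page freed before the buffer is placed
 * back on rxq->rx_used.
 */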
117 static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
118 {
119         struct iwl_trans_pcie *trans_pcie =
120                 IWL_TRANS_GET_PCIE_TRANS(trans);
121         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
122         int i;
123
124         /* Fill the rx_used queue with _all_ of the Rx buffers */
125         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
126                 /* In the reset function, these buffers may have been allocated
127                  * to an SKB, so we need to unmap and free potential storage */
128                 if (rxq->pool[i].page != NULL) {
129                         dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
130                                 PAGE_SIZE << hw_params(trans).rx_page_order,
131                                 DMA_FROM_DEVICE);
132                         __iwl_free_pages(priv(trans), rxq->pool[i].page);
133                         rxq->pool[i].page = NULL;
134                 }
135                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
136         }
137 }
138
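/**
 * iwl_trans_rx_hw_init - Program the Rx DMA channel registers
 *
 * Stops Rx DMA, points the device at the RBD circular buffer and the Rx
 * status area in DRAM, then re-enables the channel with the configured
 * receive-buffer size, RB timeout and interrupt coalescing timer.
 */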
139 static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
140                                  struct iwl_rx_queue *rxq)
141 {
142         u32 rb_size;
143         const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
144         u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
145
146         rb_timeout = RX_RB_TIMEOUT;
147
148         if (iwlagn_mod_params.amsdu_size_8K)
149                 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
150         else
151                 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
152
153         /* Stop Rx DMA */
154         iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
155
156         /* Reset driver's Rx queue write index */
157         iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
158
159         /* Tell device where to find RBD circular buffer in DRAM */
160         iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
161                            (u32)(rxq->bd_dma >> 8));
162
163         /* Tell device where in DRAM to update its Rx status */
164         iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
165                            rxq->rb_stts_dma >> 4);
166
167         /* Enable Rx DMA
168          * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
169          *      the credit mechanism in 5000 HW RX FIFO
170          * Direct rx interrupts to hosts
171          * Rx buffer size 4 or 8k
172          * RB timeout 0x10
173          * 256 RBDs
174          */
175         iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
176                            FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
177                            FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
178                            FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
179                            FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
180                            rb_size|
181                            (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
182                            (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
183
184         /* Set interrupt coalescing timer to default (2048 usecs) */
185         iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
186 }
187
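/**
 * iwl_rx_init - Allocate (if needed) and reset the Rx queue
 *
 * Allocates the queue on first use, recycles every buffer back onto the
 * rx_used list, zeroes the read/write indexes, replenishes the queue with
 * fresh buffers and programs the Rx hardware.
 */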
188 static int iwl_rx_init(struct iwl_trans *trans)
189 {
190         struct iwl_trans_pcie *trans_pcie =
191                 IWL_TRANS_GET_PCIE_TRANS(trans);
192         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
193
194         int i, err;
195         unsigned long flags;
196
197         if (!rxq->bd) {
198                 err = iwl_trans_rx_alloc(trans);
199                 if (err)
200                         return err;
201         }
202
203         spin_lock_irqsave(&rxq->lock, flags);
204         INIT_LIST_HEAD(&rxq->rx_free);
205         INIT_LIST_HEAD(&rxq->rx_used);
206
207         iwl_trans_rxq_free_rx_bufs(trans);
208
209         for (i = 0; i < RX_QUEUE_SIZE; i++)
210                 rxq->queue[i] = NULL;
211
212         /* Set us so that we have processed and used all buffers, but have
213          * not restocked the Rx queue with fresh buffers */
214         rxq->read = rxq->write = 0;
215         rxq->write_actual = 0;
216         rxq->free_count = 0;
217         spin_unlock_irqrestore(&rxq->lock, flags);
218
219         iwlagn_rx_replenish(trans);
220
221         iwl_trans_rx_hw_init(priv(trans), rxq);
222
223         spin_lock_irqsave(&trans->shrd->lock, flags);
224         rxq->need_update = 1;
225         iwl_rx_queue_update_write_ptr(trans, rxq);
226         spin_unlock_irqrestore(&trans->shrd->lock, flags);
227
228         return 0;
229 }
230
231 static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
232 {
233         struct iwl_trans_pcie *trans_pcie =
234                 IWL_TRANS_GET_PCIE_TRANS(trans);
235         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
236
237         unsigned long flags;
238
239         /*if rxq->bd is NULL, it means that nothing has been allocated,
240          * exit now */
241         if (!rxq->bd) {
242                 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
243                 return;
244         }
245
246         spin_lock_irqsave(&rxq->lock, flags);
247         iwl_trans_rxq_free_rx_bufs(trans);
248         spin_unlock_irqrestore(&rxq->lock, flags);
249
250         dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
251                           rxq->bd, rxq->bd_dma);
252         memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
253         rxq->bd = NULL;
254
255         if (rxq->rb_stts)
256                 dma_free_coherent(bus(trans)->dev,
257                                   sizeof(struct iwl_rb_status),
258                                   rxq->rb_stts, rxq->rb_stts_dma);
259         else
260                 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
261         memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
262         rxq->rb_stts = NULL;
263 }
264
265 static int iwl_trans_rx_stop(struct iwl_priv *priv)
266 {
267
268         /* stop Rx DMA */
269         iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
270         return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
271                             FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
272 }
273
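/*
 * Small helpers used for the keep-warm buffer and the scheduler byte-count
 * tables: allocate/free a block of coherent DMA memory tracked by an
 * iwl_dma_ptr.
 */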
274 static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
275                                     struct iwl_dma_ptr *ptr, size_t size)
276 {
277         if (WARN_ON(ptr->addr))
278                 return -EINVAL;
279
280         ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
281                                        &ptr->dma, GFP_KERNEL);
282         if (!ptr->addr)
283                 return -ENOMEM;
284         ptr->size = size;
285         return 0;
286 }
287
288 static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
289                                     struct iwl_dma_ptr *ptr)
290 {
291         if (unlikely(!ptr->addr))
292                 return;
293
294         dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
295         memset(ptr, 0, sizeof(*ptr));
296 }
297
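/**
 * iwl_trans_txq_alloc - Allocate one Tx queue
 *
 * Allocates the per-slot meta and command arrays, a device command buffer
 * for each slot, the per-TFD driver data (Tx queues only, not the command
 * queue) and the TFD circular buffer shared with the device.
 */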
298 static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
299                       int slots_num, u32 txq_id)
300 {
301         size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
302         int i;
303
304         if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
305                 return -EINVAL;
306
307         txq->q.n_window = slots_num;
308
309         txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
310                             GFP_KERNEL);
311         txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
312                            GFP_KERNEL);
313
314         if (!txq->meta || !txq->cmd)
315                 goto error;
316
317         for (i = 0; i < slots_num; i++) {
318                 txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
319                                         GFP_KERNEL);
320                 if (!txq->cmd[i])
321                         goto error;
322         }
323
324         /* Alloc driver data array and TFD circular buffer */
325         /* Driver private data, only for Tx (not command) queues,
326          * not shared with device. */
327         if (txq_id != priv->shrd->cmd_queue) {
328                 txq->txb = kzalloc(sizeof(txq->txb[0]) *
329                                    TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
330                 if (!txq->txb) {
331                         IWL_ERR(priv, "kzalloc for auxiliary BD "
332                                   "structures failed\n");
333                         goto error;
334                 }
335         } else {
336                 txq->txb = NULL;
337         }
338
339         /* Circular buffer of transmit frame descriptors (TFDs),
340          * shared with device */
341         txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
342                                        GFP_KERNEL);
343         if (!txq->tfds) {
344                 IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
345                 goto error;
346         }
347         txq->q.id = txq_id;
348
349         return 0;
350 error:
351         kfree(txq->txb);
352         txq->txb = NULL;
353         /* since txq->cmd has been zeroed,
354          * all non-allocated cmd[i] will be NULL */
355         if (txq->cmd)
356                 for (i = 0; i < slots_num; i++)
357                         kfree(txq->cmd[i]);
358         kfree(txq->meta);
359         kfree(txq->cmd);
360         txq->meta = NULL;
361         txq->cmd = NULL;
362
363         return -ENOMEM;
364
365 }
366
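/**
 * iwl_trans_txq_init - (Re)initialize an allocated Tx queue
 *
 * Resets the queue state, initializes the read/write indexes and
 * high/low-water marks, and tells the device where the queue's TFD
 * circular buffer lives in DRAM.
 */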
367 static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
368                       int slots_num, u32 txq_id)
369 {
370         int ret;
371
372         txq->need_update = 0;
373         memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
374
375         /*
376          * For the default queues 0-3, set up the swq_id
377          * already -- all others need to get one later
378          * (if they need one at all).
379          */
380         if (txq_id < 4)
381                 iwl_set_swq_id(txq, txq_id, txq_id);
382
383         /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
384          * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
385         BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
386
387         /* Initialize queue's high/low-water marks, and head/tail indexes */
388         ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
389                         txq_id);
390         if (ret)
391                 return ret;
392
393         /*
394          * Tell nic where to find circular buffer of Tx Frame Descriptors for
395          * given Tx queue, and enable the DMA channel used for that queue.
396          * Circular buffer (TFD queue in DRAM) physical base address */
397         iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
398                              txq->q.dma_addr >> 8);
399
400         return 0;
401 }
402
403 /**
404  * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skbs
405  */
406 static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
407 {
408         struct iwl_tx_queue *txq = &priv->txq[txq_id];
409         struct iwl_queue *q = &txq->q;
410
411         if (!q->n_bd)
412                 return;
413
414         while (q->write_ptr != q->read_ptr) {
415                 /* The read_ptr needs to be bounded by q->n_window */
416                 iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
417                 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
418         }
419 }
420
421 /**
422  * iwl_tx_queue_free - Deallocate DMA queue.
423  * @txq: Transmit queue to deallocate.
424  *
425  * Empty queue by removing and destroying all BD's.
426  * Free all buffers.
427  * 0-fill, but do not free "txq" descriptor structure.
428  */
429 static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
430 {
431         struct iwl_tx_queue *txq = &priv->txq[txq_id];
432         struct device *dev = priv->bus->dev;
433         int i;
434         if (WARN_ON(!txq))
435                 return;
436
437         iwl_tx_queue_unmap(priv, txq_id);
438
439         /* De-alloc array of command/tx buffers */
440         for (i = 0; i < txq->q.n_window; i++)
441                 kfree(txq->cmd[i]);
442
443         /* De-alloc circular buffer of TFDs */
444         if (txq->q.n_bd) {
445                 dma_free_coherent(dev, hw_params(priv).tfd_size *
446                                   txq->q.n_bd, txq->tfds, txq->q.dma_addr);
447                 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
448         }
449
450         /* De-alloc array of per-TFD driver data */
451         kfree(txq->txb);
452         txq->txb = NULL;
453
454         /* deallocate arrays */
455         kfree(txq->cmd);
456         kfree(txq->meta);
457         txq->cmd = NULL;
458         txq->meta = NULL;
459
460         /* 0-fill queue descriptor structure */
461         memset(txq, 0, sizeof(*txq));
462 }
463
464 /**
465  * iwl_trans_pcie_tx_free - Free TXQ Context
466  *
467  * Destroy all TX DMA queues and structures
468  */
469 static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
470 {
471         int txq_id;
472
473         /* Tx queues */
474         if (priv->txq) {
475                 for (txq_id = 0;
476                      txq_id < hw_params(priv).max_txq_num; txq_id++)
477                         iwl_tx_queue_free(priv, txq_id);
478         }
479
480         kfree(priv->txq);
481         priv->txq = NULL;
482
483         iwlagn_free_dma_ptr(priv, &priv->kw);
484
485         iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
486 }
487
488 /**
489  * iwl_trans_tx_alloc - allocate TX context
490  * Allocate all Tx DMA structures and initialize them
491  *
492  * @param priv
493  * @return error code
494  */
495 static int iwl_trans_tx_alloc(struct iwl_priv *priv)
496 {
497         int ret;
498         int txq_id, slots_num;
499
500         /*It is not allowed to alloc twice, so warn when this happens.
501          * We cannot rely on the previous allocation, so free and fail */
502         if (WARN_ON(priv->txq)) {
503                 ret = -EINVAL;
504                 goto error;
505         }
506
507         ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
508                                 hw_params(priv).scd_bc_tbls_size);
509         if (ret) {
510                 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
511                 goto error;
512         }
513
514         /* Alloc keep-warm buffer */
515         ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
516         if (ret) {
517                 IWL_ERR(priv, "Keep Warm allocation failed\n");
518                 goto error;
519         }
520
521         priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
522                         priv->cfg->base_params->num_of_queues, GFP_KERNEL);
523         if (!priv->txq) {
524                 IWL_ERR(priv, "Not enough memory for txq\n");
525                 ret = -ENOMEM;
526                 goto error;
527         }
528
529         /* Alloc all Tx queues, including the command queue (#4/#9) */
530         for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
531                 slots_num = (txq_id == priv->shrd->cmd_queue) ?
532                                         TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
533                 ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
534                                        txq_id);
535                 if (ret) {
536                         IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
537                         goto error;
538                 }
539         }
540
541         return 0;
542
543 error:
544         iwl_trans_tx_free(trans(priv));
545
546         return ret;
547 }
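
/**
 * iwl_tx_init - Allocate (if needed) and initialize all Tx queues
 *
 * Allocates the Tx context on first use, turns off all Tx DMA FIFOs,
 * points the device at the keep-warm buffer and then initializes every
 * queue, including the command queue.
 */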
548 static int iwl_tx_init(struct iwl_priv *priv)
549 {
550         int ret;
551         int txq_id, slots_num;
552         unsigned long flags;
553         bool alloc = false;
554
555         if (!priv->txq) {
556                 ret = iwl_trans_tx_alloc(priv);
557                 if (ret)
558                         goto error;
559                 alloc = true;
560         }
561
562         spin_lock_irqsave(&priv->shrd->lock, flags);
563
564         /* Turn off all Tx DMA fifos */
565         iwl_write_prph(priv, SCD_TXFACT, 0);
566
567         /* Tell NIC where to find the "keep warm" buffer */
568         iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
569
570         spin_unlock_irqrestore(&priv->shrd->lock, flags);
571
572         /* Init all Tx queues, including the command queue (#4/#9) */
573         for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
574                 slots_num = (txq_id == priv->shrd->cmd_queue) ?
575                                         TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
576                 ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
577                                        txq_id);
578                 if (ret) {
579                         IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
580                         goto error;
581                 }
582         }
583
584         return 0;
585 error:
586         /*Upon error, free only if we allocated something */
587         if (alloc)
588                 iwl_trans_tx_free(trans(priv));
589         return ret;
590 }
591
592 static void iwl_set_pwr_vmain(struct iwl_priv *priv)
593 {
594 /*
595  * (for documentation purposes)
596  * to set power to V_AUX, do:
597
598                 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
599                         iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
600                                                APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
601                                                ~APMG_PS_CTRL_MSK_PWR_SRC);
602  */
603
604         iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
605                                APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
606                                ~APMG_PS_CTRL_MSK_PWR_SRC);
607 }
608
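/**
 * iwl_nic_init - Bring the NIC out of reset and set up Rx/Tx
 *
 * Runs APM init, sets the calibration interrupt coalescing timer, switches
 * power to V_MAIN, applies device-specific configuration and then sets up
 * the Rx queue and all Tx/command queues.
 */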
609 static int iwl_nic_init(struct iwl_priv *priv)
610 {
611         unsigned long flags;
612
613         /* nic_init */
614         spin_lock_irqsave(&priv->shrd->lock, flags);
615         iwl_apm_init(priv);
616
617         /* Set interrupt coalescing calibration timer to default (512 usecs) */
618         iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
619
620         spin_unlock_irqrestore(&priv->shrd->lock, flags);
621
622         iwl_set_pwr_vmain(priv);
623
624         priv->cfg->lib->nic_config(priv);
625
626         /* Allocate the RX queue, or reset if it is already allocated */
627         iwl_rx_init(trans(priv));
628
629         /* Allocate or reset and init all Tx and Command queues */
630         if (iwl_tx_init(priv))
631                 return -ENOMEM;
632
633         if (priv->cfg->base_params->shadow_reg_enable) {
634                 /* enable shadow regs in HW */
635                 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
636                         0x800FFFFF);
637         }
638
639         set_bit(STATUS_INIT, &priv->shrd->status);
640
641         return 0;
642 }
643
644 #define HW_READY_TIMEOUT (50)
645
646 /* Note: returns poll_bit return value, which is >= 0 if success */
647 static int iwl_set_hw_ready(struct iwl_priv *priv)
648 {
649         int ret;
650
651         iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
652                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
653
654         /* See if we got it */
655         ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
656                                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
657                                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
658                                 HW_READY_TIMEOUT);
659
660         IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
661         return ret;
662 }
663
664 /* Note: returns standard 0/-ERROR code */
665 static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
666 {
667         int ret;
668
669         IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");
670
671         ret = iwl_set_hw_ready(priv);
672         if (ret >= 0)
673                 return 0;
674
675         /* If HW is not ready, prepare the conditions to check again */
676         iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
677                         CSR_HW_IF_CONFIG_REG_PREPARE);
678
679         ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
680                         ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
681                         CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
682
683         if (ret < 0)
684                 return ret;
685
686         /* HW should be ready by now, check again. */
687         ret = iwl_set_hw_ready(priv);
688         if (ret >= 0)
689                 return 0;
690         return ret;
691 }
692
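/**
 * iwl_trans_pcie_start_device - Prepare the hardware for ucode load
 *
 * Checks HW readiness and the RF-kill switch, clears pending interrupts,
 * runs iwl_nic_init() and finally clears the rfkill handshake bits and
 * enables host interrupts.
 */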
693 static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
694 {
695         int ret;
696
697         priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
698
699         if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
700              iwl_trans_pcie_prepare_card_hw(priv)) {
701                 IWL_WARN(priv, "Exit HW not ready\n");
702                 return -EIO;
703         }
704
705         /* If platform's RF_KILL switch is NOT set to KILL */
706         if (iwl_read32(priv, CSR_GP_CNTRL) &
707                         CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
708                 clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
709         else
710                 set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
711
712         if (iwl_is_rfkill(priv)) {
713                 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
714                 iwl_enable_interrupts(trans(priv));
715                 return -ERFKILL;
716         }
717
718         iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
719
720         ret = iwl_nic_init(priv);
721         if (ret) {
722                 IWL_ERR(priv, "Unable to init nic\n");
723                 return ret;
724         }
725
726         /* make sure rfkill handshake bits are cleared */
727         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
728         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
729                     CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
730
731         /* clear (again), then enable host interrupts */
732         iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
733         iwl_enable_interrupts(trans(priv));
734
735         /* really make sure rfkill handshake bits are cleared */
736         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
737         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
738
739         return 0;
740 }
741
742 /*
743  * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask;
744  * must be called with priv->shrd->lock held and MAC access acquired
745  */
746 static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
747 {
748         iwl_write_prph(priv, SCD_TXFACT, mask);
749 }
750
751 #define IWL_AC_UNSET -1
752
753 struct queue_to_fifo_ac {
754         s8 fifo, ac;
755 };
756
757 static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
758         { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
759         { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
760         { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
761         { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
762         { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
763         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
764         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
765         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
766         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
767         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
768         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
769 };
770
771 static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
772         { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
773         { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
774         { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
775         { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
776         { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
777         { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
778         { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
779         { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
780         { IWL_TX_FIFO_BE_IPAN, 2, },
781         { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
782         { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
783 };
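
/**
 * iwl_trans_pcie_tx_start - Program the Tx scheduler
 *
 * Clears the scheduler context and Tx status areas in SRAM, enables the
 * Tx DMA channels, configures the per-queue scheduler context and window
 * sizes, maps queues to FIFOs and activates them.
 */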
784 static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
785 {
786         const struct queue_to_fifo_ac *queue_to_fifo;
787         struct iwl_rxon_context *ctx;
788         u32 a;
789         unsigned long flags;
790         int i, chan;
791         u32 reg_val;
792
793         spin_lock_irqsave(&priv->shrd->lock, flags);
794
795         priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
796         a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
797         /* reset context data memory */
798         for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
799                 a += 4)
800                 iwl_write_targ_mem(priv, a, 0);
801         /* reset tx status memory */
802         for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
803                 a += 4)
804                 iwl_write_targ_mem(priv, a, 0);
805         for (; a < priv->scd_base_addr +
806                SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
807                a += 4)
808                 iwl_write_targ_mem(priv, a, 0);
809
810         iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
811                        priv->scd_bc_tbls.dma >> 10);
812
813         /* Enable DMA channel */
814         for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
815                 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
816                                 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
817                                 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
818
819         /* Update FH chicken bits */
820         reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
821         iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
822                            reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
823
824         iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
825                 SCD_QUEUECHAIN_SEL_ALL(priv));
826         iwl_write_prph(priv, SCD_AGGR_SEL, 0);
827
828         /* initiate the queues */
829         for (i = 0; i < hw_params(priv).max_txq_num; i++) {
830                 iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
831                 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
832                 iwl_write_targ_mem(priv, priv->scd_base_addr +
833                                 SCD_CONTEXT_QUEUE_OFFSET(i), 0);
834                 iwl_write_targ_mem(priv, priv->scd_base_addr +
835                                 SCD_CONTEXT_QUEUE_OFFSET(i) +
836                                 sizeof(u32),
837                                 ((SCD_WIN_SIZE <<
838                                 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
839                                 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
840                                 ((SCD_FRAME_LIMIT <<
841                                 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
842                                 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
843         }
844
845         iwl_write_prph(priv, SCD_INTERRUPT_MASK,
846                         IWL_MASK(0, hw_params(priv).max_txq_num));
847
848         /* Activate all Tx DMA/FIFO channels */
849         iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
850
851         /* map queues to FIFOs */
852         if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
853                 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
854         else
855                 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
856
857         iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);
858
859         /* make sure all queue are not stopped */
860         memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
861         for (i = 0; i < 4; i++)
862                 atomic_set(&priv->queue_stop_count[i], 0);
863         for_each_context(priv, ctx)
864                 ctx->last_tx_rejected = false;
865
866         /* reset to 0 to enable all the queue first */
867         priv->txq_ctx_active_msk = 0;
868
869         BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) !=
870                                                 IWLAGN_FIRST_AMPDU_QUEUE);
871         BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) !=
872                                                 IWLAGN_FIRST_AMPDU_QUEUE);
873
874         for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
875                 int fifo = queue_to_fifo[i].fifo;
876                 int ac = queue_to_fifo[i].ac;
877
878                 iwl_txq_ctx_activate(priv, i);
879
880                 if (fifo == IWL_TX_FIFO_UNUSED)
881                         continue;
882
883                 if (ac != IWL_AC_UNSET)
884                         iwl_set_swq_id(&priv->txq[i], ac, i);
885                 iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
886         }
887
888         spin_unlock_irqrestore(&priv->shrd->lock, flags);
889
890         /* Enable L1-Active */
891         iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
892                           APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
893 }
894
895 /**
896  * iwl_trans_tx_stop - Stop all Tx DMA channels
897  */
898 static int iwl_trans_tx_stop(struct iwl_priv *priv)
899 {
900         int ch, txq_id;
901         unsigned long flags;
902
903         /* Turn off all Tx DMA fifos */
904         spin_lock_irqsave(&priv->shrd->lock, flags);
905
906         iwl_trans_txq_set_sched(priv, 0);
907
908         /* Stop each Tx DMA channel, and wait for it to be idle */
909         for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
910                 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
911                 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
912                                     FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
913                                     1000))
914                         IWL_ERR(priv, "Failing on timeout while stopping"
915                             " DMA channel %d [0x%08x]\n", ch,
916                             iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
917         }
918         spin_unlock_irqrestore(&priv->shrd->lock, flags);
919
920         if (!priv->txq) {
921                 IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
922                 return 0;
923         }
924
925         /* Unmap DMA from host system and free skb's */
926         for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
927                 iwl_tx_queue_unmap(priv, txq_id);
928
929         return 0;
930 }
931
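/**
 * iwl_trans_pcie_stop_device - Stop the device and put it in low power
 *
 * Resets the on-board processor, masks interrupts and disables the ICT
 * table, stops Tx and Rx DMA if the device was enabled, and finally stops
 * the APM.
 */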
932 static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
933 {
934         /* stop and reset the on-board processor */
935         iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
936
937         /* tell the device to stop sending interrupts */
938         iwl_trans_disable_sync_irq(trans(priv));
939
940         /* device going down, Stop using ICT table */
941         iwl_disable_ict(trans(priv));
942
943         /*
944          * If a HW restart happens during firmware loading,
945          * then the firmware loading might call this function
946          * and later it might be called again due to the
947          * restart. So don't process again if the device is
948          * already dead.
949          */
950         if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
951                 iwl_trans_tx_stop(priv);
952                 iwl_trans_rx_stop(priv);
953
954                 /* Power-down device's busmaster DMA clocks */
955                 iwl_write_prph(priv, APMG_CLK_DIS_REG,
956                                APMG_CLK_VAL_DMA_CLK_RQT);
957                 udelay(5);
958         }
959
960         /* Make sure (redundant) we've released our request to stay awake */
961         iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
962
963         /* Stop the device, and put it in low power state */
964         iwl_apm_stop(priv);
965 }
966
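/**
 * iwl_trans_pcie_get_tx_cmd - Claim the next Tx command slot on a queue
 *
 * Returns NULL if the queue is almost full; otherwise zeroes the device
 * command at the write pointer, fills in the REPLY_TX header and the
 * sequence field (queue and TFD index) and returns the embedded iwl_tx_cmd.
 */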
967 static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_priv *priv,
968                                                 int txq_id)
969 {
970         struct iwl_tx_queue *txq = &priv->txq[txq_id];
971         struct iwl_queue *q = &txq->q;
972         struct iwl_device_cmd *dev_cmd;
973
974         if (unlikely(iwl_queue_space(q) < q->high_mark))
975                 return NULL;
976
977         /*
978          * Set up the Tx-command (not MAC!) header.
979          * Store the chosen Tx queue and TFD index within the sequence field;
980          * after Tx, uCode's Tx response will return this value so driver can
981          * locate the frame within the tx queue and do post-tx processing.
982          */
983         dev_cmd = txq->cmd[q->write_ptr];
984         memset(dev_cmd, 0, sizeof(*dev_cmd));
985         dev_cmd->hdr.cmd = REPLY_TX;
986         dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
987                                 INDEX_TO_SEQ(q->write_ptr)));
988         return &dev_cmd->cmd.tx;
989 }
990
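/**
 * iwl_trans_pcie_tx - Build a TFD for one frame and hand it to the device
 *
 * Maps the Tx command plus MAC header and (if present) the frame payload,
 * attaches both to the TFD, updates the scratch pointers and, for
 * aggregation, the byte-count table, then advances the queue write pointer.
 * The queue is stopped when it runs low on space.
 */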
991 static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
992                 struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
993                 struct iwl_rxon_context *ctx)
994 {
995         struct iwl_tx_queue *txq = &priv->txq[txq_id];
996         struct iwl_queue *q = &txq->q;
997         struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
998         struct iwl_cmd_meta *out_meta;
999
1000         dma_addr_t phys_addr = 0;
1001         dma_addr_t txcmd_phys;
1002         dma_addr_t scratch_phys;
1003         u16 len, firstlen, secondlen;
1004         u8 wait_write_ptr = 0;
1005         u8 hdr_len = ieee80211_hdrlen(fc);
1006
1007         /* Set up driver data for this TFD */
1008         memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
1009         txq->txb[q->write_ptr].skb = skb;
1010         txq->txb[q->write_ptr].ctx = ctx;
1011
1012         /* Set up first empty entry in queue's array of Tx/cmd buffers */
1013         out_meta = &txq->meta[q->write_ptr];
1014
1015         /*
1016          * Use the first empty entry in this queue's command buffer array
1017          * to contain the Tx command and MAC header concatenated together
1018          * (payload data will be in another buffer).
1019          * Size of this varies, due to varying MAC header length.
1020          * If end is not dword aligned, we'll have 2 extra bytes at the end
1021          * of the MAC header (device reads on dword boundaries).
1022          * We'll tell device about this padding later.
1023          */
1024         len = sizeof(struct iwl_tx_cmd) +
1025                 sizeof(struct iwl_cmd_header) + hdr_len;
1026         firstlen = (len + 3) & ~3;
1027
1028         /* Tell NIC about any 2-byte padding after MAC header */
1029         if (firstlen != len)
1030                 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1031
1032         /* Physical address of this Tx command's header (not MAC header!),
1033          * within command buffer array. */
1034         txcmd_phys = dma_map_single(priv->bus->dev,
1035                                     &dev_cmd->hdr, firstlen,
1036                                     DMA_BIDIRECTIONAL);
1037         if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
1038                 return -1;
1039         dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1040         dma_unmap_len_set(out_meta, len, firstlen);
1041
1042         if (!ieee80211_has_morefrags(fc)) {
1043                 txq->need_update = 1;
1044         } else {
1045                 wait_write_ptr = 1;
1046                 txq->need_update = 0;
1047         }
1048
1049         /* Set up TFD's 2nd entry to point directly to remainder of skb,
1050          * if any (802.11 null frames have no payload). */
1051         secondlen = skb->len - hdr_len;
1052         if (secondlen > 0) {
1053                 phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
1054                                            secondlen, DMA_TO_DEVICE);
1055                 if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
1056                         dma_unmap_single(priv->bus->dev,
1057                                          dma_unmap_addr(out_meta, mapping),
1058                                          dma_unmap_len(out_meta, len),
1059                                          DMA_BIDIRECTIONAL);
1060                         return -1;
1061                 }
1062         }
1063
1064         /* Attach buffers to TFD */
1065         iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
1066         if (secondlen > 0)
1067                 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
1068                                              secondlen, 0);
1069
1070         scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1071                                 offsetof(struct iwl_tx_cmd, scratch);
1072
1073         /* take back ownership of DMA buffer to enable update */
1074         dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
1075                         DMA_BIDIRECTIONAL);
1076         tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1077         tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1078
1079         IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
1080                      le16_to_cpu(dev_cmd->hdr.sequence));
1081         IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1082         iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1083         iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1084
1085         /* Set up entry for this TFD in Tx byte-count array */
1086         if (ampdu)
1087                 iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
1088                                                le16_to_cpu(tx_cmd->len));
1089
1090         dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
1091                         DMA_BIDIRECTIONAL);
1092
1093         trace_iwlwifi_dev_tx(priv,
1094                              &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
1095                              sizeof(struct iwl_tfd),
1096                              &dev_cmd->hdr, firstlen,
1097                              skb->data + hdr_len, secondlen);
1098
1099         /* Tell device the write index *just past* this latest filled TFD */
1100         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1101         iwl_txq_update_write_ptr(priv, txq);
1102
1103         /*
1104          * At this point the frame is "transmitted" successfully
1105          * and we will get a TX status notification eventually,
1106          * regardless of the value of ret. "ret" only indicates
1107          * whether or not we should update the write pointer.
1108          */
1109         if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
1110                 if (wait_write_ptr) {
1111                         txq->need_update = 1;
1112                         iwl_txq_update_write_ptr(priv, txq);
1113                 } else {
1114                         iwl_stop_queue(priv, txq);
1115                 }
1116         }
1117         return 0;
1118 }
1119
1120 static void iwl_trans_pcie_kick_nic(struct iwl_priv *priv)
1121 {
1122         /* Remove all resets to allow NIC to operate */
1123         iwl_write32(priv, CSR_RESET, 0);
1124 }
1125
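/*
 * Set up the interrupt path: initialize the ISR tasklet and the ICT table,
 * request the (shared) PCIe interrupt and hook up the Rx replenish work.
 */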
1126 static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
1127 {
1128         struct iwl_trans_pcie *trans_pcie =
1129                 IWL_TRANS_GET_PCIE_TRANS(trans);
1130         int err;
1131
1132         trans_pcie->inta_mask = CSR_INI_SET_MASK;
1133
1134         tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
1135                 iwl_irq_tasklet, (unsigned long)trans);
1136
1137         iwl_alloc_isr_ict(trans);
1138
1139         err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
1140                 DRV_NAME, trans);
1141         if (err) {
1142                 IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
1143                 iwl_free_isr_ict(trans);
1144                 return err;
1145         }
1146
1147         INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
1148         return 0;
1149 }
1150
1151 static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
1152 {
1153         unsigned long flags;
1154         struct iwl_trans_pcie *trans_pcie =
1155                 IWL_TRANS_GET_PCIE_TRANS(trans);
1156
1157         spin_lock_irqsave(&trans->shrd->lock, flags);
1158         iwl_disable_interrupts(trans);
1159         spin_unlock_irqrestore(&trans->shrd->lock, flags);
1160
1161         /* wait to make sure we flush the pending tasklet */
1162         synchronize_irq(bus(trans)->irq);
1163         tasklet_kill(&trans_pcie->irq_tasklet);
1164 }
1165
1166 static void iwl_trans_pcie_free(struct iwl_priv *priv)
1167 {
1168         free_irq(priv->bus->irq, trans(priv));
1169         iwl_free_isr_ict(trans(priv));
1170         kfree(trans(priv));
1171         trans(priv) = NULL;
1172 }
1173
1174 #ifdef CONFIG_PM
1175
1176 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1177 {
1178         /*
1179          * This function is called when the system goes into suspend state:
1180          * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
1181          * first, but since iwl_mac_stop() has no knowledge of who the caller is,
1182          * it will not call apm_ops.stop() to stop the DMA operation.
1183          * Call apm_ops.stop() here to make sure the DMA is stopped.
1184          *
1185          * But of course ... if we have configured WoWLAN then we did other
1186          * things already :-)
1187          */
1188         if (!trans->shrd->wowlan)
1189                 iwl_apm_stop(priv(trans));
1190
1191         return 0;
1192 }
1193
1194 static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1195 {
1196         bool hw_rfkill = false;
1197
1198         iwl_enable_interrupts(trans);
1199
1200         if (!(iwl_read32(priv(trans), CSR_GP_CNTRL) &
1201                                 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
1202                 hw_rfkill = true;
1203
1204         if (hw_rfkill)
1205                 set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
1206         else
1207                 clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
1208
1209         wiphy_rfkill_set_hw_state(priv(trans)->hw->wiphy, hw_rfkill);
1210
1211         return 0;
1212 }
1213 #else /* CONFIG_PM */
1214 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1215 { return 0; }
1216
1217 static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1218 { return 0; }
1219
1220 #endif /* CONFIG_PM */
1221
1222 const struct iwl_trans_ops trans_ops_pcie;
1223
1224 static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
1225 {
1226         struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
1227                                               sizeof(struct iwl_trans_pcie),
1228                                               GFP_KERNEL);
1229         if (iwl_trans) {
1230                 struct iwl_trans_pcie *trans_pcie =
1231                         IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
1232                 iwl_trans->ops = &trans_ops_pcie;
1233                 iwl_trans->shrd = shrd;
1234                 trans_pcie->trans = iwl_trans;
1235         }
1236
1237         return iwl_trans;
1238 }
1239
1240 #ifdef CONFIG_IWLWIFI_DEBUGFS
1241 /* create and remove of files */
1242 #define DEBUGFS_ADD_FILE(name, parent, mode) do {                       \
1243         if (!debugfs_create_file(#name, mode, parent, trans,            \
1244                                  &iwl_dbgfs_##name##_ops))              \
1245                 return -ENOMEM;                                         \
1246 } while (0)
1247
1248 /* file operation */
1249 #define DEBUGFS_READ_FUNC(name)                                         \
1250 static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
1251                                         char __user *user_buf,          \
1252                                         size_t count, loff_t *ppos);
1253
1254 #define DEBUGFS_WRITE_FUNC(name)                                        \
1255 static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
1256                                         const char __user *user_buf,    \
1257                                         size_t count, loff_t *ppos);
1258
1259
1260 static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
1261 {
1262         file->private_data = inode->i_private;
1263         return 0;
1264 }
1265
1266 #define DEBUGFS_READ_FILE_OPS(name)                                     \
1267         DEBUGFS_READ_FUNC(name);                                        \
1268 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
1269         .read = iwl_dbgfs_##name##_read,                                \
1270         .open = iwl_dbgfs_open_file_generic,                            \
1271         .llseek = generic_file_llseek,                                  \
1272 };
1273
1274 #define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
1275         DEBUGFS_READ_FUNC(name);                                        \
1276         DEBUGFS_WRITE_FUNC(name);                                       \
1277 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
1278         .write = iwl_dbgfs_##name##_write,                              \
1279         .read = iwl_dbgfs_##name##_read,                                \
1280         .open = iwl_dbgfs_open_file_generic,                            \
1281         .llseek = generic_file_llseek,                                  \
1282 };
1283
1284 static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
1285                                          char __user *user_buf,
1286                                          size_t count, loff_t *ppos)
1287 {
1288         struct iwl_trans *trans = file->private_data;
1289         struct iwl_priv *priv = priv(trans);
1290         int pos = 0, ofs = 0;
1291         int cnt = 0, entry;
1292         struct iwl_trans_pcie *trans_pcie =
1293                 IWL_TRANS_GET_PCIE_TRANS(trans);
1294         struct iwl_tx_queue *txq;
1295         struct iwl_queue *q;
1296         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1297         char *buf;
1298         int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
1299                 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
1300         const u8 *ptr;
1301         ssize_t ret;
1302
1303         if (!priv->txq) {
1304                 IWL_ERR(trans, "txq not ready\n");
1305                 return -EAGAIN;
1306         }
1307         buf = kzalloc(bufsz, GFP_KERNEL);
1308         if (!buf) {
1309                 IWL_ERR(trans, "Can not allocate buffer\n");
1310                 return -ENOMEM;
1311         }
1312         pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
1313         for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1314                 txq = &priv->txq[cnt];
1315                 q = &txq->q;
1316                 pos += scnprintf(buf + pos, bufsz - pos,
1317                                 "q[%d]: read_ptr: %u, write_ptr: %u\n",
1318                                 cnt, q->read_ptr, q->write_ptr);
1319         }
1320         if (priv->tx_traffic &&
1321                 (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
1322                 ptr = priv->tx_traffic;
1323                 pos += scnprintf(buf + pos, bufsz - pos,
1324                                 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
1325                 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
1326                         for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
1327                              entry++,  ofs += 16) {
1328                                 pos += scnprintf(buf + pos, bufsz - pos,
1329                                                 "0x%.4x ", ofs);
1330                                 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
1331                                                    buf + pos, bufsz - pos, 0);
1332                                 pos += strlen(buf + pos);
1333                                 if (bufsz - pos > 0)
1334                                         buf[pos++] = '\n';
1335                         }
1336                 }
1337         }
1338
1339         pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
1340         pos += scnprintf(buf + pos, bufsz - pos,
1341                         "read: %u, write: %u\n",
1342                          rxq->read, rxq->write);
1343
1344         if (priv->rx_traffic &&
1345                 (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
1346                 ptr = priv->rx_traffic;
1347                 pos += scnprintf(buf + pos, bufsz - pos,
1348                                 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
1349                 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
1350                         for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
1351                              entry++,  ofs += 16) {
1352                                 pos += scnprintf(buf + pos, bufsz - pos,
1353                                                 "0x%.4x ", ofs);
1354                                 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
1355                                                    buf + pos, bufsz - pos, 0);
1356                                 pos += strlen(buf + pos);
1357                                 if (bufsz - pos > 0)
1358                                         buf[pos++] = '\n';
1359                         }
1360                 }
1361         }
1362
1363         ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1364         kfree(buf);
1365         return ret;
1366 }
1367
1368 static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
1369                                          const char __user *user_buf,
1370                                          size_t count, loff_t *ppos)
1371 {
1372         struct iwl_trans *trans = file->private_data;
1373         char buf[8];
1374         int buf_size;
1375         int traffic_log;
1376
1377         memset(buf, 0, sizeof(buf));
1378         buf_size = min(count, sizeof(buf) -  1);
1379         if (copy_from_user(buf, user_buf, buf_size))
1380                 return -EFAULT;
1381         if (sscanf(buf, "%d", &traffic_log) != 1)
1382                 return -EFAULT;
1383         if (traffic_log == 0)
1384                 iwl_reset_traffic_log(priv(trans));
1385
1386         return count;
1387 }
1388
1389 static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1390                                                 char __user *user_buf,
1391                                                 size_t count, loff_t *ppos) {
1392
1393         struct iwl_trans *trans = file->private_data;
1394         struct iwl_priv *priv = priv(trans);
1395         struct iwl_tx_queue *txq;
1396         struct iwl_queue *q;
1397         char *buf;
1398         int pos = 0;
1399         int cnt;
1400         int ret;
1401         const size_t bufsz = sizeof(char) * 64 *
1402                                 priv->cfg->base_params->num_of_queues;
1403
1404         if (!priv->txq) {
1405                 IWL_ERR(priv, "txq not ready\n");
1406                 return -EAGAIN;
1407         }
1408         buf = kzalloc(bufsz, GFP_KERNEL);
1409         if (!buf)
1410                 return -ENOMEM;
1411
1412         for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1413                 txq = &priv->txq[cnt];
1414                 q = &txq->q;
1415                 pos += scnprintf(buf + pos, bufsz - pos,
1416                                 "hwq %.2d: read=%u write=%u stop=%d"
1417                                 " swq_id=%#.2x (ac %d/hwq %d)\n",
1418                                 cnt, q->read_ptr, q->write_ptr,
1419                                 !!test_bit(cnt, priv->queue_stopped),
1420                                 txq->swq_id, txq->swq_id & 3,
1421                                 (txq->swq_id >> 2) & 0x1f);
1422                 if (cnt >= 4)
1423                         continue;
1424                 /* for the ACs, display the stop count too */
1425                 pos += scnprintf(buf + pos, bufsz - pos,
1426                                 "        stop-count: %d\n",
1427                                 atomic_read(&priv->queue_stop_count[cnt]));
1428         }
1429         ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1430         kfree(buf);
1431         return ret;
1432 }
1433
1434 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1435                                                 char __user *user_buf,
1436                                                 size_t count, loff_t *ppos) {
1437         struct iwl_trans *trans = file->private_data;
1438         struct iwl_trans_pcie *trans_pcie =
1439                 IWL_TRANS_GET_PCIE_TRANS(trans);
1440         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1441         char buf[256];
1442         int pos = 0;
1443         const size_t bufsz = sizeof(buf);
1444
1445         pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1446                                                 rxq->read);
1447         pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1448                                                 rxq->write);
1449         pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1450                                                 rxq->free_count);
1451         if (rxq->rb_stts) {
1452                 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1453                          le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
1454         } else {
1455                 pos += scnprintf(buf + pos, bufsz - pos,
1456                                         "closed_rb_num: Not Allocated\n");
1457         }
1458         return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1459 }
1460
1461 DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
1462 DEBUGFS_READ_FILE_OPS(rx_queue);
1463 DEBUGFS_READ_FILE_OPS(tx_queue);
1464
1465 /*
1466  * Create the debugfs files and directories
1467  *
1468  */
1469 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1470                                         struct dentry *dir)
1471 {
1472         DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
1473         DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
1474         DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
1475         return 0;
1476 }
1477 #else
1478 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1479                                         struct dentry *dir)
1480 { return 0; }
1481
1482 #endif /*CONFIG_IWLWIFI_DEBUGFS */
1483
1484 const struct iwl_trans_ops trans_ops_pcie = {
1485         .alloc = iwl_trans_pcie_alloc,
1486         .request_irq = iwl_trans_pcie_request_irq,
1487         .start_device = iwl_trans_pcie_start_device,
1488         .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
1489         .stop_device = iwl_trans_pcie_stop_device,
1490
1491         .tx_start = iwl_trans_pcie_tx_start,
1492
1493         .rx_free = iwl_trans_pcie_rx_free,
1494         .tx_free = iwl_trans_pcie_tx_free,
1495
1496         .send_cmd = iwl_trans_pcie_send_cmd,
1497         .send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,
1498
1499         .get_tx_cmd = iwl_trans_pcie_get_tx_cmd,
1500         .tx = iwl_trans_pcie_tx,
1501
1502         .txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
1503         .txq_agg_setup = iwl_trans_pcie_txq_agg_setup,
1504
1505         .kick_nic = iwl_trans_pcie_kick_nic,
1506
1507         .disable_sync_irq = iwl_trans_pcie_disable_sync_irq,
1508         .free = iwl_trans_pcie_free,
1509
1510         .dbgfs_register = iwl_trans_pcie_dbgfs_register,
1511         .suspend = iwl_trans_pcie_suspend,
1512         .resume = iwl_trans_pcie_resume,
1513 };
1514