/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes once the transport layer owns tx_free */
#include "iwl-agn.h"
#include "iwl-shared.h"

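/*
 * Editor's note on the RX DMA layout (inferred from the code below):
 * rxq->bd is a circular buffer of RX_QUEUE_SIZE (256) read buffer
 * descriptors, each a __le32 holding a receive page address shifted
 * right by 8, and rxq->rb_stts is a small status area the device
 * writes so the driver can tell which RBs have been filled.
 */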
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        struct device *dev = bus(trans)->dev;

        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

        spin_lock_init(&rxq->lock);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;

        /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                                     &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;
        memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

        /* Allocate the driver's pointer to receive buffer status */
        rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
                                          &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb_stts;
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

        return 0;

err_rb_stts:
        dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                        rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;
err_bd:
        return -ENOMEM;
}

static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        int i;

        /* Fill the rx_used queue with _all_ of the Rx buffers */
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                /* In the reset function, these buffers may have been allocated
                 * to an SKB, so we need to unmap and free potential storage */
                if (rxq->pool[i].page != NULL) {
                        dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
                                PAGE_SIZE << hw_params(trans).rx_page_order,
                                DMA_FROM_DEVICE);
                        __free_pages(rxq->pool[i].page,
                                     hw_params(trans).rx_page_order);
                        rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
        }
}

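/*
 * Editor's note on the shifts used below (inferred, not from the
 * original comments): the RBD base address is programmed >> 8 and the
 * status area >> 4, i.e. the hardware expects 256-byte and 16-byte
 * aligned addresses respectively; both come from dma_alloc_coherent()
 * above and satisfy these alignments.
 */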
static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
                                 struct iwl_rx_queue *rxq)
{
        u32 rb_size;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
        u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
        struct iwl_trans *trans = trans(priv);

        if (iwlagn_mod_params.amsdu_size_8K)
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
        else
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

        /* Stop Rx DMA */
        iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

        /* Reset driver's Rx queue write index */
        iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                           (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
                           rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to host
         * Rx buffer size 4 or 8k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
                           FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                           FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                           FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                           FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
                           rb_size |
                           (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                           (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

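/*
 * Editor's note: iwl_rx_init() is written to be safe both on first
 * start and on restart -- it allocates the DMA rings only if rxq->bd
 * is still NULL, recycles every pool page back onto rx_used, zeroes
 * the read/write indexes, and then lets iwlagn_rx_replenish() hand
 * fresh pages to the hardware.
 */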
static int iwl_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;

        int i, err;
        unsigned long flags;

        if (!rxq->bd) {
                err = iwl_trans_rx_alloc(trans);
                if (err)
                        return err;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        iwl_trans_rxq_free_rx_bufs(trans);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                rxq->queue[i] = NULL;

        /* Set us so that we have processed and used all buffers, but have
         * not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        rxq->free_count = 0;
        spin_unlock_irqrestore(&rxq->lock, flags);

        iwlagn_rx_replenish(trans);

        iwl_trans_rx_hw_init(priv(trans), rxq);

        spin_lock_irqsave(&trans->shrd->lock, flags);
        rxq->need_update = 1;
        iwl_rx_queue_update_write_ptr(trans, rxq);
        spin_unlock_irqrestore(&trans->shrd->lock, flags);

        return 0;
}

static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;

        unsigned long flags;

        /* if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
        if (!rxq->bd) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        iwl_trans_rxq_free_rx_bufs(trans);
        spin_unlock_irqrestore(&rxq->lock, flags);

        dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;

        if (rxq->rb_stts)
                dma_free_coherent(bus(trans)->dev,
                                  sizeof(struct iwl_rb_status),
                                  rxq->rb_stts, rxq->rb_stts_dma);
        else
                IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
        memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
        rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_trans *trans)
{

        /* stop Rx DMA */
        iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
                            FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

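/*
 * Editor's note: struct iwl_dma_ptr is just a small bundle of
 * {virtual address, DMA address, size} so a coherent allocation can be
 * carried around and freed in one piece; the two helpers below are the
 * only places it is filled in or torn down.
 */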
static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
                                    struct iwl_dma_ptr *ptr, size_t size)
{
        if (WARN_ON(ptr->addr))
                return -EINVAL;

        ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
                                       &ptr->dma, GFP_KERNEL);
        if (!ptr->addr)
                return -ENOMEM;
        ptr->size = size;
        return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
                                    struct iwl_dma_ptr *ptr)
{
        if (unlikely(!ptr->addr))
                return;

        dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
        memset(ptr, 0, sizeof(*ptr));
}

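/*
 * Editor's note on the sizing below: meta[] and cmd[] are per-slot
 * (slots_num entries, i.e. the usable window), while txb[] and the TFD
 * ring are sized for the full TFD_QUEUE_SIZE_MAX ring; only the TFD
 * ring itself is shared with the device.
 */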
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
                                struct iwl_tx_queue *txq, int slots_num,
                                u32 txq_id)
{
        size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
        int i;

        if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
                return -EINVAL;

        txq->q.n_window = slots_num;

        txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
                            GFP_KERNEL);
        txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
                           GFP_KERNEL);

        if (!txq->meta || !txq->cmd)
                goto error;

        for (i = 0; i < slots_num; i++) {
                txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
                                        GFP_KERNEL);
                if (!txq->cmd[i])
                        goto error;
        }

        /* Alloc driver data array and TFD circular buffer */
        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (txq_id != trans->shrd->cmd_queue) {
                txq->txb = kzalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(trans, "kzalloc for auxiliary BD "
                                  "structures failed\n");
                        goto error;
                }
        } else {
                txq->txb = NULL;
        }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
                                       &txq->q.dma_addr, GFP_KERNEL);
        if (!txq->tfds) {
                IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
                goto error;
        }
        txq->q.id = txq_id;

        return 0;
error:
        kfree(txq->txb);
        txq->txb = NULL;
        /* since txq->cmd has been zeroed,
         * all unallocated cmd[i] will be NULL */
        if (txq->cmd)
                for (i = 0; i < slots_num; i++)
                        kfree(txq->cmd[i]);
        kfree(txq->meta);
        kfree(txq->cmd);
        txq->meta = NULL;
        txq->cmd = NULL;

        return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id)
{
        int ret;

        txq->need_update = 0;
        memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

        /*
         * For the default queues 0-3, set up the swq_id
         * already -- all others need to get one later
         * (if they need one at all).
         */
        if (txq_id < 4)
                iwl_set_swq_id(txq, txq_id, txq_id);

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
                        txq_id);
        if (ret)
                return ret;

        /*
         * Tell nic where to find circular buffer of Tx Frame Descriptors for
         * given Tx queue, and enable the DMA channel used for that queue.
         * Circular buffer (TFD queue in DRAM) physical base address */
        iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
                             txq->q.dma_addr >> 8);

        return 0;
}

/**
 * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
        struct iwl_priv *priv = priv(trans);
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;

        if (!q->n_bd)
                return;

        while (q->write_ptr != q->read_ptr) {
                /* The read_ptr needs to be bounded by q->n_window */
                iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr));
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
        struct iwl_priv *priv = priv(trans);
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct device *dev = bus(trans)->dev;
        int i;

        if (WARN_ON(!txq))
                return;

        iwl_tx_queue_unmap(trans, txq_id);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < txq->q.n_window; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd) {
                dma_free_coherent(dev, sizeof(struct iwl_tfd) *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
                memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
        }

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
        int txq_id;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_priv *priv = priv(trans);

        /* Tx queues */
        if (priv->txq) {
                for (txq_id = 0;
                     txq_id < hw_params(trans).max_txq_num; txq_id++)
                        iwl_tx_queue_free(trans, txq_id);
        }

        kfree(priv->txq);
        priv->txq = NULL;

        iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

        iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @trans: the transport
 *
 * Returns 0 on success, or a negative error code.
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
        int ret;
        int txq_id, slots_num;
        struct iwl_priv *priv = priv(trans);
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);

        u16 scd_bc_tbls_size = priv->cfg->base_params->num_of_queues *
                        sizeof(struct iwlagn_scd_bc_tbl);

        /* It is not allowed to alloc twice, so warn when this happens.
         * We cannot rely on the previous allocation, so free and fail */
        if (WARN_ON(priv->txq)) {
                ret = -EINVAL;
                goto error;
        }

        ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
                                   scd_bc_tbls_size);
        if (ret) {
                IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
                goto error;
        }

        /* Alloc keep-warm buffer */
        ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
        if (ret) {
                IWL_ERR(trans, "Keep Warm allocation failed\n");
                goto error;
        }

        priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
                        priv->cfg->base_params->num_of_queues, GFP_KERNEL);
        if (!priv->txq) {
                IWL_ERR(trans, "Not enough memory for txq\n");
                ret = -ENOMEM;
                goto error;
        }

        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
                slots_num = (txq_id == trans->shrd->cmd_queue) ?
                                        TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_trans_txq_alloc(trans, &priv->txq[txq_id], slots_num,
                                       txq_id);
                if (ret) {
                        IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
                        goto error;
                }
        }

        return 0;

error:
        iwl_trans_tx_free(trans);

        return ret;
}

static int iwl_tx_init(struct iwl_trans *trans)
{
        int ret;
        int txq_id, slots_num;
        unsigned long flags;
        bool alloc = false;
        struct iwl_priv *priv = priv(trans);
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);

        if (!priv->txq) {
                ret = iwl_trans_tx_alloc(trans);
                if (ret)
                        goto error;
                alloc = true;
        }

        spin_lock_irqsave(&trans->shrd->lock, flags);

        /* Turn off all Tx DMA fifos */
        iwl_write_prph(bus(trans), SCD_TXFACT, 0);

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
                           trans_pcie->kw.dma >> 4);

        spin_unlock_irqrestore(&trans->shrd->lock, flags);

        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
                slots_num = (txq_id == trans->shrd->cmd_queue) ?
                                        TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_trans_txq_init(trans, &priv->txq[txq_id], slots_num,
                                       txq_id);
                if (ret) {
                        IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
                        goto error;
                }
        }

        return 0;
error:
        /* Upon error, free only if we allocated something */
        if (alloc)
                iwl_trans_tx_free(trans);
        return ret;
}

static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
        struct iwl_trans *trans = trans(priv);
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

                if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
                        iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
                                               APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
                                               ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

        iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
                               APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                               ~APMG_PS_CTRL_MSK_PWR_SRC);
}

static int iwl_nic_init(struct iwl_trans *trans)
{
        unsigned long flags;
        struct iwl_priv *priv = priv(trans);

        /* nic_init */
        spin_lock_irqsave(&trans->shrd->lock, flags);
        iwl_apm_init(priv);

        /* Set interrupt coalescing calibration timer to default (512 usecs) */
        iwl_write8(bus(trans), CSR_INT_COALESCING,
                IWL_HOST_INT_CALIB_TIMEOUT_DEF);

        spin_unlock_irqrestore(&trans->shrd->lock, flags);

        iwl_set_pwr_vmain(priv);

        priv->cfg->lib->nic_config(priv);

        /* Allocate the RX queue, or reset if it is already allocated */
        iwl_rx_init(trans);

        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_tx_init(trans))
                return -ENOMEM;

        if (priv->cfg->base_params->shadow_reg_enable) {
                /* enable shadow regs in HW */
                iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
                        0x800FFFFF);
        }

        set_bit(STATUS_INIT, &trans->shrd->status);

        return 0;
}

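/*
 * Editor's note on the handshake below: the driver first sets NIC_READY
 * and polls for it; if the device does not report ready (e.g. it is
 * still owned by ME/AMT firmware), it sets PREPARE, waits for
 * PREPARE_DONE to clear, and then retries the NIC_READY poll once.
 */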
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
        int ret;

        iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
                CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        /* See if we got it */
        ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
                                CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                                CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                                HW_READY_TIMEOUT);

        IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
        return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
{
        int ret;

        IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

        ret = iwl_set_hw_ready(trans);
        if (ret >= 0)
                return 0;

        /* If HW is not ready, prepare the conditions to check again */
        iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
                        CSR_HW_IF_CONFIG_REG_PREPARE);

        ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
                        ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
                        CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

        if (ret < 0)
                return ret;

        /* HW should be ready by now, check again. */
        ret = iwl_set_hw_ready(trans);
        if (ret >= 0)
                return 0;
        return ret;
}

static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
{
        int ret;
        struct iwl_priv *priv = priv(trans);

        priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

        if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
             iwl_trans_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
                return -EIO;
        }

        /* If platform's RF_KILL switch is NOT set to KILL */
        if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
                        CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
                clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
        else
                set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

        if (iwl_is_rfkill(trans->shrd)) {
                wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
                iwl_enable_interrupts(trans);
                return -ERFKILL;
        }

        iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);

        ret = iwl_nic_init(trans);
        if (ret) {
                IWL_ERR(trans, "Unable to init nic\n");
                return ret;
        }

        /* make sure rfkill handshake bits are cleared */
        iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
        iwl_enable_interrupts(trans);

        /* really make sure rfkill handshake bits are cleared */
        iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

        return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
 * must be called under priv->shrd->lock and with mac access held
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
        iwl_write_prph(bus(trans), SCD_TXFACT, mask);
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
        s8 fifo, ac;
};

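/*
 * Editor's note: each entry maps one hardware Tx queue to a scheduler
 * FIFO and (where applicable) a mac80211 access category. The first
 * four queues carry the four EDCA ACs; IWL_AC_UNSET marks queues (such
 * as the command queue) that never carry an AC, and the IPAN table
 * below adds per-context duplicates of the data FIFOs.
 */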
static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
        { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
        { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
        { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
        { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
        { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_BE_IPAN, 2, },
        { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
        { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
{
        const struct queue_to_fifo_ac *queue_to_fifo;
        struct iwl_rxon_context *ctx;
        struct iwl_priv *priv = priv(trans);
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 a;
        unsigned long flags;
        int i, chan;
        u32 reg_val;

        spin_lock_irqsave(&trans->shrd->lock, flags);

        trans_pcie->scd_base_addr =
                iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
        a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
        /* reset context data memory */
        for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
                a += 4)
                iwl_write_targ_mem(bus(trans), a, 0);
        /* reset tx status memory */
        for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
                a += 4)
                iwl_write_targ_mem(bus(trans), a, 0);
        for (; a < trans_pcie->scd_base_addr +
               SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
               a += 4)
                iwl_write_targ_mem(bus(trans), a, 0);

        iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
                       trans_pcie->scd_bc_tbls.dma >> 10);

        /* Enable DMA channel */
        for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
                iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
                                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

        /* Update FH chicken bits */
        reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
        iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
                           reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
                SCD_QUEUECHAIN_SEL_ALL(priv));
        iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);

        /* initiate the queues */
        for (i = 0; i < hw_params(priv).max_txq_num; i++) {
                iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
                iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
                iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
                                SCD_CONTEXT_QUEUE_OFFSET(i), 0);
                iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
                                SCD_CONTEXT_QUEUE_OFFSET(i) +
                                sizeof(u32),
                                ((SCD_WIN_SIZE <<
                                SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                                SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                                ((SCD_FRAME_LIMIT <<
                                SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                                SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
        }

        iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
                        IWL_MASK(0, hw_params(trans).max_txq_num));

        /* Activate all Tx DMA/FIFO channels */
        iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

        /* map queues to FIFOs */
        if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
                queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
        else
                queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

        iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);

        /* make sure all queues are not stopped */
        memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
        for (i = 0; i < 4; i++)
                atomic_set(&priv->queue_stop_count[i], 0);
        for_each_context(priv, ctx)
                ctx->last_tx_rejected = false;

        /* reset to 0 to enable all queues first */
        priv->txq_ctx_active_msk = 0;

        BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
                                                IWLAGN_FIRST_AMPDU_QUEUE);
        BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
                                                IWLAGN_FIRST_AMPDU_QUEUE);

        for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
                int fifo = queue_to_fifo[i].fifo;
                int ac = queue_to_fifo[i].ac;

                iwl_txq_ctx_activate(priv, i);

                if (fifo == IWL_TX_FIFO_UNUSED)
                        continue;

                if (ac != IWL_AC_UNSET)
                        iwl_set_swq_id(&priv->txq[i], ac, i);
                iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
        }

        spin_unlock_irqrestore(&trans->shrd->lock, flags);

        /* Enable L1-Active */
        iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
                          APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
        int ch, txq_id;
        unsigned long flags;
        struct iwl_priv *priv = priv(trans);

        /* Turn off all Tx DMA fifos */
        spin_lock_irqsave(&trans->shrd->lock, flags);

        iwl_trans_txq_set_sched(trans, 0);

        /* Stop each Tx DMA channel, and wait for it to be idle */
        for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
                iwl_write_direct32(bus(trans),
                                   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
                if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
                                    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
                                    1000))
                        IWL_ERR(trans, "Failing on timeout while stopping"
                            " DMA channel %d [0x%08x]", ch,
                            iwl_read_direct32(bus(trans),
                                              FH_TSSR_TX_STATUS_REG));
        }
        spin_unlock_irqrestore(&trans->shrd->lock, flags);

        if (!priv->txq) {
                IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
                return 0;
        }

        /* Unmap DMA from host system and free skb's */
        for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
                iwl_tx_queue_unmap(trans, txq_id);

        return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
        /* stop and reset the on-board processor */
        iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

        /* tell the device to stop sending interrupts */
        iwl_trans_disable_sync_irq(trans);

        /* device going down, Stop using ICT table */
        iwl_disable_ict(trans);

        /*
         * If a HW restart happens during firmware loading,
         * then the firmware loading might call this function
         * and later it might be called again due to the
         * restart. So don't process again if the device is
         * already dead.
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
                iwl_trans_tx_stop(trans);
                iwl_trans_rx_stop(trans);

                /* Power-down device's busmaster DMA clocks */
                iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
        iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwl_apm_stop(priv(trans));
}

static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_trans *trans,
                                                int txq_id)
{
        struct iwl_priv *priv = priv(trans);
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *dev_cmd;

        if (unlikely(iwl_queue_space(q) < q->high_mark))
                return NULL;

        /*
         * Set up the Tx-command (not MAC!) header.
         * Store the chosen Tx queue and TFD index within the sequence field;
         * after Tx, uCode's Tx response will return this value so driver can
         * locate the frame within the tx queue and do post-tx processing.
         */
        dev_cmd = txq->cmd[q->write_ptr];
        memset(dev_cmd, 0, sizeof(*dev_cmd));
        dev_cmd->hdr.cmd = REPLY_TX;
        dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                                INDEX_TO_SEQ(q->write_ptr)));
        return &dev_cmd->cmd.tx;
}

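/*
 * Editor's note on the TFD built below: entry 0 covers the
 * concatenated Tx command header + Tx command + MAC header (mapped
 * bidirectionally, because the scratch pointers inside the command are
 * patched after mapping), and entry 1, if present, covers the frame
 * payload (mapped to-device only).
 */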
static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
                struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
                struct iwl_rxon_context *ctx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
        struct iwl_cmd_meta *out_meta;

        dma_addr_t phys_addr = 0;
        dma_addr_t txcmd_phys;
        dma_addr_t scratch_phys;
        u16 len, firstlen, secondlen;
        u8 wait_write_ptr = 0;
        u8 hdr_len = ieee80211_hdrlen(fc);

        /* Set up driver data for this TFD */
        memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
        txq->txb[q->write_ptr].skb = skb;
        txq->txb[q->write_ptr].ctx = ctx;

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->meta[q->write_ptr];

        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
         * (payload data will be in another buffer).
         * Size of this varies, due to varying MAC header length.
         * If end is not dword aligned, we'll have 2 extra bytes at the end
         * of the MAC header (device reads on dword boundaries).
         * We'll tell device about this padding later.
         */
        len = sizeof(struct iwl_tx_cmd) +
                sizeof(struct iwl_cmd_header) + hdr_len;
        firstlen = (len + 3) & ~3;

        /* Tell NIC about any 2-byte padding after MAC header */
        if (firstlen != len)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys = dma_map_single(priv->bus->dev,
                                    &dev_cmd->hdr, firstlen,
                                    DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
                return -1;
        dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
        dma_unmap_len_set(out_meta, len, firstlen);

        if (!ieee80211_has_morefrags(fc)) {
                txq->need_update = 1;
        } else {
                wait_write_ptr = 1;
                txq->need_update = 0;
        }

        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
        secondlen = skb->len - hdr_len;
        if (secondlen > 0) {
                phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
                                           secondlen, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
                        dma_unmap_single(priv->bus->dev,
                                         dma_unmap_addr(out_meta, mapping),
                                         dma_unmap_len(out_meta, len),
                                         DMA_BIDIRECTIONAL);
                        return -1;
                }
        }

        /* Attach buffers to TFD */
        iwlagn_txq_attach_buf_to_tfd(trans(priv), txq, txcmd_phys,
                                        firstlen, 1);
        if (secondlen > 0)
                iwlagn_txq_attach_buf_to_tfd(trans(priv), txq, phys_addr,
                                             secondlen, 0);

        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                                offsetof(struct iwl_tx_cmd, scratch);

        /* take back ownership of DMA buffer to enable update */
        dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
                        DMA_BIDIRECTIONAL);
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

        IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
                     le16_to_cpu(dev_cmd->hdr.sequence));
        IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

        /* Set up entry for this TFD in Tx byte-count array */
        if (ampdu)
                iwl_trans_txq_update_byte_cnt_tbl(trans(priv), txq,
                                               le16_to_cpu(tx_cmd->len));

        dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
                        DMA_BIDIRECTIONAL);

        trace_iwlwifi_dev_tx(priv,
                             &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
                             sizeof(struct iwl_tfd),
                             &dev_cmd->hdr, firstlen,
                             skb->data + hdr_len, secondlen);

        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_txq_update_write_ptr(priv, txq);

        /*
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually,
         * regardless of the value of wait_write_ptr, which only
         * determines whether the write pointer update is deferred.
         */
        if (iwl_queue_space(q) < q->high_mark) {
                if (wait_write_ptr) {
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(priv, txq);
                } else {
                        iwl_stop_queue(priv, txq);
                }
        }
        return 0;
}

static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
{
        /* Remove all resets to allow NIC to operate */
        iwl_write32(bus(trans), CSR_RESET, 0);
}

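/*
 * Editor's note: the ISR registered below uses the ICT (interrupt
 * cause table) path, where the device DMAs its interrupt causes into a
 * host-memory table allocated by iwl_alloc_isr_ict(), so the hard IRQ
 * handler can read causes without touching device registers; the heavy
 * lifting is then deferred to irq_tasklet.
 */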
static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        int err;

        trans_pcie->inta_mask = CSR_INI_SET_MASK;

        tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
                iwl_irq_tasklet, (unsigned long)trans);

        iwl_alloc_isr_ict(trans);

        err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
                DRV_NAME, trans);
        if (err) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
                iwl_free_isr_ict(trans);
                return err;
        }

        INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
        return 0;
}

static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id,
                      int ssn, u32 status, struct sk_buff_head *skbs)
{
        struct iwl_priv *priv = priv(trans);
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        /* n_bd is usually 256 => n_bd - 1 = 0xff */
        int tfd_num = ssn & (txq->q.n_bd - 1);
        u8 agg_state;
        bool cond;

        if (txq->sched_retry) {
                agg_state =
                        priv->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
                cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
        } else {
                cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
        }

        if (txq->q.read_ptr != tfd_num) {
                IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
                                "scd_ssn=%d idx=%d txq=%d swq=%d\n",
                                ssn, tfd_num, txq_id, txq->swq_id);
                iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
                if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
                        iwl_wake_queue(priv, txq);
        }
}

static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
{
        unsigned long flags;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock_irqsave(&trans->shrd->lock, flags);
        iwl_disable_interrupts(trans);
        spin_unlock_irqrestore(&trans->shrd->lock, flags);

        /* wait to make sure we flush pending tasklet */
        synchronize_irq(bus(trans)->irq);
        tasklet_kill(&trans_pcie->irq_tasklet);
}

static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
        free_irq(bus(trans)->irq, trans);
        iwl_free_isr_ict(trans);
        trans->shrd->trans = NULL;
        kfree(trans);
}

#ifdef CONFIG_PM

static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
        /*
         * This function is called when the system goes into suspend state.
         * mac80211 will call iwl_mac_stop() from the mac80211 suspend
         * function first, but since iwl_mac_stop() has no knowledge of who
         * the caller is, it will not call apm_ops.stop() to stop the DMA
         * operation. Call apm_ops.stop here to make sure we stop the DMA.
         *
         * But of course ... if we have configured WoWLAN then we did other
         * things already :-)
         */
        if (!trans->shrd->wowlan)
                iwl_apm_stop(priv(trans));

        return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
        bool hw_rfkill = false;

        iwl_enable_interrupts(trans);

        if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
                                CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
                hw_rfkill = true;

        if (hw_rfkill)
                set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
        else
                clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

        wiphy_rfkill_set_hw_state(priv(trans)->hw->wiphy, hw_rfkill);

        return 0;
}
#else /* CONFIG_PM */
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{ return 0; }

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{ return 0; }

#endif /* CONFIG_PM */

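/*
 * Editor's note: this is a forward declaration; trans_ops_pcie is
 * presumably defined further down in the file (beyond this excerpt)
 * with the iwl_trans_pcie_* callbacks above wired into it.
 */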
const struct iwl_trans_ops trans_ops_pcie;

static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
{
        struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
                                              sizeof(struct iwl_trans_pcie),
                                              GFP_KERNEL);
        if (iwl_trans) {
                struct iwl_trans_pcie *trans_pcie =
                        IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
                iwl_trans->ops = &trans_ops_pcie;
                iwl_trans->shrd = shrd;
                trans_pcie->trans = iwl_trans;
                spin_lock_init(&iwl_trans->hcmd_lock);
        }

        return iwl_trans;
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {                       \
        if (!debugfs_create_file(#name, mode, parent, trans,            \
                                 &iwl_dbgfs_##name##_ops))              \
                return -ENOMEM;                                         \
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)                                         \
static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
                                        char __user *user_buf,          \
                                        size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)                                        \
static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
                                        const char __user *user_buf,    \
                                        size_t count, loff_t *ppos);

static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
        file->private_data = inode->i_private;
        return 0;
}

#define DEBUGFS_READ_FILE_OPS(name)                                     \
        DEBUGFS_READ_FUNC(name);                                        \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .read = iwl_dbgfs_##name##_read,                                \
        .open = iwl_dbgfs_open_file_generic,                            \
        .llseek = generic_file_llseek,                                  \
};

#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
        DEBUGFS_WRITE_FUNC(name);                                       \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .write = iwl_dbgfs_##name##_write,                              \
        .open = iwl_dbgfs_open_file_generic,                            \
        .llseek = generic_file_llseek,                                  \
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
        DEBUGFS_READ_FUNC(name);                                        \
        DEBUGFS_WRITE_FUNC(name);                                       \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .write = iwl_dbgfs_##name##_write,                              \
        .read = iwl_dbgfs_##name##_read,                                \
        .open = iwl_dbgfs_open_file_generic,                            \
        .llseek = generic_file_llseek,                                  \
};

/*
 * Dump the TX/RX queue pointers and, when the TX/RX debug levels are
 * enabled, a hex dump of the captured traffic log buffers.
 */
static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_priv *priv = priv(trans);
        int pos = 0, ofs = 0;
        int cnt = 0, entry;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_queue *txq;
        struct iwl_queue *q;
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        char *buf;
        int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
                (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
        const u8 *ptr;
        ssize_t ret;

        if (!priv->txq) {
                IWL_ERR(trans, "txq not ready\n");
                return -EAGAIN;
        }
        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf) {
                IWL_ERR(trans, "Can not allocate buffer\n");
                return -ENOMEM;
        }
        pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
        for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
                txq = &priv->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
                                "q[%d]: read_ptr: %u, write_ptr: %u\n",
                                cnt, q->read_ptr, q->write_ptr);
        }
        if (priv->tx_traffic &&
                (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
                ptr = priv->tx_traffic;
                pos += scnprintf(buf + pos, bufsz - pos,
                                "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
                for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
                        for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
                             entry++, ofs += 16) {
                                pos += scnprintf(buf + pos, bufsz - pos,
                                                "0x%.4x ", ofs);
                                hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
                                                   buf + pos, bufsz - pos, 0);
                                pos += strlen(buf + pos);
                                if (bufsz - pos > 0)
                                        buf[pos++] = '\n';
                        }
                }
        }

        pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
        pos += scnprintf(buf + pos, bufsz - pos,
                        "read: %u, write: %u\n",
                         rxq->read, rxq->write);

        if (priv->rx_traffic &&
                (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
                ptr = priv->rx_traffic;
                pos += scnprintf(buf + pos, bufsz - pos,
                                "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
                for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
                        for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
                             entry++, ofs += 16) {
                                pos += scnprintf(buf + pos, bufsz - pos,
                                                "0x%.4x ", ofs);
                                hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
                                                   buf + pos, bufsz - pos, 0);
                                pos += strlen(buf + pos);
                                if (bufsz - pos > 0)
                                        buf[pos++] = '\n';
                        }
                }
        }

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}

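/*
 * Writing "0" to this file clears the captured traffic log via
 * iwl_reset_traffic_log(); any other value is parsed but ignored.
 */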
static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
                                         const char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        char buf[8];
        int buf_size;
        int traffic_log;

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        if (sscanf(buf, "%d", &traffic_log) != 1)
                return -EFAULT;
        if (traffic_log == 0)
                iwl_reset_traffic_log(priv(trans));

        return count;
}

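/*
 * Report, for every TX queue, the read/write pointers, whether the
 * queue is stopped and its software queue id; for the first four
 * (AC) queues the stop count is reported as well.
 */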
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
                                                char __user *user_buf,
                                                size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_priv *priv = priv(trans);
        struct iwl_tx_queue *txq;
        struct iwl_queue *q;
        char *buf;
        int pos = 0;
        int cnt;
        int ret;
        const size_t bufsz = 64 * priv->cfg->base_params->num_of_queues;

        if (!priv->txq) {
                IWL_ERR(priv, "txq not ready\n");
                return -EAGAIN;
        }
        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
                txq = &priv->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
                                "hwq %.2d: read=%u write=%u stop=%d"
                                " swq_id=%#.2x (ac %d/hwq %d)\n",
                                cnt, q->read_ptr, q->write_ptr,
                                !!test_bit(cnt, priv->queue_stopped),
                                txq->swq_id, txq->swq_id & 3,
                                (txq->swq_id >> 2) & 0x1f);
                if (cnt >= 4)
                        continue;
                /* for the ACs, display the stop count too */
                pos += scnprintf(buf + pos, bufsz - pos,
                                "        stop-count: %d\n",
                                atomic_read(&priv->queue_stop_count[cnt]));
        }
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}

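/*
 * Report the RX queue state: read/write pointers, free buffer count
 * and, when the status area has been allocated, the closed
 * receive-buffer number.
 */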
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
                                                char __user *user_buf,
                                                size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        char buf[256];
        int pos = 0;
        const size_t bufsz = sizeof(buf);

        pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
                                                rxq->read);
        pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
                                                rxq->write);
        pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
                                                rxq->free_count);
        if (rxq->rb_stts) {
                pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
                         le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
        } else {
                pos += scnprintf(buf + pos, bufsz - pos,
                                        "closed_rb_num: Not Allocated\n");
        }
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

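/*
 * Dump the uCode event log to userspace; iwl_dump_nic_event_log() is
 * expected to allocate the buffer and return its length (or a
 * negative error, in which case buf stays NULL).
 */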
static ssize_t iwl_dbgfs_log_event_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        char *buf = NULL;
        int pos = 0;
        ssize_t ret = -ENOMEM;

        ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
        if (buf) {
                ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
                kfree(buf);
        }
        return ret;
}

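/*
 * Writing "1" to this file dumps the uCode event log to the kernel
 * log; other values are parsed but ignored.
 */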
static ssize_t iwl_dbgfs_log_event_write(struct file *file,
                                        const char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        u32 event_log_flag;
        char buf[8];
        int buf_size;

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        if (sscanf(buf, "%u", &event_log_flag) != 1)
                return -EFAULT;
        if (event_log_flag == 1)
                iwl_dump_nic_event_log(trans, true, NULL, false);

        return count;
}

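/*
 * Report the statistics accumulated by the ISR: HW/SW error, rfkill,
 * wakeup, RX and TX counters, plus the last restart error code when
 * an error has been seen.
 */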
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
                                        char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

        int pos = 0;
        char *buf;
        int bufsz = 24 * 64; /* 24 items * 64 char per item */
        ssize_t ret;

        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf) {
                IWL_ERR(trans, "Can not allocate buffer\n");
                return -ENOMEM;
        }

        pos += scnprintf(buf + pos, bufsz - pos,
                        "Interrupt Statistics Report:\n");

        pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
                isr_stats->hw);
        pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
                isr_stats->sw);
        if (isr_stats->sw || isr_stats->hw) {
                pos += scnprintf(buf + pos, bufsz - pos,
                        "\tLast Restarting Code:  0x%X\n",
                        isr_stats->err_code);
        }
#ifdef CONFIG_IWLWIFI_DEBUG
        pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
                isr_stats->sch);
        pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
                isr_stats->alive);
#endif
        pos += scnprintf(buf + pos, bufsz - pos,
                "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

        pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
                isr_stats->ctkill);

        pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
                isr_stats->wakeup);

        pos += scnprintf(buf + pos, bufsz - pos,
                "Rx command responses:\t\t %u\n", isr_stats->rx);

        pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
                isr_stats->tx);

        pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
                isr_stats->unhandled);

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}

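/*
 * Writing "0" (parsed as hex via %x) clears the accumulated interrupt
 * statistics; any other value is parsed but ignored.
 */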
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
                                         const char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

        char buf[8];
        int buf_size;
        u32 reset_flag;

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        if (sscanf(buf, "%x", &reset_flag) != 1)
                return -EFAULT;
        if (reset_flag == 0)
                memset(isr_stats, 0, sizeof(*isr_stats));

        return count;
}

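/*
 * Map a CSR register offset to its name; each IWL_CMD() invocation
 * generates a "case FOO: return "FOO";" arm for the listed register.
 */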
static const char *get_csr_string(int cmd)
{
        switch (cmd) {
        IWL_CMD(CSR_HW_IF_CONFIG_REG);
        IWL_CMD(CSR_INT_COALESCING);
        IWL_CMD(CSR_INT);
        IWL_CMD(CSR_INT_MASK);
        IWL_CMD(CSR_FH_INT_STATUS);
        IWL_CMD(CSR_GPIO_IN);
        IWL_CMD(CSR_RESET);
        IWL_CMD(CSR_GP_CNTRL);
        IWL_CMD(CSR_HW_REV);
        IWL_CMD(CSR_EEPROM_REG);
        IWL_CMD(CSR_EEPROM_GP);
        IWL_CMD(CSR_OTP_GP_REG);
        IWL_CMD(CSR_GIO_REG);
        IWL_CMD(CSR_GP_UCODE_REG);
        IWL_CMD(CSR_GP_DRIVER_REG);
        IWL_CMD(CSR_UCODE_DRV_GP1);
        IWL_CMD(CSR_UCODE_DRV_GP2);
        IWL_CMD(CSR_LED_REG);
        IWL_CMD(CSR_DRAM_INT_TBL_REG);
        IWL_CMD(CSR_GIO_CHICKEN_BITS);
        IWL_CMD(CSR_ANA_PLL_CFG);
        IWL_CMD(CSR_HW_REV_WA_REG);
        IWL_CMD(CSR_DBG_HPET_MEM_REG);
        default:
                return "UNKNOWN";
        }
}

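/*
 * Log the current value of every CSR in csr_tbl through IWL_ERR(),
 * one register per line.
 */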
void iwl_dump_csr(struct iwl_trans *trans)
{
        int i;
        static const u32 csr_tbl[] = {
                CSR_HW_IF_CONFIG_REG,
                CSR_INT_COALESCING,
                CSR_INT,
                CSR_INT_MASK,
                CSR_FH_INT_STATUS,
                CSR_GPIO_IN,
                CSR_RESET,
                CSR_GP_CNTRL,
                CSR_HW_REV,
                CSR_EEPROM_REG,
                CSR_EEPROM_GP,
                CSR_OTP_GP_REG,
                CSR_GIO_REG,
                CSR_GP_UCODE_REG,
                CSR_GP_DRIVER_REG,
                CSR_UCODE_DRV_GP1,
                CSR_UCODE_DRV_GP2,
                CSR_LED_REG,
                CSR_DRAM_INT_TBL_REG,
                CSR_GIO_CHICKEN_BITS,
                CSR_ANA_PLL_CFG,
                CSR_HW_REV_WA_REG,
                CSR_DBG_HPET_MEM_REG
        };
        IWL_ERR(trans, "CSR values:\n");
        IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
                "CSR_INT_PERIODIC_REG)\n");
        for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
                IWL_ERR(trans, "  %25s: 0X%08x\n",
                        get_csr_string(csr_tbl[i]),
                        iwl_read32(bus(trans), csr_tbl[i]));
        }
}

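/*
 * Any successfully parsed write to this file dumps the whole CSR
 * table to the kernel log; the parsed value itself is ignored.
 */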
static ssize_t iwl_dbgfs_csr_write(struct file *file,
                                         const char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        char buf[8];
        int buf_size;
        int csr;

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        if (sscanf(buf, "%d", &csr) != 1)
                return -EFAULT;

        iwl_dump_csr(trans);

        return count;
}

static const char *get_fh_string(int cmd)
{
        switch (cmd) {
        IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
        IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
        IWL_CMD(FH_RSCSR_CHNL0_WPTR);
        IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
        IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
        IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
        IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
        IWL_CMD(FH_TSSR_TX_STATUS_REG);
        IWL_CMD(FH_TSSR_TX_ERROR_REG);
        default:
                return "UNKNOWN";
        }
}

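/*
 * Dump the flow-handler (FH) registers.  With CONFIG_IWLWIFI_DEBUG
 * and @display set, the dump is formatted into a freshly allocated
 * buffer returned through @buf and the length is returned; otherwise
 * the registers go to the kernel log and 0 is returned.
 */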
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
        int i;
#ifdef CONFIG_IWLWIFI_DEBUG
        int pos = 0;
        size_t bufsz = 0;
#endif
        static const u32 fh_tbl[] = {
                FH_RSCSR_CHNL0_STTS_WPTR_REG,
                FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                FH_RSCSR_CHNL0_WPTR,
                FH_MEM_RCSR_CHNL0_CONFIG_REG,
                FH_MEM_RSSR_SHARED_CTRL_REG,
                FH_MEM_RSSR_RX_STATUS_REG,
                FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
                FH_TSSR_TX_STATUS_REG,
                FH_TSSR_TX_ERROR_REG
        };
#ifdef CONFIG_IWLWIFI_DEBUG
        if (display) {
                bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
                *buf = kmalloc(bufsz, GFP_KERNEL);
                if (!*buf)
                        return -ENOMEM;
                pos += scnprintf(*buf + pos, bufsz - pos,
                                "FH register values:\n");
                for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
                        pos += scnprintf(*buf + pos, bufsz - pos,
                                "  %34s: 0X%08x\n",
                                get_fh_string(fh_tbl[i]),
                                iwl_read_direct32(bus(trans), fh_tbl[i]));
                }
                return pos;
        }
#endif
        IWL_ERR(trans, "FH register values:\n");
        for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
                IWL_ERR(trans, "  %34s: 0X%08x\n",
                        get_fh_string(fh_tbl[i]),
                        iwl_read_direct32(bus(trans), fh_tbl[i]));
        }
        return 0;
}

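/*
 * Expose the FH register dump through debugfs.  Without
 * CONFIG_IWLWIFI_DEBUG, iwl_dump_fh() never touches the buffer
 * pointer, which is why buf below must start out NULL.
 */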
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        char *buf = NULL;
        int pos = 0;
        ssize_t ret = -EFAULT;

        ret = pos = iwl_dump_fh(trans, &buf, true);
        if (buf) {
                ret = simple_read_from_buffer(user_buf,
                                              count, ppos, buf, pos);
                kfree(buf);
        }

        return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 */
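/*
 * Example usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug and @dir below is the transport's directory
 * (the exact path depends on the caller):
 *
 *   cat <debugfs dir>/tx_queue          # per-queue pointers
 *   cat <debugfs dir>/rx_queue          # RX queue state
 *   echo 0 > <debugfs dir>/interrupt    # clear ISR statistics
 *   echo 1 > <debugfs dir>/log_event    # dump event log to syslog
 */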
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                        struct dentry *dir)
{
        DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
        DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
        DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
        DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
        DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
        return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                        struct dentry *dir)
{ return 0; }

#endif /* CONFIG_IWLWIFI_DEBUGFS */

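/*
 * The PCIe implementation of the transport ops vtable; the layers
 * above the transport drive the device through these callbacks.
 */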
const struct iwl_trans_ops trans_ops_pcie = {
        .alloc = iwl_trans_pcie_alloc,
        .request_irq = iwl_trans_pcie_request_irq,
        .start_device = iwl_trans_pcie_start_device,
        .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
        .stop_device = iwl_trans_pcie_stop_device,

        .tx_start = iwl_trans_pcie_tx_start,

        .rx_free = iwl_trans_pcie_rx_free,
        .tx_free = iwl_trans_pcie_tx_free,

        .send_cmd = iwl_trans_pcie_send_cmd,
        .send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,

        .get_tx_cmd = iwl_trans_pcie_get_tx_cmd,
        .tx = iwl_trans_pcie_tx,
        .reclaim = iwl_trans_pcie_reclaim,

        .txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
        .txq_agg_setup = iwl_trans_pcie_txq_agg_setup,

        .kick_nic = iwl_trans_pcie_kick_nic,

        .disable_sync_irq = iwl_trans_pcie_disable_sync_irq,
        .free = iwl_trans_pcie_free,

        .dbgfs_register = iwl_trans_pcie_dbgfs_register,
        .suspend = iwl_trans_pcie_suspend,
        .resume = iwl_trans_pcie_resume,
};