1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of version 2 of the GNU General Public License as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22  * USA
23  *
24  * The full GNU General Public License is included in this distribution
25  * in the file called LICENSE.GPL.
26  *
27  * Contact Information:
28  *  Intel Linux Wireless <ilw@linux.intel.com>
29  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30  *
31  * BSD LICENSE
32  *
33  * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  *
40  *  * Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  *  * Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in
44  *    the documentation and/or other materials provided with the
45  *    distribution.
46  *  * Neither the name Intel Corporation nor the names of its
47  *    contributors may be used to endorse or promote products derived
48  *    from this software without specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61  *
62  *****************************************************************************/
63 #include <linux/interrupt.h>
64 #include <linux/debugfs.h>
65
66 #include "iwl-dev.h"
67 #include "iwl-trans.h"
68 #include "iwl-core.h"
69 #include "iwl-helpers.h"
70 #include "iwl-trans-int-pcie.h"
71 /* TODO: remove unneeded includes once the transport layer tx_free lives here */
72 #include "iwl-agn.h"
73 #include "iwl-core.h"
74 #include "iwl-shared.h"
75
76 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
77 {
78         struct iwl_trans_pcie *trans_pcie =
79                 IWL_TRANS_GET_PCIE_TRANS(trans);
80         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
81         struct device *dev = bus(trans)->dev;
82
83         memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
84
85         spin_lock_init(&rxq->lock);
86         INIT_LIST_HEAD(&rxq->rx_free);
87         INIT_LIST_HEAD(&rxq->rx_used);
88
89         if (WARN_ON(rxq->bd || rxq->rb_stts))
90                 return -EINVAL;
91
92         /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
93         rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
94                                      &rxq->bd_dma, GFP_KERNEL);
95         if (!rxq->bd)
96                 goto err_bd;
97         memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
98
99         /* Allocate the driver's pointer to the receive buffer status */
100         rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
101                                           &rxq->rb_stts_dma, GFP_KERNEL);
102         if (!rxq->rb_stts)
103                 goto err_rb_stts;
104         memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
105
106         return 0;
107
108 err_rb_stts:
109         dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
110                         rxq->bd, rxq->bd_dma);
111         memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
112         rxq->bd = NULL;
113 err_bd:
114         return -ENOMEM;
115 }
116
117 static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
118 {
119         struct iwl_trans_pcie *trans_pcie =
120                 IWL_TRANS_GET_PCIE_TRANS(trans);
121         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
122         int i;
123
124         /* Fill the rx_used queue with _all_ of the Rx buffers */
125         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
126                 /* In the reset function, these buffers may have been allocated
127                  * to an SKB, so we need to unmap and free potential storage */
128                 if (rxq->pool[i].page != NULL) {
129                         dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
130                                 PAGE_SIZE << hw_params(trans).rx_page_order,
131                                 DMA_FROM_DEVICE);
132                         __iwl_free_pages(priv(trans), rxq->pool[i].page);
133                         rxq->pool[i].page = NULL;
134                 }
135                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
136         }
137 }
138
139 static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
140                                  struct iwl_rx_queue *rxq)
141 {
142         u32 rb_size;
143         const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
144         u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
145
146         rb_timeout = RX_RB_TIMEOUT;
147
148         if (iwlagn_mod_params.amsdu_size_8K)
149                 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
150         else
151                 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
152
153         /* Stop Rx DMA */
154         iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
155
156         /* Reset driver's Rx queue write index */
157         iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
158
159         /* Tell device where to find RBD circular buffer in DRAM */
160         iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
161                            (u32)(rxq->bd_dma >> 8));
162
163         /* Tell device where in DRAM to update its Rx status */
164         iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
165                            rxq->rb_stts_dma >> 4);
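        /*
         * Both registers take the upper bits of the DMA address: the RBD ring
         * base is programmed in 256-byte units (hence the >> 8) and the
         * rb_stts status area in 16-byte units (hence the >> 4), per the FH
         * register layout assumed here (see iwl-fh.h).
         */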
166
167         /* Enable Rx DMA
168          * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
169          *      the credit mechanism in 5000 HW RX FIFO
170          * Direct rx interrupts to hosts
171          * Rx buffer size 4 or 8k
172          * RB timeout 0x10
173          * 256 RBDs
174          */
175         iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
176                            FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
177                            FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
178                            FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
179                            FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
180                            rb_size|
181                            (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
182                            (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
183
184         /* Set interrupt coalescing timer to default (2048 usecs) */
185         iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
186 }
187
188 static int iwl_rx_init(struct iwl_trans *trans)
189 {
190         struct iwl_trans_pcie *trans_pcie =
191                 IWL_TRANS_GET_PCIE_TRANS(trans);
192         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
193
194         int i, err;
195         unsigned long flags;
196
197         if (!rxq->bd) {
198                 err = iwl_trans_rx_alloc(trans);
199                 if (err)
200                         return err;
201         }
202
203         spin_lock_irqsave(&rxq->lock, flags);
204         INIT_LIST_HEAD(&rxq->rx_free);
205         INIT_LIST_HEAD(&rxq->rx_used);
206
207         iwl_trans_rxq_free_rx_bufs(trans);
208
209         for (i = 0; i < RX_QUEUE_SIZE; i++)
210                 rxq->queue[i] = NULL;
211
212         /* Set the queue so that all buffers look processed and used, but the
213          * Rx queue has not been restocked with fresh buffers yet */
214         rxq->read = rxq->write = 0;
215         rxq->write_actual = 0;
216         rxq->free_count = 0;
217         spin_unlock_irqrestore(&rxq->lock, flags);
218
219         iwlagn_rx_replenish(trans);
220
221         iwl_trans_rx_hw_init(priv(trans), rxq);
222
223         spin_lock_irqsave(&trans->shrd->lock, flags);
224         rxq->need_update = 1;
225         iwl_rx_queue_update_write_ptr(trans, rxq);
226         spin_unlock_irqrestore(&trans->shrd->lock, flags);
227
228         return 0;
229 }
230
231 static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
232 {
233         struct iwl_trans_pcie *trans_pcie =
234                 IWL_TRANS_GET_PCIE_TRANS(trans);
235         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
236
237         unsigned long flags;
238
239         /* if rxq->bd is NULL, nothing has been allocated yet,
240          * so exit now */
241         if (!rxq->bd) {
242                 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
243                 return;
244         }
245
246         spin_lock_irqsave(&rxq->lock, flags);
247         iwl_trans_rxq_free_rx_bufs(trans);
248         spin_unlock_irqrestore(&rxq->lock, flags);
249
250         dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
251                           rxq->bd, rxq->bd_dma);
252         memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
253         rxq->bd = NULL;
254
255         if (rxq->rb_stts)
256                 dma_free_coherent(bus(trans)->dev,
257                                   sizeof(struct iwl_rb_status),
258                                   rxq->rb_stts, rxq->rb_stts_dma);
259         else
260                 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
261         memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
262         rxq->rb_stts = NULL;
263 }
264
265 static int iwl_trans_rx_stop(struct iwl_priv *priv)
266 {
267
268         /* stop Rx DMA */
269         iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
270         return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
271                             FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
272 }
273
274 static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
275                                     struct iwl_dma_ptr *ptr, size_t size)
276 {
277         if (WARN_ON(ptr->addr))
278                 return -EINVAL;
279
280         ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
281                                        &ptr->dma, GFP_KERNEL);
282         if (!ptr->addr)
283                 return -ENOMEM;
284         ptr->size = size;
285         return 0;
286 }
287
288 static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
289                                     struct iwl_dma_ptr *ptr)
290 {
291         if (unlikely(!ptr->addr))
292                 return;
293
294         dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
295         memset(ptr, 0, sizeof(*ptr));
296 }
297
298 static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
299                       int slots_num, u32 txq_id)
300 {
301         size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
302         int i;
303
304         if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
305                 return -EINVAL;
306
307         txq->q.n_window = slots_num;
308
309         txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
310                             GFP_KERNEL);
311         txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
312                            GFP_KERNEL);
313
314         if (!txq->meta || !txq->cmd)
315                 goto error;
316
317         for (i = 0; i < slots_num; i++) {
318                 txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
319                                         GFP_KERNEL);
320                 if (!txq->cmd[i])
321                         goto error;
322         }
323
324         /* Alloc driver data array and TFD circular buffer */
325         /* Driver private data, only for Tx (not command) queues,
326          * not shared with device. */
327         if (txq_id != priv->shrd->cmd_queue) {
328                 txq->txb = kzalloc(sizeof(txq->txb[0]) *
329                                    TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
330                 if (!txq->txb) {
331                         IWL_ERR(priv, "kzalloc for auxiliary BD "
332                                   "structures failed\n");
333                         goto error;
334                 }
335         } else {
336                 txq->txb = NULL;
337         }
338
339         /* Circular buffer of transmit frame descriptors (TFDs),
340          * shared with device */
341         txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
342                                        GFP_KERNEL);
343         if (!txq->tfds) {
344                 IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
345                 goto error;
346         }
347         txq->q.id = txq_id;
348
349         return 0;
350 error:
351         kfree(txq->txb);
352         txq->txb = NULL;
353         /* since txq->cmd has been zeroed,
354          * all non-allocated cmd[i] will be NULL */
355         if (txq->cmd)
356                 for (i = 0; i < slots_num; i++)
357                         kfree(txq->cmd[i]);
358         kfree(txq->meta);
359         kfree(txq->cmd);
360         txq->meta = NULL;
361         txq->cmd = NULL;
362
363         return -ENOMEM;
364
365 }
366
367 static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
368                       int slots_num, u32 txq_id)
369 {
370         int ret;
371
372         txq->need_update = 0;
373         memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
374
375         /*
376          * For the default queues 0-3, set up the swq_id
377          * already -- all others need to get one later
378          * (if they need one at all).
379          */
380         if (txq_id < 4)
381                 iwl_set_swq_id(txq, txq_id, txq_id);
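        /*
         * swq_id packs the AC in its low two bits and the HW queue number in
         * the bits above (decoded as swq_id & 3 and swq_id >> 2 in the
         * tx_queue debugfs dump below); for queues 0-3 AC and HW queue match.
         */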
382
383         /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
384          * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
385         BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
386
387         /* Initialize queue's high/low-water marks, and head/tail indexes */
388         ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
389                         txq_id);
390         if (ret)
391                 return ret;
392
393         /*
394          * Tell nic where to find circular buffer of Tx Frame Descriptors for
395          * given Tx queue, and enable the DMA channel used for that queue.
396          * Circular buffer (TFD queue in DRAM) physical base address */
397         iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
398                              txq->q.dma_addr >> 8);
399
400         return 0;
401 }
402
403 /**
404  * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
405  */
406 static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
407 {
408         struct iwl_tx_queue *txq = &priv->txq[txq_id];
409         struct iwl_queue *q = &txq->q;
410
411         if (!q->n_bd)
412                 return;
413
414         while (q->write_ptr != q->read_ptr) {
415                 /* The read_ptr needs to be bounded by q->n_window */
416                 iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
417                 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
418         }
419 }
420
421 /**
422  * iwl_tx_queue_free - Deallocate DMA queue.
423  * @txq: Transmit queue to deallocate.
424  *
425  * Empty queue by removing and destroying all BD's.
426  * Free all buffers.
427  * 0-fill, but do not free "txq" descriptor structure.
428  */
429 static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
430 {
431         struct iwl_tx_queue *txq = &priv->txq[txq_id];
432         struct device *dev = priv->bus->dev;
433         int i;
434         if (WARN_ON(!txq))
435                 return;
436
437         iwl_tx_queue_unmap(priv, txq_id);
438
439         /* De-alloc array of command/tx buffers */
440         for (i = 0; i < txq->q.n_window; i++)
441                 kfree(txq->cmd[i]);
442
443         /* De-alloc circular buffer of TFDs */
444         if (txq->q.n_bd) {
445                 dma_free_coherent(dev, hw_params(priv).tfd_size *
446                                   txq->q.n_bd, txq->tfds, txq->q.dma_addr);
447                 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
448         }
449
450         /* De-alloc array of per-TFD driver data */
451         kfree(txq->txb);
452         txq->txb = NULL;
453
454         /* deallocate arrays */
455         kfree(txq->cmd);
456         kfree(txq->meta);
457         txq->cmd = NULL;
458         txq->meta = NULL;
459
460         /* 0-fill queue descriptor structure */
461         memset(txq, 0, sizeof(*txq));
462 }
463
464 /**
465  * iwl_trans_tx_free - Free TXQ Context
466  *
467  * Destroy all TX DMA queues and structures
468  */
469 static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
470 {
471         int txq_id;
472
473         /* Tx queues */
474         if (priv->txq) {
475                 for (txq_id = 0;
476                      txq_id < hw_params(priv).max_txq_num; txq_id++)
477                         iwl_tx_queue_free(priv, txq_id);
478         }
479
480         kfree(priv->txq);
481         priv->txq = NULL;
482
483         iwlagn_free_dma_ptr(priv, &priv->kw);
484
485         iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
486 }
487
488 /**
489  * iwl_trans_tx_alloc - allocate TX context
490  * Allocate all Tx DMA structures and initialize them
491  *
492  * @param priv
493  * @return error code
494  */
495 static int iwl_trans_tx_alloc(struct iwl_priv *priv)
496 {
497         int ret;
498         int txq_id, slots_num;
499
500         /* It is not allowed to alloc twice, so warn when this happens.
501          * We cannot rely on the previous allocation, so free and fail */
502         if (WARN_ON(priv->txq)) {
503                 ret = -EINVAL;
504                 goto error;
505         }
506
507         ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
508                                 hw_params(priv).scd_bc_tbls_size);
509         if (ret) {
510                 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
511                 goto error;
512         }
513
514         /* Alloc keep-warm buffer */
515         ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
516         if (ret) {
517                 IWL_ERR(priv, "Keep Warm allocation failed\n");
518                 goto error;
519         }
520
521         priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
522                         priv->cfg->base_params->num_of_queues, GFP_KERNEL);
523         if (!priv->txq) {
524                 IWL_ERR(priv, "Not enough memory for txq\n");
525                 ret = -ENOMEM;
526                 goto error;
527         }
528
529         /* Alloc and init all Tx queues, including the command queue (#4/#9) */
530         for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
531                 slots_num = (txq_id == priv->shrd->cmd_queue) ?
532                                         TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
533                 ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
534                                        txq_id);
535                 if (ret) {
536                         IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
537                         goto error;
538                 }
539         }
540
541         return 0;
542
543 error:
544         iwl_trans_tx_free(trans(priv));
545
546         return ret;
547 }
548 static int iwl_tx_init(struct iwl_priv *priv)
549 {
550         int ret;
551         int txq_id, slots_num;
552         unsigned long flags;
553         bool alloc = false;
554
555         if (!priv->txq) {
556                 ret = iwl_trans_tx_alloc(priv);
557                 if (ret)
558                         goto error;
559                 alloc = true;
560         }
561
562         spin_lock_irqsave(&priv->shrd->lock, flags);
563
564         /* Turn off all Tx DMA fifos */
565         iwl_write_prph(priv, SCD_TXFACT, 0);
566
567         /* Tell NIC where to find the "keep warm" buffer */
568         iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
569
570         spin_unlock_irqrestore(&priv->shrd->lock, flags);
571
572         /* Alloc and init all Tx queues, including the command queue (#4/#9) */
573         for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
574                 slots_num = (txq_id == priv->shrd->cmd_queue) ?
575                                         TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
576                 ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
577                                        txq_id);
578                 if (ret) {
579                         IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
580                         goto error;
581                 }
582         }
583
584         return 0;
585 error:
586         /* Upon error, free only if we allocated something */
587         if (alloc)
588                 iwl_trans_tx_free(trans(priv));
589         return ret;
590 }
591
592 static void iwl_set_pwr_vmain(struct iwl_priv *priv)
593 {
594 /*
595  * (for documentation purposes)
596  * to set power to V_AUX, do:
597
598                 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
599                         iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
600                                                APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
601                                                ~APMG_PS_CTRL_MSK_PWR_SRC);
602  */
603
604         iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
605                                APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
606                                ~APMG_PS_CTRL_MSK_PWR_SRC);
607 }
608
609 static int iwl_nic_init(struct iwl_priv *priv)
610 {
611         unsigned long flags;
612
613         /* nic_init */
614         spin_lock_irqsave(&priv->shrd->lock, flags);
615         iwl_apm_init(priv);
616
617         /* Set interrupt coalescing calibration timer to default (512 usecs) */
618         iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
619
620         spin_unlock_irqrestore(&priv->shrd->lock, flags);
621
622         iwl_set_pwr_vmain(priv);
623
624         priv->cfg->lib->nic_config(priv);
625
626         /* Allocate the RX queue, or reset if it is already allocated */
627         iwl_rx_init(trans(priv));
628
629         /* Allocate or reset and init all Tx and Command queues */
630         if (iwl_tx_init(priv))
631                 return -ENOMEM;
632
633         if (priv->cfg->base_params->shadow_reg_enable) {
634                 /* enable shadow regs in HW */
635                 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
636                         0x800FFFFF);
637         }
638
639         set_bit(STATUS_INIT, &priv->shrd->status);
640
641         return 0;
642 }
643
644 #define HW_READY_TIMEOUT (50)
645
646 /* Note: returns the poll_bit return value, which is >= 0 on success */
647 static int iwl_set_hw_ready(struct iwl_priv *priv)
648 {
649         int ret;
650
651         iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
652                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
653
654         /* See if we got it */
655         ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
656                                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
657                                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
658                                 HW_READY_TIMEOUT);
659
660         IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
661         return ret;
662 }
663
664 /* Note: returns standard 0/-ERROR code */
665 static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
666 {
667         int ret;
668
669         IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");
670
671         ret = iwl_set_hw_ready(priv);
672         if (ret >= 0)
673                 return 0;
674
675         /* If HW is not ready, prepare the conditions to check again */
676         iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
677                         CSR_HW_IF_CONFIG_REG_PREPARE);
678
679         ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
680                         ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
681                         CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
682
683         if (ret < 0)
684                 return ret;
685
686         /* HW should be ready by now, check again. */
687         ret = iwl_set_hw_ready(priv);
688         if (ret >= 0)
689                 return 0;
690         return ret;
691 }
692
693 static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
694 {
695         int ret;
696
697         priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
698
699         if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
700              iwl_trans_pcie_prepare_card_hw(priv)) {
701                 IWL_WARN(priv, "Exit HW not ready\n");
702                 return -EIO;
703         }
704
705         /* If platform's RF_KILL switch is NOT set to KILL */
706         if (iwl_read32(priv, CSR_GP_CNTRL) &
707                         CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
708                 clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
709         else
710                 set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
711
712         if (iwl_is_rfkill(priv)) {
713                 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
714                 iwl_enable_interrupts(priv);
715                 return -ERFKILL;
716         }
717
718         iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
719
720         ret = iwl_nic_init(priv);
721         if (ret) {
722                 IWL_ERR(priv, "Unable to init nic\n");
723                 return ret;
724         }
725
726         /* make sure rfkill handshake bits are cleared */
727         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
728         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
729                     CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
730
731         /* clear (again), then enable host interrupts */
732         iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
733         iwl_enable_interrupts(priv);
734
735         /* really make sure rfkill handshake bits are cleared */
736         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
737         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
738
739         return 0;
740 }
741
742 /*
743  * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
744  * must be called under priv->shrd->lock and with MAC access held
745  */
746 static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
747 {
748         iwl_write_prph(priv, SCD_TXFACT, mask);
749 }
750
751 #define IWL_AC_UNSET -1
752
753 struct queue_to_fifo_ac {
754         s8 fifo, ac;
755 };
756
757 static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
758         { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
759         { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
760         { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
761         { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
762         { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
763         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
764         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
765         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
766         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
767         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
768         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
769 };
770
771 static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
772         { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
773         { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
774         { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
775         { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
776         { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
777         { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
778         { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
779         { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
780         { IWL_TX_FIFO_BE_IPAN, 2, },
781         { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
782         { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
783 };
784 static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
785 {
786         const struct queue_to_fifo_ac *queue_to_fifo;
787         struct iwl_rxon_context *ctx;
788         u32 a;
789         unsigned long flags;
790         int i, chan;
791         u32 reg_val;
792
793         spin_lock_irqsave(&priv->shrd->lock, flags);
794
795         priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
796         a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
797         /* reset context data memory */
798         for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
799                 a += 4)
800                 iwl_write_targ_mem(priv, a, 0);
801         /* reset tx status memory */
802         for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
803                 a += 4)
804                 iwl_write_targ_mem(priv, a, 0);
805         for (; a < priv->scd_base_addr +
806                SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
807                a += 4)
808                 iwl_write_targ_mem(priv, a, 0);
809
810         iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
811                        priv->scd_bc_tbls.dma >> 10);
812
813         /* Enable DMA channel */
814         for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
815                 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
816                                 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
817                                 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
818
819         /* Update FH chicken bits */
820         reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
821         iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
822                            reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
823
824         iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
825                 SCD_QUEUECHAIN_SEL_ALL(priv));
826         iwl_write_prph(priv, SCD_AGGR_SEL, 0);
827
828         /* initiate the queues */
829         for (i = 0; i < hw_params(priv).max_txq_num; i++) {
830                 iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
831                 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
832                 iwl_write_targ_mem(priv, priv->scd_base_addr +
833                                 SCD_CONTEXT_QUEUE_OFFSET(i), 0);
834                 iwl_write_targ_mem(priv, priv->scd_base_addr +
835                                 SCD_CONTEXT_QUEUE_OFFSET(i) +
836                                 sizeof(u32),
837                                 ((SCD_WIN_SIZE <<
838                                 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
839                                 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
840                                 ((SCD_FRAME_LIMIT <<
841                                 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
842                                 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
843         }
844
845         iwl_write_prph(priv, SCD_INTERRUPT_MASK,
846                         IWL_MASK(0, hw_params(priv).max_txq_num));
847
848         /* Activate all Tx DMA/FIFO channels */
849         iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
850
851         /* map queues to FIFOs */
852         if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
853                 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
854         else
855                 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
856
857         iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);
858
859         /* make sure all queues are not stopped */
860         memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
861         for (i = 0; i < 4; i++)
862                 atomic_set(&priv->queue_stop_count[i], 0);
863         for_each_context(priv, ctx)
864                 ctx->last_tx_rejected = false;
865
866         /* reset to 0 to enable all the queues first */
867         priv->txq_ctx_active_msk = 0;
868
869         BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) !=
870                                                 IWLAGN_FIRST_AMPDU_QUEUE);
871         BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) !=
872                                                 IWLAGN_FIRST_AMPDU_QUEUE);
873
874         for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
875                 int fifo = queue_to_fifo[i].fifo;
876                 int ac = queue_to_fifo[i].ac;
877
878                 iwl_txq_ctx_activate(priv, i);
879
880                 if (fifo == IWL_TX_FIFO_UNUSED)
881                         continue;
882
883                 if (ac != IWL_AC_UNSET)
884                         iwl_set_swq_id(&priv->txq[i], ac, i);
885                 iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
886         }
887
888         spin_unlock_irqrestore(&priv->shrd->lock, flags);
889
890         /* Enable L1-Active */
891         iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
892                           APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
893 }
894
895 /**
896  * iwl_trans_tx_stop - Stop all Tx DMA channels
897  */
898 static int iwl_trans_tx_stop(struct iwl_priv *priv)
899 {
900         int ch, txq_id;
901         unsigned long flags;
902
903         /* Turn off all Tx DMA fifos */
904         spin_lock_irqsave(&priv->shrd->lock, flags);
905
906         iwl_trans_txq_set_sched(priv, 0);
907
908         /* Stop each Tx DMA channel, and wait for it to be idle */
909         for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
910                 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
911                 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
912                                     FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
913                                     1000))
914                         IWL_ERR(priv, "Failing on timeout while stopping"
915                             " DMA channel %d [0x%08x]", ch,
916                             iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
917         }
918         spin_unlock_irqrestore(&priv->shrd->lock, flags);
919
920         if (!priv->txq) {
921                 IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
922                 return 0;
923         }
924
925         /* Unmap DMA from host system and free skb's */
926         for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
927                 iwl_tx_queue_unmap(priv, txq_id);
928
929         return 0;
930 }
931
932 static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
933 {
934         unsigned long flags;
935
936         /* stop and reset the on-board processor */
937         iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
938
939         /* tell the device to stop sending interrupts */
940         spin_lock_irqsave(&priv->shrd->lock, flags);
941         iwl_disable_interrupts(priv);
942         spin_unlock_irqrestore(&priv->shrd->lock, flags);
943         iwl_trans_sync_irq(trans(priv));
944
945         /* device going down, Stop using ICT table */
946         iwl_disable_ict(priv);
947
948         /*
949          * If a HW restart happens during firmware loading,
950          * then the firmware loading might call this function
951          * and later it might be called again due to the
952          * restart. So don't process again if the device is
953          * already dead.
954          */
955         if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
956                 iwl_trans_tx_stop(priv);
957                 iwl_trans_rx_stop(priv);
958
959                 /* Power-down device's busmaster DMA clocks */
960                 iwl_write_prph(priv, APMG_CLK_DIS_REG,
961                                APMG_CLK_VAL_DMA_CLK_RQT);
962                 udelay(5);
963         }
964
965         /* Make sure (redundant) we've released our request to stay awake */
966         iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
967
968         /* Stop the device, and put it in low power state */
969         iwl_apm_stop(priv);
970 }
971
972 static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_priv *priv,
973                                                 int txq_id)
974 {
975         struct iwl_tx_queue *txq = &priv->txq[txq_id];
976         struct iwl_queue *q = &txq->q;
977         struct iwl_device_cmd *dev_cmd;
978
979         if (unlikely(iwl_queue_space(q) < q->high_mark))
980                 return NULL;
981
982         /*
983          * Set up the Tx-command (not MAC!) header.
984          * Store the chosen Tx queue and TFD index within the sequence field;
985          * after Tx, uCode's Tx response will return this value so driver can
986          * locate the frame within the tx queue and do post-tx processing.
987          */
988         dev_cmd = txq->cmd[q->write_ptr];
989         memset(dev_cmd, 0, sizeof(*dev_cmd));
990         dev_cmd->hdr.cmd = REPLY_TX;
991         dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
992                                 INDEX_TO_SEQ(q->write_ptr)));
993         return &dev_cmd->cmd.tx;
994 }
995
996 static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
997                 struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
998                 struct iwl_rxon_context *ctx)
999 {
1000         struct iwl_tx_queue *txq = &priv->txq[txq_id];
1001         struct iwl_queue *q = &txq->q;
1002         struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
1003         struct iwl_cmd_meta *out_meta;
1004
1005         dma_addr_t phys_addr = 0;
1006         dma_addr_t txcmd_phys;
1007         dma_addr_t scratch_phys;
1008         u16 len, firstlen, secondlen;
1009         u8 wait_write_ptr = 0;
1010         u8 hdr_len = ieee80211_hdrlen(fc);
1011
1012         /* Set up driver data for this TFD */
1013         memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
1014         txq->txb[q->write_ptr].skb = skb;
1015         txq->txb[q->write_ptr].ctx = ctx;
1016
1017         /* Set up first empty entry in queue's array of Tx/cmd buffers */
1018         out_meta = &txq->meta[q->write_ptr];
1019
1020         /*
1021          * Use the first empty entry in this queue's command buffer array
1022          * to contain the Tx command and MAC header concatenated together
1023          * (payload data will be in another buffer).
1024          * Size of this varies, due to varying MAC header length.
1025          * If end is not dword aligned, we'll have 2 extra bytes at the end
1026          * of the MAC header (device reads on dword boundaries).
1027          * We'll tell device about this padding later.
1028          */
1029         len = sizeof(struct iwl_tx_cmd) +
1030                 sizeof(struct iwl_cmd_header) + hdr_len;
1031         firstlen = (len + 3) & ~3;
1032
1033         /* Tell NIC about any 2-byte padding after MAC header */
1034         if (firstlen != len)
1035                 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1036
1037         /* Physical address of this Tx command's header (not MAC header!),
1038          * within command buffer array. */
1039         txcmd_phys = dma_map_single(priv->bus->dev,
1040                                     &dev_cmd->hdr, firstlen,
1041                                     DMA_BIDIRECTIONAL);
1042         if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
1043                 return -1;
1044         dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1045         dma_unmap_len_set(out_meta, len, firstlen);
1046
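        /*
         * If more fragments of this frame follow, hold off the write pointer
         * update until the last fragment is queued; otherwise mark the queue
         * as needing an update right away.
         */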
1047         if (!ieee80211_has_morefrags(fc)) {
1048                 txq->need_update = 1;
1049         } else {
1050                 wait_write_ptr = 1;
1051                 txq->need_update = 0;
1052         }
1053
1054         /* Set up TFD's 2nd entry to point directly to remainder of skb,
1055          * if any (802.11 null frames have no payload). */
1056         secondlen = skb->len - hdr_len;
1057         if (secondlen > 0) {
1058                 phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
1059                                            secondlen, DMA_TO_DEVICE);
1060                 if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
1061                         dma_unmap_single(priv->bus->dev,
1062                                          dma_unmap_addr(out_meta, mapping),
1063                                          dma_unmap_len(out_meta, len),
1064                                          DMA_BIDIRECTIONAL);
1065                         return -1;
1066                 }
1067         }
1068
1069         /* Attach buffers to TFD */
1070         iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
1071         if (secondlen > 0)
1072                 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
1073                                              secondlen, 0);
1074
1075         scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1076                                 offsetof(struct iwl_tx_cmd, scratch);
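        /*
         * scratch_phys points at the scratch field inside this Tx command,
         * just past the command header; it is passed to the device through
         * the dram_lsb/msb pointers set below.
         */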
1077
1078         /* take back ownership of DMA buffer to enable update */
1079         dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
1080                         DMA_BIDIRECTIONAL);
1081         tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1082         tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1083
1084         IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
1085                      le16_to_cpu(dev_cmd->hdr.sequence));
1086         IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1087         iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1088         iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1089
1090         /* Set up entry for this TFD in Tx byte-count array */
1091         if (ampdu)
1092                 iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
1093                                                le16_to_cpu(tx_cmd->len));
1094
1095         dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
1096                         DMA_BIDIRECTIONAL);
1097
1098         trace_iwlwifi_dev_tx(priv,
1099                              &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
1100                              sizeof(struct iwl_tfd),
1101                              &dev_cmd->hdr, firstlen,
1102                              skb->data + hdr_len, secondlen);
1103
1104         /* Tell device the write index *just past* this latest filled TFD */
1105         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1106         iwl_txq_update_write_ptr(priv, txq);
1107
1108         /*
1109          * At this point the frame is "transmitted" successfully
1110          * and we will get a TX status notification eventually,
1111          * regardless of what follows. The code below only decides
1112          * whether or not we should update the write pointer again.
1113          */
1114         if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
1115                 if (wait_write_ptr) {
1116                         txq->need_update = 1;
1117                         iwl_txq_update_write_ptr(priv, txq);
1118                 } else {
1119                         iwl_stop_queue(priv, txq);
1120                 }
1121         }
1122         return 0;
1123 }
1124
1125 static void iwl_trans_pcie_kick_nic(struct iwl_priv *priv)
1126 {
1127         /* Remove all resets to allow NIC to operate */
1128         iwl_write32(priv, CSR_RESET, 0);
1129 }
1130
1131 static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
1132 {
1133         struct iwl_trans_pcie *trans_pcie =
1134                 IWL_TRANS_GET_PCIE_TRANS(trans);
1135         struct iwl_priv *priv = priv(trans);
1136         int err;
1137
1138         tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
1139                 iwl_irq_tasklet, (unsigned long)priv);
1140
1141         iwl_alloc_isr_ict(priv);
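        /*
         * ICT (interrupt cause table): a host-resident table into which the
         * device writes interrupt causes, letting iwl_isr_ict pick up the
         * cause without register reads (rough description; see the ICT
         * handling code for the exact mechanism).
         */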
1142
1143         err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
1144                 DRV_NAME, priv);
1145         if (err) {
1146                 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
1147                 iwl_free_isr_ict(priv);
1148                 return err;
1149         }
1150
1151         INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
1152         return 0;
1153 }
1154
1155 static void iwl_trans_pcie_sync_irq(struct iwl_priv *priv)
1156 {
1157         /* wait to make sure we flush the pending tasklet */
1158         synchronize_irq(priv->bus->irq);
1159         tasklet_kill(&priv->irq_tasklet);
1160 }
1161
1162 static void iwl_trans_pcie_free(struct iwl_priv *priv)
1163 {
1164         free_irq(priv->bus->irq, priv);
1165         iwl_free_isr_ict(priv);
1166         kfree(trans(priv));
1167         trans(priv) = NULL;
1168 }
1169
1170 const struct iwl_trans_ops trans_ops_pcie;
1171
1172 static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
1173 {
1174         struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
1175                                               sizeof(struct iwl_trans_pcie),
1176                                               GFP_KERNEL);
1177         if (iwl_trans) {
1178                 struct iwl_trans_pcie *trans_pcie =
1179                         IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
1180                 iwl_trans->ops = &trans_ops_pcie;
1181                 iwl_trans->shrd = shrd;
1182                 trans_pcie->trans = iwl_trans;
1183         }
1184
1185         return iwl_trans;
1186 }
1187
1188 #ifdef CONFIG_IWLWIFI_DEBUGFS
1189 /* creation and removal of debugfs files */
1190 #define DEBUGFS_ADD_FILE(name, parent, mode) do {                       \
1191         if (!debugfs_create_file(#name, mode, parent, trans,            \
1192                                  &iwl_dbgfs_##name##_ops))              \
1193                 return -ENOMEM;                                         \
1194 } while (0)
1195
1196 /* file operations */
1197 #define DEBUGFS_READ_FUNC(name)                                         \
1198 static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
1199                                         char __user *user_buf,          \
1200                                         size_t count, loff_t *ppos);
1201
1202 #define DEBUGFS_WRITE_FUNC(name)                                        \
1203 static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
1204                                         const char __user *user_buf,    \
1205                                         size_t count, loff_t *ppos);
1206
1207
1208 static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
1209 {
1210         file->private_data = inode->i_private;
1211         return 0;
1212 }
1213
1214 #define DEBUGFS_READ_FILE_OPS(name)                                     \
1215         DEBUGFS_READ_FUNC(name);                                        \
1216 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
1217         .read = iwl_dbgfs_##name##_read,                                \
1218         .open = iwl_dbgfs_open_file_generic,                            \
1219         .llseek = generic_file_llseek,                                  \
1220 };
1221
1222 #define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
1223         DEBUGFS_READ_FUNC(name);                                        \
1224         DEBUGFS_WRITE_FUNC(name);                                       \
1225 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
1226         .write = iwl_dbgfs_##name##_write,                              \
1227         .read = iwl_dbgfs_##name##_read,                                \
1228         .open = iwl_dbgfs_open_file_generic,                            \
1229         .llseek = generic_file_llseek,                                  \
1230 };
1231
1232 static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
1233                                          char __user *user_buf,
1234                                          size_t count, loff_t *ppos)
1235 {
1236         struct iwl_trans *trans = file->private_data;
1237         struct iwl_priv *priv = priv(trans);
1238         int pos = 0, ofs = 0;
1239         int cnt = 0, entry;
1240         struct iwl_trans_pcie *trans_pcie =
1241                 IWL_TRANS_GET_PCIE_TRANS(trans);
1242         struct iwl_tx_queue *txq;
1243         struct iwl_queue *q;
1244         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1245         char *buf;
1246         int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
1247                 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
1248         const u8 *ptr;
1249         ssize_t ret;
1250
1251         if (!priv->txq) {
1252                 IWL_ERR(trans, "txq not ready\n");
1253                 return -EAGAIN;
1254         }
1255         buf = kzalloc(bufsz, GFP_KERNEL);
1256         if (!buf) {
1257                 IWL_ERR(trans, "Can not allocate buffer\n");
1258                 return -ENOMEM;
1259         }
1260         pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
1261         for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1262                 txq = &priv->txq[cnt];
1263                 q = &txq->q;
1264                 pos += scnprintf(buf + pos, bufsz - pos,
1265                                 "q[%d]: read_ptr: %u, write_ptr: %u\n",
1266                                 cnt, q->read_ptr, q->write_ptr);
1267         }
1268         if (priv->tx_traffic &&
1269                 (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
1270                 ptr = priv->tx_traffic;
1271                 pos += scnprintf(buf + pos, bufsz - pos,
1272                                 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
1273                 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
1274                         for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
1275                              entry++,  ofs += 16) {
1276                                 pos += scnprintf(buf + pos, bufsz - pos,
1277                                                 "0x%.4x ", ofs);
1278                                 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
1279                                                    buf + pos, bufsz - pos, 0);
1280                                 pos += strlen(buf + pos);
1281                                 if (bufsz - pos > 0)
1282                                         buf[pos++] = '\n';
1283                         }
1284                 }
1285         }
1286
1287         pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
1288         pos += scnprintf(buf + pos, bufsz - pos,
1289                         "read: %u, write: %u\n",
1290                          rxq->read, rxq->write);
1291
1292         if (priv->rx_traffic &&
1293                 (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
1294                 ptr = priv->rx_traffic;
1295                 pos += scnprintf(buf + pos, bufsz - pos,
1296                                 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
1297                 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
1298                         for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
1299                              entry++,  ofs += 16) {
1300                                 pos += scnprintf(buf + pos, bufsz - pos,
1301                                                 "0x%.4x ", ofs);
1302                                 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
1303                                                    buf + pos, bufsz - pos, 0);
1304                                 pos += strlen(buf + pos);
1305                                 if (bufsz - pos > 0)
1306                                         buf[pos++] = '\n';
1307                         }
1308                 }
1309         }
1310
1311         ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1312         kfree(buf);
1313         return ret;
1314 }
1315
1316 static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
1317                                          const char __user *user_buf,
1318                                          size_t count, loff_t *ppos)
1319 {
1320         struct iwl_trans *trans = file->private_data;
1321         char buf[8];
1322         int buf_size;
1323         int traffic_log;
1324
1325         memset(buf, 0, sizeof(buf));
1326         buf_size = min(count, sizeof(buf) -  1);
1327         if (copy_from_user(buf, user_buf, buf_size))
1328                 return -EFAULT;
1329         if (sscanf(buf, "%d", &traffic_log) != 1)
1330                 return -EFAULT;
1331         if (traffic_log == 0)
1332                 iwl_reset_traffic_log(priv(trans));
1333
1334         return count;
1335 }
1336
1337 static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1338                                                 char __user *user_buf,
1339                                                 size_t count, loff_t *ppos) {
1340
1341         struct iwl_trans *trans = file->private_data;
1342         struct iwl_priv *priv = priv(trans);
1343         struct iwl_tx_queue *txq;
1344         struct iwl_queue *q;
1345         char *buf;
1346         int pos = 0;
1347         int cnt;
1348         int ret;
1349         const size_t bufsz = sizeof(char) * 64 *
1350                                 priv->cfg->base_params->num_of_queues;
1351
1352         if (!priv->txq) {
1353                 IWL_ERR(priv, "txq not ready\n");
1354                 return -EAGAIN;
1355         }
1356         buf = kzalloc(bufsz, GFP_KERNEL);
1357         if (!buf)
1358                 return -ENOMEM;
1359
1360         for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1361                 txq = &priv->txq[cnt];
1362                 q = &txq->q;
1363                 pos += scnprintf(buf + pos, bufsz - pos,
1364                                 "hwq %.2d: read=%u write=%u stop=%d"
1365                                 " swq_id=%#.2x (ac %d/hwq %d)\n",
1366                                 cnt, q->read_ptr, q->write_ptr,
1367                                 !!test_bit(cnt, priv->queue_stopped),
1368                                 txq->swq_id, txq->swq_id & 3,
1369                                 (txq->swq_id >> 2) & 0x1f);
1370                 if (cnt >= 4)
1371                         continue;
1372                 /* for the ACs, display the stop count too */
1373                 pos += scnprintf(buf + pos, bufsz - pos,
1374                                 "        stop-count: %d\n",
1375                                 atomic_read(&priv->queue_stop_count[cnt]));
1376         }
1377         ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1378         kfree(buf);
1379         return ret;
1380 }
1381
1382 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1383                                                 char __user *user_buf,
1384                                                 size_t count, loff_t *ppos) {
1385         struct iwl_trans *trans = file->private_data;
1386         struct iwl_trans_pcie *trans_pcie =
1387                 IWL_TRANS_GET_PCIE_TRANS(trans);
1388         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1389         char buf[256];
1390         int pos = 0;
1391         const size_t bufsz = sizeof(buf);
1392
1393         pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1394                                                 rxq->read);
1395         pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1396                                                 rxq->write);
1397         pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1398                                                 rxq->free_count);
1399         if (rxq->rb_stts) {
1400                 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1401                          le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
1402         } else {
1403                 pos += scnprintf(buf + pos, bufsz - pos,
1404                                         "closed_rb_num: Not Allocated\n");
1405         }
1406         return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1407 }
1408
1409 DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
1410 DEBUGFS_READ_FILE_OPS(rx_queue);
1411 DEBUGFS_READ_FILE_OPS(tx_queue);
1412
1413 /*
1414  * Create the debugfs files and directories
1415  *
1416  */
1417 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1418                                         struct dentry *dir)
1419 {
1420         DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
1421         DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
1422         DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
1423         return 0;
1424 }
1425 #else
1426 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1427                                         struct dentry *dir)
1428 { return 0; }
1429
1430 #endif /*CONFIG_IWLWIFI_DEBUGFS */
1431
1432 const struct iwl_trans_ops trans_ops_pcie = {
1433         .alloc = iwl_trans_pcie_alloc,
1434         .request_irq = iwl_trans_pcie_request_irq,
1435         .start_device = iwl_trans_pcie_start_device,
1436         .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
1437         .stop_device = iwl_trans_pcie_stop_device,
1438
1439         .tx_start = iwl_trans_pcie_tx_start,
1440
1441         .rx_free = iwl_trans_pcie_rx_free,
1442         .tx_free = iwl_trans_pcie_tx_free,
1443
1444         .send_cmd = iwl_trans_pcie_send_cmd,
1445         .send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,
1446
1447         .get_tx_cmd = iwl_trans_pcie_get_tx_cmd,
1448         .tx = iwl_trans_pcie_tx,
1449
1450         .txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
1451         .txq_agg_setup = iwl_trans_pcie_txq_agg_setup,
1452
1453         .kick_nic = iwl_trans_pcie_kick_nic,
1454
1455         .sync_irq = iwl_trans_pcie_sync_irq,
1456         .free = iwl_trans_pcie_free,
1457
1458         .dbgfs_register = iwl_trans_pcie_dbgfs_register,
1459 };
1460