/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

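/*
 * Worked example for the byte-count entry above (illustrative numbers
 * only): if the adjusted len is 0x064 and sta_id is 3, then
 * bc_ent = cpu_to_le16((0x064 & 0xFFF) | (3 << 12)) = cpu_to_le16(0x3064),
 * i.e. the low 12 bits carry the length and the top 4 bits the station id.
 */
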
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(priv, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(priv, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

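/*
 * The HBUS_TARG_WRPTR writes above encode the queue id and the index in
 * a single 32-bit value. Illustrative example: queue 4 with write_ptr
 * 0x2A is written as 0x2A | (4 << 8) = 0x042A.
 */
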
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

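/*
 * Worked example for the TB packing above (illustrative numbers only):
 * for a 36-bit DMA address addr = 0x987654321 and len = 0x40,
 * iwl_tfd_set_tb() stores tb->lo = 0x87654321 and
 * tb->hi_n_len = (0x40 << 4) | 0x9 = 0x0409, i.e. address bits 32-35
 * share one 16-bit word with the 12-bit length.
 */
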
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, this is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(priv->bus->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
	int index)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
			 DMA_TO_DEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[index].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[index].skb = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error: cannot send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'Tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

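/*
 * Worked example for iwl_queue_space() below (illustrative, assuming
 * n_bd == n_window == 256 as on the data queues): with read_ptr = 250
 * and write_ptr = 4, s = 246 - 256 = -10, then s += 256 yields 246,
 * and the 2-entry reserve leaves 244 entries reported as free.
 */
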
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

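/*
 * The power-of-2 checks above protect the mask-based index helpers in
 * iwl-helpers.h. A minimal sketch of the idiom they rely on (assumed
 * shape, not a verbatim copy of those helpers):
 *
 *	u32 next = (index + 1) & (q->n_bd - 1);	    /+ wrap without '%' +/
 *	u32 slot = index & (q->n_window - 1);	    /+ get_cmd_index() +/
 *
 * Masking with (size - 1) only wraps correctly when size is a power of
 * 2, hence the WARN_ON()s.
 */
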
/* TODO: this function should NOT be exported from the trans module - it is
 * exported only until the reclaim flow is brought into the transport module
 * too. Add a declaration to make sparse happy. */
void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
					  struct iwl_tx_queue *txq);

void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
					  struct iwl_tx_queue *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != priv->shrd->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = priv->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

	return 0;
}

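/*
 * The read-modify-write above keeps the neighbouring queue's mapping
 * intact: RA/TID entries are packed two per 32-bit word of the
 * scheduler's translation table, odd queue ids in bits 16-31 and even
 * queue ids in bits 0-15. Illustrative example: for txq_id = 11 only
 * the upper half-word is replaced; txq_id = 10 shares the lower half.
 */
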
static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(priv,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
				int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
						int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= MAX_TID_COUNT))
		return;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	tx_fifo = tid_data->agg.tx_fifo;
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&priv->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv, priv->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	priv->txq[txq_id].sta_id = sta_id;
	priv->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&priv->shrd->lock, flags);
}

int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				  u16 ssn_idx, u8 tx_fifo)
{
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_ERR(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* assumes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to the device's private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a negative value to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
		IWL_WARN(priv, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(priv, "tm owns the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

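	/*
	 * Worked example for the sizing loop above (illustrative numbers
	 * only): with len[0] = 16 copied normally and len[1] = 200 flagged
	 * IWL_HCMD_DFL_NOCOPY, copy_size = sizeof(hdr) + 16 (what gets
	 * memcpy'd into the command buffer below), while cmd_size =
	 * sizeof(hdr) + 216 (what the firmware sees across all TBs).
	 */
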
	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv);
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv, false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(priv->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, priv->shrd->cmd_queue);

	phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(priv->bus->dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

 out:
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}

	}
}

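/*
 * Illustrative walk-through of the reclaim loop above: if read_ptr is 5
 * and the firmware response carries idx 5, the loop runs exactly once,
 * advancing read_ptr to 6 with nfreed ending at 1. Any additional
 * iteration means the firmware skipped a command completion, which is
 * flagged as an error.
 */
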
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, priv->shrd->cmd_queue, sequence,
		  priv->txq[priv->shrd->cmd_queue].q.read_ptr,
		  priv->txq[priv->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}

const char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_TXFIFO_FLUSH);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
		IWL_CMD(COEX_EVENT_CMD);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
		IWL_CMD(CALIBRATION_CFG_CMD);
		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
		IWL_CMD(TEMPERATURE_NOTIFICATION);
		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
		IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
		IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
		IWL_CMD(REPLY_BT_COEX_PROT_ENV);
		IWL_CMD(REPLY_WIPAN_PARAMS);
		IWL_CMD(REPLY_WIPAN_RXON);
		IWL_CMD(REPLY_WIPAN_RXON_TIMING);
		IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
		IWL_CMD(REPLY_WIPAN_QOS_PARAM);
		IWL_CMD(REPLY_WIPAN_WEPKEY);
		IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
		IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
		IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
		IWL_CMD(REPLY_WOWLAN_PATTERNS);
		IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
		IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
		IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
		IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
		IWL_CMD(REPLY_WOWLAN_GET_STATUS);
	default:
		return "UNKNOWN";
	}
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static void iwl_generic_cmd_callback(struct iwl_priv *priv,
				     struct iwl_device_cmd *cmd,
				     struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
	}
#endif
}

static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command cannot expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = iwl_generic_cmd_callback;

	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(priv, cmd);
	if (ret < 0) {
		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&priv->shrd->mutex);

	/* A synchronous command cannot have a callback set. */
	if (WARN_ON(cmd->callback))
		return -EINVAL;

	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
	IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status)) {
			IWL_ERR(priv,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
			IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command "
				 "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->shrd->status)) {
		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
		IWL_ERR(priv, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv->txq[priv->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(priv, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(priv, cmd);

	return iwl_send_cmd_sync(priv, cmd);
}

int iwl_trans_pcie_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags,
		u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_trans_pcie_send_cmd(priv, &cmd);
}

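/*
 * Usage sketch (illustrative, not taken from this file): a caller that
 * wants a synchronous, fixed-length command can go through the pdu
 * helper above, e.g.
 *
 *	ret = iwl_trans_pcie_send_cmd_pdu(priv, REPLY_BT_CONFIG,
 *					  CMD_SYNC, sizeof(bt_cmd), &bt_cmd);
 *
 * where bt_cmd would be a struct iwl_bt_cmd and CMD_SYNC selects the
 * iwl_send_cmd_sync() path.
 */
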
/* Frees buffers until index _not_ inclusive */
void iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			    struct sk_buff_head *skbs)
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	struct ieee80211_tx_info *info;
	int last_to_free;

	/* Since we free until index _not_ inclusive, the one before index
	 * is the last we will free. This one must be used. */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return;
	}

	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
			   q->read_ptr, index);

	if (WARN_ON(!skb_queue_empty(skbs)))
		return;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		if (WARN_ON_ONCE(tx_info->skb == NULL))
			continue;

		info = IEEE80211_SKB_CB(tx_info->skb);
		info->driver_data[0] = tx_info->ctx;

		__skb_queue_tail(skbs, tx_info->skb);

		tx_info->skb = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(priv(trans), txq);

		iwlagn_txq_free_tfd(priv(trans), txq, txq->q.read_ptr);
	}
}
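
/*
 * Illustrative example for the exclusive-index semantics above: with
 * read_ptr = 10 and index = 13, entries 10, 11 and 12 are unmapped and
 * their skbs moved onto 'skbs'; entry 13 itself is left untouched and
 * read_ptr ends up equal to index.
 */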