1 /******************************************************************************
2  *
3  * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4  *
5  * Portions of this file are derived from the ipw3945 project, as well
6  * as portions of the ieee80211 subsystem header files.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of version 2 of the GNU General Public License as
10  * published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20  *
21  * The full GNU General Public License is included in this distribution in the
22  * file called LICENSE.
23  *
24  * Contact Information:
25  *  Intel Linux Wireless <ilw@linux.intel.com>
26  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27  *
28  *****************************************************************************/
29 #include <linux/etherdevice.h>
30 #include <linux/slab.h>
31 #include <linux/sched.h>
32
33 #include "iwl-debug.h"
34 #include "iwl-csr.h"
35 #include "iwl-prph.h"
36 #include "iwl-io.h"
37 #include "iwl-agn-hw.h"
38 #include "iwl-trans-pcie-int.h"
39
40 #define IWL_TX_CRC_SIZE 4
41 #define IWL_TX_DELIMITER_SIZE 4
42
43 /**
44  * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
45  */
46 void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
47                                            struct iwl_tx_queue *txq,
48                                            u16 byte_cnt)
49 {
50         struct iwlagn_scd_bc_tbl *scd_bc_tbl;
51         struct iwl_trans_pcie *trans_pcie =
52                 IWL_TRANS_GET_PCIE_TRANS(trans);
53         int write_ptr = txq->q.write_ptr;
54         int txq_id = txq->q.id;
55         u8 sec_ctl = 0;
56         u8 sta_id = 0;
57         u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
58         __le16 bc_ent;
59         struct iwl_tx_cmd *tx_cmd =
60                 (struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;
61
62         scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
63
64         WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
65
66         sta_id = tx_cmd->sta_id;
67         sec_ctl = tx_cmd->sec_ctl;
68
69         switch (sec_ctl & TX_CMD_SEC_MSK) {
70         case TX_CMD_SEC_CCM:
71                 len += CCMP_MIC_LEN;
72                 break;
73         case TX_CMD_SEC_TKIP:
74                 len += TKIP_ICV_LEN;
75                 break;
76         case TX_CMD_SEC_WEP:
77                 len += WEP_IV_LEN + WEP_ICV_LEN;
78                 break;
79         }
80
81         bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
82
83         scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
84
85         if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
86                 scd_bc_tbl[txq_id].
87                         tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
88 }
89
90 /**
91  * iwl_txq_update_write_ptr - Send new write index to hardware
92  */
93 void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
94 {
95         u32 reg = 0;
96         int txq_id = txq->q.id;
97
98         if (txq->need_update == 0)
99                 return;
100
101         if (hw_params(trans).shadow_reg_enable) {
102                 /* shadow register enabled */
103                 iwl_write32(bus(trans), HBUS_TARG_WRPTR,
104                             txq->q.write_ptr | (txq_id << 8));
105         } else {
106                 /* if we're trying to save power */
107                 if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
108                         /* wake up nic if it's powered down ...
109                          * uCode will wake up, and interrupt us again, so next
110                          * time we'll skip this part. */
111                         reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
112
113                         if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
114                                 IWL_DEBUG_INFO(trans,
115                                         "Tx queue %d requesting wakeup,"
116                                         " GP1 = 0x%x\n", txq_id, reg);
117                                 iwl_set_bit(bus(trans), CSR_GP_CNTRL,
118                                         CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
119                                 return;
120                         }
121
122                         iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
123                                      txq->q.write_ptr | (txq_id << 8));
124
125                 /*
126                  * else not in power-save mode,
127                  * uCode will never sleep when we're
128                  * trying to tx (during RFKILL, we're not trying to tx).
129                  */
130                 } else
131                         iwl_write32(bus(trans), HBUS_TARG_WRPTR,
132                                     txq->q.write_ptr | (txq_id << 8));
133         }
134         txq->need_update = 0;
135 }
136
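/**
 * iwl_tfd_tb_get_addr - return the DMA address of TFD transfer buffer @idx
 *
 * The address is stored split: the low 32 bits live in tb->lo and, when
 * dma_addr_t is wider than 32 bits, the top 4 bits sit in the low nibble
 * of tb->hi_n_len (the upper 12 bits of hi_n_len hold the buffer length).
 */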
137 static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
138 {
139         struct iwl_tfd_tb *tb = &tfd->tbs[idx];
140
141         dma_addr_t addr = get_unaligned_le32(&tb->lo);
142         if (sizeof(dma_addr_t) > sizeof(u32))
143                 addr |=
144                 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
145
146         return addr;
147 }
148
149 static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
150 {
151         struct iwl_tfd_tb *tb = &tfd->tbs[idx];
152
153         return le16_to_cpu(tb->hi_n_len) >> 4;
154 }
155
156 static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
157                                   dma_addr_t addr, u16 len)
158 {
159         struct iwl_tfd_tb *tb = &tfd->tbs[idx];
160         u16 hi_n_len = len << 4;
161
162         put_unaligned_le32(addr, &tb->lo);
163         if (sizeof(dma_addr_t) > sizeof(u32))
164                 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
165
166         tb->hi_n_len = cpu_to_le16(hi_n_len);
167
168         tfd->num_tbs = idx + 1;
169 }
170
171 static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
172 {
173         return tfd->num_tbs & 0x1f;
174 }
175
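/**
 * iwlagn_unmap_tfd - unmap all DMA mappings referenced by a TFD
 *
 * The first transfer buffer holds the bidirectionally mapped command;
 * any remaining buffers are unmapped in the direction given by @dma_dir.
 */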
176 static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
177                      struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
178 {
179         int i;
180         int num_tbs;
181
182         /* Sanity check on number of chunks */
183         num_tbs = iwl_tfd_get_num_tbs(tfd);
184
185         if (num_tbs >= IWL_NUM_OF_TBS) {
186                 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
187                 /* @todo issue fatal error, it is quite a serious situation */
188                 return;
189         }
190
191         /* Unmap tx_cmd */
192         if (num_tbs)
193                 dma_unmap_single(bus(trans)->dev,
194                                 dma_unmap_addr(meta, mapping),
195                                 dma_unmap_len(meta, len),
196                                 DMA_BIDIRECTIONAL);
197
198         /* Unmap chunks, if any. */
199         for (i = 1; i < num_tbs; i++)
200                 dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
201                                 iwl_tfd_tb_get_len(tfd, i), dma_dir);
202 }
203
204 /**
205  * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
206  * @trans - transport private data
207  * @txq - tx queue
208  * @index - the index of the TFD to be freed
209  * @dma_dir - the direction of the DMA mapping
210  *
211  * Does NOT advance any TFD circular buffer read/write indexes
212  * Does NOT free the TFD itself (which is within circular buffer)
213  */
214 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
215         int index, enum dma_data_direction dma_dir)
216 {
217         struct iwl_tfd *tfd_tmp = txq->tfds;
218
219         iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);
220
221         /* free SKB */
222         if (txq->skbs) {
223                 struct sk_buff *skb;
224
225                 skb = txq->skbs[index];
226
227                 /* Can be called from irqs-disabled context
228                  * If skb is not NULL, it means that the whole queue is being
229                  * freed and that the queue is not empty - free the skb
230                  */
231                 if (skb) {
232                         iwl_free_skb(priv(trans), skb);
233                         txq->skbs[index] = NULL;
234                 }
235         }
236 }
237
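/**
 * iwlagn_txq_attach_buf_to_tfd - attach a DMA-mapped buffer to a TFD
 *
 * Appends one transfer buffer (address/length pair) to the TFD at the
 * queue's write pointer; when @reset is set the TFD is zeroed first.
 * Returns -EINVAL if the TFD already holds IWL_NUM_OF_TBS buffers or
 * the address exceeds the 36-bit DMA mask.
 */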
238 int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
239                                  struct iwl_tx_queue *txq,
240                                  dma_addr_t addr, u16 len,
241                                  u8 reset)
242 {
243         struct iwl_queue *q;
244         struct iwl_tfd *tfd, *tfd_tmp;
245         u32 num_tbs;
246
247         q = &txq->q;
248         tfd_tmp = txq->tfds;
249         tfd = &tfd_tmp[q->write_ptr];
250
251         if (reset)
252                 memset(tfd, 0, sizeof(*tfd));
253
254         num_tbs = iwl_tfd_get_num_tbs(tfd);
255
256         /* Each TFD can point to a maximum of 20 Tx buffers */
257         if (num_tbs >= IWL_NUM_OF_TBS) {
258                 IWL_ERR(trans, "Error can not send more than %d chunks\n",
259                           IWL_NUM_OF_TBS);
260                 return -EINVAL;
261         }
262
263         if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
264                 return -EINVAL;
265
266         if (unlikely(addr & ~IWL_TX_DMA_MASK))
267                 IWL_ERR(trans, "Unaligned address = %llx\n",
268                           (unsigned long long)addr);
269
270         iwl_tfd_set_tb(tfd, num_tbs, addr, len);
271
272         return 0;
273 }
274
275 /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
276  * DMA services
277  *
278  * Theory of operation
279  *
280  * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
281  * of buffer descriptors, each of which points to one or more data buffers for
282  * the device to read from or fill.  Driver and device exchange status of each
283  * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
284  * entries in each circular buffer, to protect against confusing empty and full
285  * queue states.
286  *
287  * The device reads or writes the data in the queues via the device's several
288  * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
289  *
290  * For Tx queues, there are low mark and high mark limits. If, after queuing
291  * a packet for Tx, the free space drops below the low mark, the Tx queue is
292  * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
293  * space rises above the high mark, the Tx queue is resumed.
294  *
295  ***************************************************/
296
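/**
 * iwl_queue_space - return the number of slots available in the queue
 *
 * Two entries are always kept in reserve so that a full queue can be
 * told apart from an empty one (see "Theory of operation" above).
 */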
297 int iwl_queue_space(const struct iwl_queue *q)
298 {
299         int s = q->read_ptr - q->write_ptr;
300
301         if (q->read_ptr > q->write_ptr)
302                 s -= q->n_bd;
303
304         if (s <= 0)
305                 s += q->n_window;
306         /* keep some reserve to not confuse empty and full situations */
307         s -= 2;
308         if (s < 0)
309                 s = 0;
310         return s;
311 }
312
313 /**
314  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
315  */
316 int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
317 {
318         q->n_bd = count;
319         q->n_window = slots_num;
320         q->id = id;
321
322         /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
323          * and iwl_queue_dec_wrap are broken. */
324         if (WARN_ON(!is_power_of_2(count)))
325                 return -EINVAL;
326
327         /* slots_num must be power-of-two size, otherwise
328          * get_cmd_index is broken. */
329         if (WARN_ON(!is_power_of_2(slots_num)))
330                 return -EINVAL;
331
332         q->low_mark = q->n_window / 4;
333         if (q->low_mark < 4)
334                 q->low_mark = 4;
335
336         q->high_mark = q->n_window / 8;
337         if (q->high_mark < 2)
338                 q->high_mark = 2;
339
340         q->write_ptr = q->read_ptr = 0;
341
342         return 0;
343 }
344
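/**
 * iwlagn_txq_inval_byte_cnt_tbl - invalidate the byte-count entry being freed
 *
 * Overwrites the scheduler byte-count table entry at the queue's read
 * pointer (and its duplicate, if any) when the TFD is reclaimed.
 */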
345 static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
346                                           struct iwl_tx_queue *txq)
347 {
348         struct iwl_trans_pcie *trans_pcie =
349                 IWL_TRANS_GET_PCIE_TRANS(trans);
350         struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
351         int txq_id = txq->q.id;
352         int read_ptr = txq->q.read_ptr;
353         u8 sta_id = 0;
354         __le16 bc_ent;
355         struct iwl_tx_cmd *tx_cmd =
356                 (struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;
357
358         WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
359
360         if (txq_id != trans->shrd->cmd_queue)
361                 sta_id = tx_cmd->sta_id;
362
363         bc_ent = cpu_to_le16(1 | (sta_id << 12));
364         scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
365
366         if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
367                 scd_bc_tbl[txq_id].
368                         tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
369 }
370
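/**
 * iwlagn_tx_queue_set_q2ratid - map an RA/TID to a scheduler Tx queue
 *
 * Updates the half of the SCD translation-table dword that corresponds
 * to @txq_id (odd queues use the upper 16 bits, even queues the lower).
 */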
371 static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
372                                         u16 txq_id)
373 {
374         u32 tbl_dw_addr;
375         u32 tbl_dw;
376         u16 scd_q2ratid;
377
378         struct iwl_trans_pcie *trans_pcie =
379                 IWL_TRANS_GET_PCIE_TRANS(trans);
380
381         scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
382
383         tbl_dw_addr = trans_pcie->scd_base_addr +
384                         SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
385
386         tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);
387
388         if (txq_id & 0x1)
389                 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
390         else
391                 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
392
393         iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);
394
395         return 0;
396 }
397
398 static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
399 {
400         /* Simply stop the queue, but don't change any configuration;
401          * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
402         iwl_write_prph(bus(trans),
403                 SCD_QUEUE_STATUS_BITS(txq_id),
404                 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
405                 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
406 }
407
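/**
 * iwl_trans_set_wr_ptrs - point the HW write and SCD read pointers at @index
 */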
408 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
409                                 int txq_id, u32 index)
410 {
411         iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
412                         (index & 0xff) | (txq_id << 8));
413         iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
414 }
415
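/**
 * iwl_trans_tx_queue_set_status - (de)activate a Tx queue in the scheduler
 *
 * Sets the queue's ACTIVE/FIFO/window bits in the SCD status register and
 * records whether the queue is used for aggregation (scd_retry).
 */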
416 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
417                                         struct iwl_tx_queue *txq,
418                                         int tx_fifo_id, int scd_retry)
419 {
420         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
421         int txq_id = txq->q.id;
422         int active =
423                 test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
424
425         iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
426                         (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
427                         (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
428                         (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
429                         SCD_QUEUE_STTS_REG_MSK);
430
431         txq->sched_retry = scd_retry;
432
433         IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
434                        active ? "Activate" : "Deactivate",
435                        scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
436 }
437
438 static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
439                                     u8 ctx, u16 tid)
440 {
441         const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
442         if (likely(tid < ARRAY_SIZE(tid_to_ac)))
443                 return ac_to_fifo[tid_to_ac[tid]];
444
445         /* no support for TIDs 8-15 yet */
446         return -EINVAL;
447 }
448
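/**
 * iwl_trans_pcie_tx_agg_setup - configure an aggregation queue for an RA/TID
 *
 * Stops the queue's scheduler, maps the RA/TID to it, enables chaining and
 * aggregation, positions the read/write pointers at the starting sequence
 * number, sets the window size and frame limit, then activates the queue.
 */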
449 void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
450                                  enum iwl_rxon_context_id ctx, int sta_id,
451                                  int tid, int frame_limit)
452 {
453         int tx_fifo, txq_id, ssn_idx;
454         u16 ra_tid;
455         unsigned long flags;
456         struct iwl_tid_data *tid_data;
457
458         struct iwl_trans_pcie *trans_pcie =
459                 IWL_TRANS_GET_PCIE_TRANS(trans);
460
461         if (WARN_ON(sta_id == IWL_INVALID_STATION))
462                 return;
463         if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
464                 return;
465
466         tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
467         if (WARN_ON(tx_fifo < 0)) {
468                 IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
469                 return;
470         }
471
472         spin_lock_irqsave(&trans->shrd->sta_lock, flags);
473         tid_data = &trans->shrd->tid_data[sta_id][tid];
474         ssn_idx = SEQ_TO_SN(tid_data->seq_number);
475         txq_id = tid_data->agg.txq_id;
476         spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
477
478         ra_tid = BUILD_RAxTID(sta_id, tid);
479
480         spin_lock_irqsave(&trans->shrd->lock, flags);
481
482         /* Stop this Tx queue before configuring it */
483         iwlagn_tx_queue_stop_scheduler(trans, txq_id);
484
485         /* Map receiver-address / traffic-ID to this queue */
486         iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
487
488         /* Set this queue as a chain-building queue */
489         iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));
490
491         /* enable aggregations for the queue */
492         iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));
493
494         /* Place first TFD at index corresponding to start sequence number.
495          * Assumes that ssn_idx is valid (!= 0xFFF) */
496         trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
497         trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
498         iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
499
500         /* Set up Tx window size and frame limit for this queue */
501         iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
502                         SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
503                         sizeof(u32),
504                         ((frame_limit <<
505                         SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
506                         SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
507                         ((frame_limit <<
508                         SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
509                         SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
510
511         iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
512
513         /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
514         iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
515                                         tx_fifo, 1);
516
517         trans_pcie->txq[txq_id].sta_id = sta_id;
518         trans_pcie->txq[txq_id].tid = tid;
519
520         spin_unlock_irqrestore(&trans->shrd->lock, flags);
521 }
522
523 /*
524  * Find first available (lowest unused) Tx Queue, mark it "active".
525  * Called only when finding queue for aggregation.
526  * Should never return anything < 7, because they should already
527  * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
528  */
529 static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
530 {
531         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
532         int txq_id;
533
534         for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
535                 if (!test_and_set_bit(txq_id,
536                                         &trans_pcie->txq_ctx_active_msk))
537                         return txq_id;
538         return -1;
539 }
540
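/**
 * iwl_trans_pcie_tx_agg_alloc - reserve a Tx queue for a new BA session
 *
 * Picks the first free aggregation queue, records it in the tid_data and,
 * if the HW queue is already drained, marks the session as IWL_AGG_ON;
 * otherwise the session waits in IWL_EMPTYING_HW_QUEUE_ADDBA.
 */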
541 int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
542                                 enum iwl_rxon_context_id ctx, int sta_id,
543                                 int tid, u16 *ssn)
544 {
545         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
546         struct iwl_tid_data *tid_data;
547         unsigned long flags;
548         int txq_id;
549
550         txq_id = iwlagn_txq_ctx_activate_free(trans);
551         if (txq_id == -1) {
552                 IWL_ERR(trans, "No free aggregation queue available\n");
553                 return -ENXIO;
554         }
555
556         spin_lock_irqsave(&trans->shrd->sta_lock, flags);
557         tid_data = &trans->shrd->tid_data[sta_id][tid];
558         *ssn = SEQ_TO_SN(tid_data->seq_number);
559         tid_data->agg.txq_id = txq_id;
560         iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
561
562         tid_data = &trans->shrd->tid_data[sta_id][tid];
563         if (tid_data->tfds_in_queue == 0) {
564                 IWL_DEBUG_HT(trans, "HW queue is empty\n");
565                 tid_data->agg.state = IWL_AGG_ON;
566                 iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
567         } else {
568                 IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW "
569                              "queue\n", tid_data->tfds_in_queue);
570                 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
571         }
572         spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
573
574         return 0;
575 }
576
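/**
 * iwl_trans_pcie_txq_agg_disable - take a Tx queue out of aggregation mode
 */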
577 void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
578 {
579         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
580         iwlagn_tx_queue_stop_scheduler(trans, txq_id);
581
582         iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
583
584         trans_pcie->txq[txq_id].q.read_ptr = 0;
585         trans_pcie->txq[txq_id].q.write_ptr = 0;
586         /* supposes that ssn_idx is valid (!= 0xFFF) */
587         iwl_trans_set_wr_ptrs(trans, txq_id, 0);
588
589         iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
590         iwl_txq_ctx_deactivate(trans_pcie, txq_id);
591         iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
592 }
593
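/**
 * iwl_trans_pcie_tx_agg_disable - tear down the BA session for an RA/TID
 *
 * If the HW queue still holds frames, the session is only marked
 * IWL_EMPTYING_HW_QUEUE_DELBA and torn down once the queue drains.
 */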
594 int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
595                                   enum iwl_rxon_context_id ctx, int sta_id,
596                                   int tid)
597 {
598         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
599         unsigned long flags;
600         int read_ptr, write_ptr;
601         struct iwl_tid_data *tid_data;
602         int txq_id;
603
604         spin_lock_irqsave(&trans->shrd->sta_lock, flags);
605
606         tid_data = &trans->shrd->tid_data[sta_id][tid];
607         txq_id = tid_data->agg.txq_id;
608
609         if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
610             (IWLAGN_FIRST_AMPDU_QUEUE +
611                 hw_params(trans).num_ampdu_queues <= txq_id)) {
612                 IWL_ERR(trans,
613                         "queue number out of range: %d, must be %d to %d\n",
614                         txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
615                         IWLAGN_FIRST_AMPDU_QUEUE +
616                         hw_params(trans).num_ampdu_queues - 1);
617                 spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
618                 return -EINVAL;
619         }
620
621         switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
622         case IWL_EMPTYING_HW_QUEUE_ADDBA:
623                 /*
624                 * This can happen if the peer stops aggregation
625                 * again before we've had a chance to drain the
626                 * queue we selected previously, i.e. before the
627                 * session was really started completely.
628                 */
629                 IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
630                 goto turn_off;
631         case IWL_AGG_ON:
632                 break;
633         default:
634                 IWL_WARN(trans, "Stopping AGG while state not ON "
635                          "or starting for %d on %d (%d)\n", sta_id, tid,
636                          trans->shrd->tid_data[sta_id][tid].agg.state);
637                 spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
638                 return 0;
639         }
640
641         write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
642         read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
643
644         /* The queue is not empty */
645         if (write_ptr != read_ptr) {
646                 IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
647                 trans->shrd->tid_data[sta_id][tid].agg.state =
648                         IWL_EMPTYING_HW_QUEUE_DELBA;
649                 spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
650                 return 0;
651         }
652
653         IWL_DEBUG_HT(trans, "HW queue is empty\n");
654 turn_off:
655         trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
656
657         /* do not restore/save irqs */
658         spin_unlock(&trans->shrd->sta_lock);
659         spin_lock(&trans->shrd->lock);
660
661         iwl_trans_pcie_txq_agg_disable(trans, txq_id);
662
663         spin_unlock_irqrestore(&trans->shrd->lock, flags);
664
665         iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
666
667         return 0;
668 }
669
670 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
671
672 /**
673  * iwl_enqueue_hcmd - enqueue a uCode command
674  * @trans: transport private data
675  * @cmd: a pointer to the uCode command structure
676  *
677  * The function returns a negative value to indicate that the operation
678  * failed. On success, it returns the index (>= 0) of the command in the
679  * command queue.
680  */
681 static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
682 {
683         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
684         struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
685         struct iwl_queue *q = &txq->q;
686         struct iwl_device_cmd *out_cmd;
687         struct iwl_cmd_meta *out_meta;
688         dma_addr_t phys_addr;
689         unsigned long flags;
690         u32 idx;
691         u16 copy_size, cmd_size;
692         bool is_ct_kill = false;
693         bool had_nocopy = false;
694         int i;
695         u8 *cmd_dest;
696 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
697         const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
698         int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
699         int trace_idx;
700 #endif
701
702         if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
703                 IWL_WARN(trans, "fw recovery, no hcmd send\n");
704                 return -EIO;
705         }
706
707         if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
708             !(cmd->flags & CMD_ON_DEMAND)) {
709                 IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
710                 return -EIO;
711         }
712
713         copy_size = sizeof(out_cmd->hdr);
714         cmd_size = sizeof(out_cmd->hdr);
715
716         /* need one for the header if the first is NOCOPY */
717         BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
718
719         for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
720                 if (!cmd->len[i])
721                         continue;
722                 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
723                         had_nocopy = true;
724                 } else {
725                         /* NOCOPY must not be followed by normal! */
726                         if (WARN_ON(had_nocopy))
727                                 return -EINVAL;
728                         copy_size += cmd->len[i];
729                 }
730                 cmd_size += cmd->len[i];
731         }
732
733         /*
734          * If any of the command structures end up being larger than
735          * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
736          * allocated into separate TFDs, then we will need to
737          * increase the size of the buffers.
738          */
739         if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
740                 return -EINVAL;
741
742         if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
743                 IWL_WARN(trans, "Not sending command - %s KILL\n",
744                          iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
745                 return -EIO;
746         }
747
748         spin_lock_irqsave(&trans->hcmd_lock, flags);
749
750         if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
751                 spin_unlock_irqrestore(&trans->hcmd_lock, flags);
752
753                 IWL_ERR(trans, "No space in command queue\n");
754                 is_ct_kill = iwl_check_for_ct_kill(priv(trans));
755                 if (!is_ct_kill) {
756                         IWL_ERR(trans, "Restarting adapter due to queue full\n");
757                         iwlagn_fw_error(priv(trans), false);
758                 }
759                 return -ENOSPC;
760         }
761
762         idx = get_cmd_index(q, q->write_ptr);
763         out_cmd = txq->cmd[idx];
764         out_meta = &txq->meta[idx];
765
766         memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
767         if (cmd->flags & CMD_WANT_SKB)
768                 out_meta->source = cmd;
769
770         /* set up the header */
771
772         out_cmd->hdr.cmd = cmd->id;
773         out_cmd->hdr.flags = 0;
774         out_cmd->hdr.sequence =
775                 cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
776                                          INDEX_TO_SEQ(q->write_ptr));
777
778         /* and copy the data that needs to be copied */
779
780         cmd_dest = out_cmd->payload;
781         for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
782                 if (!cmd->len[i])
783                         continue;
784                 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
785                         break;
786                 memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
787                 cmd_dest += cmd->len[i];
788         }
789
790         IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
791                         "%d bytes at %d[%d]:%d\n",
792                         get_cmd_string(out_cmd->hdr.cmd),
793                         out_cmd->hdr.cmd,
794                         le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
795                         q->write_ptr, idx, trans->shrd->cmd_queue);
796
797         phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
798                                 DMA_BIDIRECTIONAL);
799         if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
800                 idx = -ENOMEM;
801                 goto out;
802         }
803
804         dma_unmap_addr_set(out_meta, mapping, phys_addr);
805         dma_unmap_len_set(out_meta, len, copy_size);
806
807         iwlagn_txq_attach_buf_to_tfd(trans, txq,
808                                         phys_addr, copy_size, 1);
809 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
810         trace_bufs[0] = &out_cmd->hdr;
811         trace_lens[0] = copy_size;
812         trace_idx = 1;
813 #endif
814
815         for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
816                 if (!cmd->len[i])
817                         continue;
818                 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
819                         continue;
820                 phys_addr = dma_map_single(bus(trans)->dev,
821                                            (void *)cmd->data[i],
822                                            cmd->len[i], DMA_BIDIRECTIONAL);
823                 if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
824                         iwlagn_unmap_tfd(trans, out_meta,
825                                          &txq->tfds[q->write_ptr],
826                                          DMA_BIDIRECTIONAL);
827                         idx = -ENOMEM;
828                         goto out;
829                 }
830
831                 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
832                                              cmd->len[i], 0);
833 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
834                 trace_bufs[trace_idx] = cmd->data[i];
835                 trace_lens[trace_idx] = cmd->len[i];
836                 trace_idx++;
837 #endif
838         }
839
840         out_meta->flags = cmd->flags;
841
842         txq->need_update = 1;
843
844         /* check that tracing gets all possible blocks */
845         BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
846 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
847         trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
848                                trace_bufs[0], trace_lens[0],
849                                trace_bufs[1], trace_lens[1],
850                                trace_bufs[2], trace_lens[2]);
851 #endif
852
853         /* Increment and update queue's write index */
854         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
855         iwl_txq_update_write_ptr(trans, txq);
856
857  out:
858         spin_unlock_irqrestore(&trans->hcmd_lock, flags);
859         return idx;
860 }
861
862 /**
863  * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
864  *
865  * When FW advances 'R' index, all entries between old and new 'R' index
866  * need to be reclaimed. As a result, some free space forms.  If there is
867  * enough free space (> low mark), wake the stack that feeds us.
868  */
869 static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
870                                    int idx)
871 {
872         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
873         struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
874         struct iwl_queue *q = &txq->q;
875         int nfreed = 0;
876
877         if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
878                 IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
879                           "index %d is out of range [0-%d] %d %d.\n", __func__,
880                           txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
881                 return;
882         }
883
884         for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
885              q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
886
887                 if (nfreed++ > 0) {
888                         IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
889                                         q->write_ptr, q->read_ptr);
890                         iwlagn_fw_error(priv(trans), false);
891                 }
892
893         }
894 }
895
896 /**
897  * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
898  * @rxb: Rx buffer to reclaim
899  * @handler_status: return value of the handler of the command
900  *      (put in setup_rx_handlers)
901  *
902  * If an Rx buffer has an async callback associated with it the callback
903  * will be executed.  The attached skb (if present) will only be freed
904  * if the callback returns 1
905  */
906 void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
907                          int handler_status)
908 {
909         struct iwl_rx_packet *pkt = rxb_addr(rxb);
910         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
911         int txq_id = SEQ_TO_QUEUE(sequence);
912         int index = SEQ_TO_INDEX(sequence);
913         int cmd_index;
914         struct iwl_device_cmd *cmd;
915         struct iwl_cmd_meta *meta;
916         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
917         struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
918         unsigned long flags;
919
920         /* If a Tx command is being handled and it isn't in the actual
921          * command queue, then a command routing bug has been introduced
922          * in the queue management code. */
923         if (WARN(txq_id != trans->shrd->cmd_queue,
924                  "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
925                   txq_id, trans->shrd->cmd_queue, sequence,
926                   trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
927                   trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
928                 iwl_print_hex_error(trans, pkt, 32);
929                 return;
930         }
931
932         cmd_index = get_cmd_index(&txq->q, index);
933         cmd = txq->cmd[cmd_index];
934         meta = &txq->meta[cmd_index];
935
936         txq->time_stamp = jiffies;
937
938         iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
939                          DMA_BIDIRECTIONAL);
940
941         /* Input error checking is done when commands are added to queue. */
942         if (meta->flags & CMD_WANT_SKB) {
943                 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
944                 meta->source->handler_status = handler_status;
945                 rxb->page = NULL;
946         }
947
948         spin_lock_irqsave(&trans->hcmd_lock, flags);
949
950         iwl_hcmd_queue_reclaim(trans, txq_id, index);
951
952         if (!(meta->flags & CMD_ASYNC)) {
953                 if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
954                         IWL_WARN(trans,
955                                  "HCMD_ACTIVE already clear for command %s\n",
956                                  get_cmd_string(cmd->hdr.cmd));
957                 }
958                 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
959                 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
960                                get_cmd_string(cmd->hdr.cmd));
961                 wake_up(&trans->shrd->wait_command_queue);
962         }
963
964         meta->flags = 0;
965
966         spin_unlock_irqrestore(&trans->hcmd_lock, flags);
967 }
968
969 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
970
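/**
 * iwl_send_cmd_async - send a host command without waiting for completion
 */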
971 static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
972 {
973         int ret;
974
975         /* An asynchronous command can not expect an SKB to be set. */
976         if (WARN_ON(cmd->flags & CMD_WANT_SKB))
977                 return -EINVAL;
978
979
980         if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
981                 return -EBUSY;
982
983         ret = iwl_enqueue_hcmd(trans, cmd);
984         if (ret < 0) {
985                 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
986                           get_cmd_string(cmd->id), ret);
987                 return ret;
988         }
989         return 0;
990 }
991
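/**
 * iwl_send_cmd_sync - send a host command and wait for its completion
 *
 * Sleeps (up to HOST_COMPLETE_TIMEOUT) until the response handler clears
 * STATUS_HCMD_ACTIVE, and translates RF-kill, FW error and timeout into
 * the appropriate error codes.
 */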
992 static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
993 {
994         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
995         int cmd_idx;
996         int ret;
997
998         lockdep_assert_held(&trans->shrd->mutex);
999
1000         IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
1001                         get_cmd_string(cmd->id));
1002
1003         set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
1004         IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
1005                         get_cmd_string(cmd->id));
1006
1007         cmd_idx = iwl_enqueue_hcmd(trans, cmd);
1008         if (cmd_idx < 0) {
1009                 ret = cmd_idx;
1010                 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
1011                 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
1012                           get_cmd_string(cmd->id), ret);
1013                 return ret;
1014         }
1015
1016         ret = wait_event_timeout(trans->shrd->wait_command_queue,
1017                         !test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
1018                         HOST_COMPLETE_TIMEOUT);
1019         if (!ret) {
1020                 if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
1021                         struct iwl_tx_queue *txq =
1022                                 &trans_pcie->txq[trans->shrd->cmd_queue];
1023                         struct iwl_queue *q = &txq->q;
1024
1025                         IWL_ERR(trans,
1026                                 "Error sending %s: time out after %dms.\n",
1027                                 get_cmd_string(cmd->id),
1028                                 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1029
1030                         IWL_ERR(trans,
1031                                 "Current CMD queue read_ptr %d write_ptr %d\n",
1032                                 q->read_ptr, q->write_ptr);
1033
1034                         clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
1035                         IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command "
1036                                  "%s\n", get_cmd_string(cmd->id));
1037                         ret = -ETIMEDOUT;
1038                         goto cancel;
1039                 }
1040         }
1041
1042         if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
1043                 IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
1044                                get_cmd_string(cmd->id));
1045                 ret = -ECANCELED;
1046                 goto fail;
1047         }
1048         if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
1049                 IWL_ERR(trans, "Command %s failed: FW Error\n",
1050                                get_cmd_string(cmd->id));
1051                 ret = -EIO;
1052                 goto fail;
1053         }
1054         if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
1055                 IWL_ERR(trans, "Error: Response NULL in '%s'\n",
1056                           get_cmd_string(cmd->id));
1057                 ret = -EIO;
1058                 goto cancel;
1059         }
1060
1061         return 0;
1062
1063 cancel:
1064         if (cmd->flags & CMD_WANT_SKB) {
1065                 /*
1066                  * Cancel the CMD_WANT_SKB flag for the cmd in the
1067                  * TX cmd queue. Otherwise in case the cmd comes
1068                  * in later, it will possibly set an invalid
1069                  * address (cmd->meta.source).
1070                  */
1071                 trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
1072                                                         ~CMD_WANT_SKB;
1073         }
1074 fail:
1075         if (cmd->reply_page) {
1076                 iwl_free_pages(trans->shrd, cmd->reply_page);
1077                 cmd->reply_page = 0;
1078         }
1079
1080         return ret;
1081 }
1082
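/**
 * iwl_trans_pcie_send_cmd - dispatch a host command (sync or async)
 */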
1083 int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1084 {
1085         if (cmd->flags & CMD_ASYNC)
1086                 return iwl_send_cmd_async(trans, cmd);
1087
1088         return iwl_send_cmd_sync(trans, cmd);
1089 }
1090
1091 /* Frees buffers until index _not_ inclusive */
1092 int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
1093                          struct sk_buff_head *skbs)
1094 {
1095         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1096         struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
1097         struct iwl_queue *q = &txq->q;
1098         int last_to_free;
1099         int freed = 0;
1100
1101         /* This function is not meant to release the cmd queue */
1102         if (WARN_ON(txq_id == trans->shrd->cmd_queue))
1103                 return 0;
1104
1105         /* Since we free until index _not_ inclusive, the one before index is
1106          * the last we will free. That one must be a used entry. */
1107         last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
1108
1109         if ((index >= q->n_bd) ||
1110            (iwl_queue_used(q, last_to_free) == 0)) {
1111                 IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
1112                           "last_to_free %d is out of range [0-%d] %d %d.\n",
1113                           __func__, txq_id, last_to_free, q->n_bd,
1114                           q->write_ptr, q->read_ptr);
1115                 return 0;
1116         }
1117
1118         IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
1119                            q->read_ptr, index);
1120
1121         if (WARN_ON(!skb_queue_empty(skbs)))
1122                 return 0;
1123
1124         for (;
1125              q->read_ptr != index;
1126              q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1127
1128                 if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
1129                         continue;
1130
1131                 __skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);
1132
1133                 txq->skbs[txq->q.read_ptr] = NULL;
1134
1135                 iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
1136
1137                 iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
1138                 freed++;
1139         }
1140         return freed;
1141 }