1 /******************************************************************************
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 * Copyright(c) 2016 Intel Deutschland GmbH
7 * Portions of this file are derived from the ipw3945 project, as well
8 * as portions of the ieee80211 subsystem header files.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
23 * The full GNU General Public License is included in this distribution in the
24 * file called LICENSE.
26 * Contact Information:
27 * Intel Linux Wireless <linuxwifi@intel.com>
28 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *****************************************************************************/
31 #include <linux/sched.h>
32 #include <linux/wait.h>
33 #include <linux/gfp.h>
38 #include "iwl-op-mode.h"
40 /******************************************************************************
44 ******************************************************************************/
47 * Rx theory of operation
49 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
50 * each of which points to a Receive Buffer to be filled by the NIC. These get
51 * used not only for Rx frames, but for any command response or notification
52 * from the NIC. The driver and NIC manage the Rx buffers by means
53 * of indexes into the circular buffer.
56 * The host/firmware share two index registers for managing the Rx buffers.
58 * The READ index maps to the first position that the firmware may be writing
59 * to -- the driver can read up to (but not including) this position and get good data.
61 * The READ index is managed by the firmware once the card is enabled.
63 * The WRITE index maps to the last position the driver has read from -- the
64 * position preceding WRITE is the last slot the firmware can place a packet.
66 * The queue is empty (no good data) if WRITE = READ - 1, and is full if WRITE = READ.
69 * During initialization, the host sets up the READ queue position to the first
70 * INDEX position, and WRITE to the last (READ - 1 wrapped)
72 * When the firmware places a packet in a buffer, it will advance the READ index
73 * and fire the RX interrupt. The driver can then query the READ index and
74 * process as many packets as possible, moving the WRITE index forward as it
75 * resets the Rx queue buffers with new memory.
77 * The management in the driver is as follows:
78 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
79 * When the interrupt handler is called, the request is processed.
80 * The page is either stolen - transferred to the upper layer
81 * or reused - added immediately to the iwl->rxq->rx_free list.
82 * + When the page is stolen - the driver updates the matching queue's used
83 * count, detaches the RBD and transfers it to the queue used list.
84 * When there are two used RBDs - they are transferred to the allocator empty
85 * list. Work is then scheduled for the allocator to start allocating 8 buffers.
87 * When there are another 6 used RBDs - they are transferred to the allocator
88 * empty list and the driver tries to claim the pre-allocated buffers and
89 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them until ready.
91 * When there are 8+ buffers in the free list - either from allocation or from
92 * 8 reused unstolen pages - restock is called to update the FW and indexes.
93 * + In order to make sure the allocator always has RBDs to use for allocation
94 * the allocator has an initial pool of size num_queues*(8-2) - the
95 * maximum missing RBDs per allocation request (request posted with 2
96 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
97 * The queues supply the rest of the RBDs through recycling.
98 * + A received packet is processed and handed to the kernel network stack,
99 * detached from the iwl->rxq. The driver 'processed' index is updated.
100 * + If there are no allocated buffers in iwl->rxq->rx_free,
101 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
102 * If there were enough free buffers and RX_STALLED is set it is cleared.
107 * iwl_rxq_alloc() Allocates rx_free
108 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
109 * iwl_pcie_rxq_restock.
110 * Used only during initialization.
111 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
112 * queue, updates firmware pointers, and updates the WRITE index.
114 * iwl_pcie_rx_allocator() Background work for allocating pages.
116 * -- enable interrupts --
117 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
118 * READ INDEX, detaching the SKB from the pool.
119 * Moves the packet buffer from queue to rx_used.
120 * Posts and claims requests to the allocator.
121 * Calls iwl_pcie_rxq_restock to refill any empty slots.
127 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
129 * Regular Receive interrupt:
131 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
132 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
134 * rxq.queue -> rxq.rx_free -> rxq.queue
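*
* In short, using the numbers above: 2 used RBDs post an allocation request,
* 6 more used RBDs trigger claiming the allocated batch, and 8+ buffers on
* rx_free trigger a restock of the hardware queue.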
140 * iwl_rxq_space - Return number of free slots available in queue.
142 static int iwl_rxq_space(const struct iwl_rxq *rxq)
144 /* Make sure rx queue size is a power of 2 */
145 WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
148 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
149 * between empty and completely full queues.
150 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
151 * defined for negative dividends.
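* For example, with queue_size = 256, read = 10 and write = 10 the expression
* below yields (-1) & 255 = 255 free slots; with write = 9 it yields 0, i.e.
* the queue is fully stocked.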
153 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
157 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
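* The device stores this pointer as (DMA address >> 8), so the receive buffer
* must be at least 256-byte aligned for the conversion to be lossless.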
159 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
161 return cpu_to_le32((u32)(dma_addr >> 8));
164 static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
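/* The 64-bit value is written as two 32-bit prph writes: low word at ofs,
* high word at ofs + 4 */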
166 iwl_write_prph(trans, ofs, val & 0xffffffff);
167 iwl_write_prph(trans, ofs + 4, val >> 32);
171 * iwl_pcie_rx_stop - stops the Rx DMA
173 int iwl_pcie_rx_stop(struct iwl_trans *trans)
175 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
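/* Wait for the Rx DMA channel to report idle before returning */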
176 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
177 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
181 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
183 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
188 lockdep_assert_held(&rxq->lock);
191 * explicitly wake up the NIC if:
192 * 1. shadow registers aren't enabled
193 * 2. there is a chance that the NIC is asleep
195 if (!trans->cfg->base_params->shadow_reg_enable &&
196 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
197 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
199 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
200 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
202 iwl_set_bit(trans, CSR_GP_CNTRL,
203 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
204 rxq->need_update = true;
209 rxq->write_actual = round_down(rxq->write, 8);
210 if (trans->cfg->mq_rx_supported)
211 iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
214 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
217 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
219 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
222 for (i = 0; i < trans->num_rx_queues; i++) {
223 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
225 if (!rxq->need_update)
227 spin_lock(&rxq->lock);
228 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
229 rxq->need_update = false;
230 spin_unlock(&rxq->lock);
234 static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
237 struct iwl_rx_mem_buffer *rxb;
240 * If the device isn't enabled - no need to try to add buffers...
241 * This can happen when we stop the device and still have an interrupt
242 * pending. We stop the APM before we sync the interrupts because we
243 * have to (see comment there). On the other hand, since the APM is
244 * stopped, we cannot access the HW (in particular not prph).
245 * So don't try to restock if the APM has been already stopped.
247 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
250 spin_lock(&rxq->lock);
251 while (rxq->free_count) {
252 __le64 *bd = (__le64 *)rxq->bd;
254 /* Get next free Rx buffer, remove from free list */
255 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
257 list_del(&rxb->list);
259 /* The low 12 bits of the DMA address are expected to be empty (4 KiB aligned
* page); they are reused below to carry the buffer's vid */
260 WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
261 /* Point to Rx buffer via next RBD in circular buffer */
262 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
263 rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
266 spin_unlock(&rxq->lock);
269 * If we've added more space for the firmware to place data, tell it.
270 * Increment device's write pointer in multiples of 8.
272 if (rxq->write_actual != (rxq->write & ~0x7)) {
273 spin_lock(&rxq->lock);
274 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
275 spin_unlock(&rxq->lock);
280 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
282 * If there are slots in the RX queue that need to be restocked,
283 * and we have free pre-allocated buffers, fill the ranks as much
284 * as we can, pulling from rx_free.
286 * This moves the 'write' index forward to catch up with 'processed', and
287 * also updates the memory address in the firmware to reference the new
290 static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
292 struct iwl_rx_mem_buffer *rxb;
295 * If the device isn't enabled - no need to try to add buffers...
296 * This can happen when we stop the device and still have an interrupt
297 * pending. We stop the APM before we sync the interrupts because we
298 * have to (see comment there). On the other hand, since the APM is
299 * stopped, we cannot access the HW (in particular not prph).
300 * So don't try to restock if the APM has been already stopped.
302 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
305 spin_lock(&rxq->lock);
306 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
307 __le32 *bd = (__le32 *)rxq->bd;
308 /* The overwritten rxb must be a used one */
309 rxb = rxq->queue[rxq->write];
310 BUG_ON(rxb && rxb->page);
312 /* Get next free Rx buffer, remove from free list */
313 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
315 list_del(&rxb->list);
317 /* Point to Rx buffer via next RBD in circular buffer */
318 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
319 rxq->queue[rxq->write] = rxb;
320 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
323 spin_unlock(&rxq->lock);
325 /* If we've added more space for the firmware to place data, tell it.
326 * Increment device's write pointer in multiples of 8. */
327 if (rxq->write_actual != (rxq->write & ~0x7)) {
328 spin_lock(&rxq->lock);
329 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
330 spin_unlock(&rxq->lock);
335 * iwl_pcie_rx_alloc_page - allocates and returns a page.
338 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
341 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
343 gfp_t gfp_mask = priority;
345 if (trans_pcie->rx_page_order > 0)
346 gfp_mask |= __GFP_COMP;
348 /* Alloc a new receive buffer */
349 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
352 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
353 trans_pcie->rx_page_order);
355 * Issue an error if we don't have enough pre-allocated buffers.
358 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
360 "Failed to alloc_pages\n");
367 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
369 * A used RBD is an Rx buffer that has been given to the stack. To use it again
370 * a page must be allocated and the RBD must point to the page. This function
371 * doesn't change the HW pointer but handles the list of pages that is used by
372 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly allocated RBDs.
375 static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
378 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
379 struct iwl_rx_mem_buffer *rxb;
383 spin_lock(&rxq->lock);
384 if (list_empty(&rxq->rx_used)) {
385 spin_unlock(&rxq->lock);
388 spin_unlock(&rxq->lock);
390 /* Alloc a new receive buffer */
391 page = iwl_pcie_rx_alloc_page(trans, priority);
395 spin_lock(&rxq->lock);
397 if (list_empty(&rxq->rx_used)) {
398 spin_unlock(&rxq->lock);
399 __free_pages(page, trans_pcie->rx_page_order);
402 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
404 list_del(&rxb->list);
405 spin_unlock(&rxq->lock);
409 /* Get physical address of the RB */
411 dma_map_page(trans->dev, page, 0,
412 PAGE_SIZE << trans_pcie->rx_page_order,
414 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
416 spin_lock(&rxq->lock);
417 list_add(&rxb->list, &rxq->rx_used);
418 spin_unlock(&rxq->lock);
419 __free_pages(page, trans_pcie->rx_page_order);
423 spin_lock(&rxq->lock);
425 list_add_tail(&rxb->list, &rxq->rx_free);
428 spin_unlock(&rxq->lock);
432 static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
434 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
437 for (i = 0; i < MQ_RX_POOL_SIZE; i++) {
438 if (!trans_pcie->rx_pool[i].page)
440 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
441 PAGE_SIZE << trans_pcie->rx_page_order,
443 __free_pages(trans_pcie->rx_pool[i].page,
444 trans_pcie->rx_page_order);
445 trans_pcie->rx_pool[i].page = NULL;
450 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
452 * Allocates 8 pages for each received request.
453 * Called as a scheduled work item.
455 static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
457 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
458 struct iwl_rb_allocator *rba = &trans_pcie->rba;
459 struct list_head local_empty;
460 int pending = atomic_xchg(&rba->req_pending, 0);
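/* Atomically take ownership of all requests posted so far; requests that
* arrive while this work item runs are picked up by re-reading req_pending
* after each completed batch below */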
462 IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
464 /* If we were scheduled - there is at least one request */
465 spin_lock(&rba->lock);
466 /* swap out the rba->rbd_empty to a local list */
467 list_replace_init(&rba->rbd_empty, &local_empty);
468 spin_unlock(&rba->lock);
472 struct list_head local_allocated;
473 gfp_t gfp_mask = GFP_KERNEL;
475 /* Do not post a warning if there are only a few requests */
476 if (pending < RX_PENDING_WATERMARK)
477 gfp_mask |= __GFP_NOWARN;
479 INIT_LIST_HEAD(&local_allocated);
481 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
482 struct iwl_rx_mem_buffer *rxb;
485 /* List should never be empty - each reused RBD is
486 * returned to the list, and initial pool covers any
487 * possible gap between the time the page is allocated
488 * and the time the RBD is added.
490 BUG_ON(list_empty(&local_empty));
491 /* Get the first rxb from the rbd list */
492 rxb = list_first_entry(&local_empty,
493 struct iwl_rx_mem_buffer, list);
496 /* Alloc a new receive buffer */
497 page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
502 /* Get physical address of the RB */
503 rxb->page_dma = dma_map_page(trans->dev, page, 0,
504 PAGE_SIZE << trans_pcie->rx_page_order,
506 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
508 __free_pages(page, trans_pcie->rx_page_order);
512 /* move the allocated entry to the out list */
513 list_move(&rxb->list, &local_allocated);
519 pending = atomic_xchg(&rba->req_pending, 0);
521 "Pending allocation requests = %d\n",
525 spin_lock(&rba->lock);
526 /* add the allocated rbds to the allocator allocated list */
527 list_splice_tail(&local_allocated, &rba->rbd_allocated);
528 /* get more empty RBDs for current pending requests */
529 list_splice_tail_init(&rba->rbd_empty, &local_empty);
530 spin_unlock(&rba->lock);
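/* Publish one completed batch of RX_CLAIM_REQ_ALLOC buffers; it can now
* be claimed via iwl_pcie_rx_allocator_get() */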
532 atomic_inc(&rba->req_ready);
535 spin_lock(&rba->lock);
536 /* return unused rbds to the allocator empty list */
537 list_splice_tail(&local_empty, &rba->rbd_empty);
538 spin_unlock(&rba->lock);
542 * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
544 * Called by the queue when it has posted an allocation request and
545 * has freed 8 RBDs in order to restock itself.
547 static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
548 struct iwl_rx_mem_buffer
549 *out[RX_CLAIM_REQ_ALLOC])
551 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
552 struct iwl_rb_allocator *rba = &trans_pcie->rba;
556 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
557 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
558 * function will return -ENOMEM, as there are no ready requests.
559 * atomic_dec_if_positive will perform the *actual* decrement only if
560 * req_ready > 0, i.e. - there are ready requests and the function
561 * hands one request to the caller.
563 if (atomic_dec_if_positive(&rba->req_ready) < 0)
566 spin_lock(&rba->lock);
567 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
568 /* Get next free Rx buffer, remove it from free list */
569 out[i] = list_first_entry(&rba->rbd_allocated,
570 struct iwl_rx_mem_buffer, list);
571 list_del(&out[i]->list);
573 spin_unlock(&rba->lock);
578 static void iwl_pcie_rx_allocator_work(struct work_struct *data)
580 struct iwl_rb_allocator *rba_p =
581 container_of(data, struct iwl_rb_allocator, rx_alloc);
582 struct iwl_trans_pcie *trans_pcie =
583 container_of(rba_p, struct iwl_trans_pcie, rba);
585 iwl_pcie_rx_allocator(trans_pcie->trans);
588 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
590 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
591 struct iwl_rb_allocator *rba = &trans_pcie->rba;
592 struct device *dev = trans->dev;
594 int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
597 if (WARN_ON(trans_pcie->rxq))
600 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
602 if (!trans_pcie->rxq)
605 spin_lock_init(&rba->lock);
607 for (i = 0; i < trans->num_rx_queues; i++) {
608 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
610 spin_lock_init(&rxq->lock);
611 if (trans->cfg->mq_rx_supported)
612 rxq->queue_size = MQ_RX_TABLE_SIZE;
614 rxq->queue_size = RX_QUEUE_SIZE;
617 * Allocate the circular buffer of Read Buffer Descriptors
620 rxq->bd = dma_zalloc_coherent(dev,
621 free_size * rxq->queue_size,
622 &rxq->bd_dma, GFP_KERNEL);
626 if (trans->cfg->mq_rx_supported) {
627 rxq->used_bd = dma_zalloc_coherent(dev,
636 /* Allocate the driver's pointer to receive buffer status */
637 rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
646 for (i = 0; i < trans->num_rx_queues; i++) {
647 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
650 dma_free_coherent(dev, free_size * rxq->queue_size,
651 rxq->bd, rxq->bd_dma);
656 dma_free_coherent(trans->dev,
657 sizeof(struct iwl_rb_status),
658 rxq->rb_stts, rxq->rb_stts_dma);
661 dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
662 rxq->used_bd, rxq->used_bd_dma);
663 rxq->used_bd_dma = 0;
666 kfree(trans_pcie->rxq);
671 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
673 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
675 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
677 switch (trans_pcie->rx_buf_size) {
679 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
682 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
685 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
689 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
693 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
694 /* reset and flush pointers */
695 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
696 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
697 iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
699 /* Reset driver's Rx queue write index */
700 iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
702 /* Tell device where to find RBD circular buffer in DRAM */
703 iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
704 (u32)(rxq->bd_dma >> 8));
706 /* Tell device where in DRAM to update its Rx status */
707 iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
708 rxq->rb_stts_dma >> 4);
711 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
712 * the credit mechanism in 5000 HW RX FIFO
713 * Direct rx interrupts to host
714 * Rx buffer size 4k or 8k or 12k
718 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
719 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
720 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
721 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
723 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
724 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
726 /* Set interrupt coalescing timer to default (2048 usecs) */
727 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
729 /* W/A for interrupt coalescing bug in 7260 and 3160 */
730 if (trans->cfg->host_interrupt_operation_mode)
731 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
734 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
736 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
737 u32 rb_size, enabled = 0;
740 switch (trans_pcie->rx_buf_size) {
742 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
745 rb_size = RFH_RXF_DMA_RB_SIZE_8K;
748 rb_size = RFH_RXF_DMA_RB_SIZE_12K;
752 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
756 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
757 /* disable free and used rx queue operation */
758 iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
760 for (i = 0; i < trans->num_rx_queues; i++) {
761 /* Tell device where to find RBD free table in DRAM */
762 iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
763 (u64)(trans_pcie->rxq[i].bd_dma));
764 /* Tell device where to find RBD used table in DRAM */
765 iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
766 (u64)(trans_pcie->rxq[i].used_bd_dma));
767 /* Tell device where in DRAM to update its Rx status */
768 iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
769 trans_pcie->rxq[i].rb_stts_dma);
770 /* Reset device index tables */
771 iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
772 iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
773 iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
775 enabled |= BIT(i) | BIT(i + 16);
778 /* restock default queue */
779 iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);
784 * Rx buffer size 4k or 8k or 12k
788 iwl_write_prph(trans, RFH_RXF_DMA_CFG,
789 RFH_DMA_EN_ENABLE_VAL |
790 rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
791 RFH_RXF_DMA_MIN_RB_4_8 |
792 RFH_RXF_DMA_RBDCB_SIZE_512);
794 iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
795 RFH_GEN_CFG_SERVICE_DMA_SNOOP);
796 iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
798 /* Set interrupt coalescing timer to default (2048 usecs) */
799 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
802 static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
804 lockdep_assert_held(&rxq->lock);
806 INIT_LIST_HEAD(&rxq->rx_free);
807 INIT_LIST_HEAD(&rxq->rx_used);
812 static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
818 int iwl_pcie_rx_init(struct iwl_trans *trans)
820 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
821 struct iwl_rxq *def_rxq;
822 struct iwl_rb_allocator *rba = &trans_pcie->rba;
823 int i, err, num_rbds, allocator_pool_size;
825 if (!trans_pcie->rxq) {
826 err = iwl_pcie_rx_alloc(trans);
830 def_rxq = trans_pcie->rxq;
832 rba->alloc_wq = alloc_workqueue("rb_allocator",
833 WQ_HIGHPRI | WQ_UNBOUND, 1);
834 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
836 spin_lock(&rba->lock);
837 atomic_set(&rba->req_pending, 0);
838 atomic_set(&rba->req_ready, 0);
839 INIT_LIST_HEAD(&rba->rbd_allocated);
840 INIT_LIST_HEAD(&rba->rbd_empty);
841 spin_unlock(&rba->lock);
843 /* free all first - we might be reconfigured for a different size */
844 iwl_pcie_free_rbs_pool(trans);
846 for (i = 0; i < RX_QUEUE_SIZE; i++)
847 def_rxq->queue[i] = NULL;
849 for (i = 0; i < trans->num_rx_queues; i++) {
850 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
854 spin_lock(&rxq->lock);
856 * Set read and write pointers to reflect that we have processed
857 * and used all buffers, but have not restocked the Rx queue with fresh buffers
862 rxq->write_actual = 0;
863 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
865 iwl_pcie_rx_init_rxb_lists(rxq);
868 netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
869 iwl_pcie_dummy_napi_poll, 64);
871 spin_unlock(&rxq->lock);
874 /* move the pool to the default queue and allocator ownerships */
875 num_rbds = trans->cfg->mq_rx_supported ?
876 MQ_RX_POOL_SIZE : RX_QUEUE_SIZE;
877 allocator_pool_size = trans->num_rx_queues *
878 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
879 for (i = 0; i < num_rbds; i++) {
880 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
882 if (i < allocator_pool_size)
883 list_add(&rxb->list, &rba->rbd_empty);
885 list_add(&rxb->list, &def_rxq->rx_used);
886 trans_pcie->global_table[i] = rxb;
890 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
891 if (trans->cfg->mq_rx_supported) {
892 iwl_pcie_rx_mq_hw_init(trans);
894 iwl_pcie_rxq_restock(trans, def_rxq);
895 iwl_pcie_rx_hw_init(trans, def_rxq);
898 spin_lock(&def_rxq->lock);
899 iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
900 spin_unlock(&def_rxq->lock);
905 void iwl_pcie_rx_free(struct iwl_trans *trans)
907 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
908 struct iwl_rb_allocator *rba = &trans_pcie->rba;
909 int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
914 * if rxq is NULL, it means that nothing has been allocated,
917 if (!trans_pcie->rxq) {
918 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
922 cancel_work_sync(&rba->rx_alloc);
924 destroy_workqueue(rba->alloc_wq);
925 rba->alloc_wq = NULL;
928 iwl_pcie_free_rbs_pool(trans);
930 for (i = 0; i < trans->num_rx_queues; i++) {
931 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
934 dma_free_coherent(trans->dev,
935 free_size * rxq->queue_size,
936 rxq->bd, rxq->bd_dma);
941 dma_free_coherent(trans->dev,
942 sizeof(struct iwl_rb_status),
943 rxq->rb_stts, rxq->rb_stts_dma);
945 IWL_DEBUG_INFO(trans,
946 "Free rxq->rb_stts which is NULL\n");
949 dma_free_coherent(trans->dev,
950 sizeof(__le32) * rxq->queue_size,
951 rxq->used_bd, rxq->used_bd_dma);
952 rxq->used_bd_dma = 0;
956 netif_napi_del(&rxq->napi);
958 kfree(trans_pcie->rxq);
962 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
964 * Called when an RBD can be reused. The RBD is transferred to the allocator.
965 * When there are 2 empty RBDs - a request for allocation is posted
967 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
968 struct iwl_rx_mem_buffer *rxb,
969 struct iwl_rxq *rxq, bool emergency)
971 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
972 struct iwl_rb_allocator *rba = &trans_pcie->rba;
974 /* Move the RBD to the used list; it will be moved to the allocator in batches
975 * before claiming or posting a request */
976 list_add_tail(&rxb->list, &rxq->rx_used);
978 if (unlikely(emergency))
981 /* Count the allocator owned RBDs */
984 /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
985 * issue a request to the allocator. The modulo by RX_CLAIM_REQ_ALLOC is
986 * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers
987 * earlier but still need to post another request.
989 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
990 /* Move the 2 RBDs to the allocator ownership.
991 * The allocator has another 6 from its pool for the request completion */
992 spin_lock(&rba->lock);
993 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
994 spin_unlock(&rba->lock);
996 atomic_inc(&rba->req_pending);
997 queue_work(rba->alloc_wq, &rba->rx_alloc);
1001 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1002 struct iwl_rxq *rxq,
1003 struct iwl_rx_mem_buffer *rxb,
1006 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1007 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1008 bool page_stolen = false;
1009 int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
1015 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1017 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1018 struct iwl_rx_packet *pkt;
1021 int index, cmd_index, len;
1022 struct iwl_rx_cmd_buffer rxcb = {
1024 ._rx_page_order = trans_pcie->rx_page_order,
1026 ._page_stolen = false,
1027 .truesize = max_len,
1030 pkt = rxb_addr(&rxcb);
1032 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
1036 "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
1038 iwl_get_cmd_string(trans,
1039 iwl_cmd_id(pkt->hdr.cmd,
1042 pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
1044 len = iwl_rx_packet_len(pkt);
1045 len += sizeof(u32); /* account for status word */
1046 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1047 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1049 /* Reclaim a command buffer only if this packet is a response
1050 * to a (driver-originated) command.
1051 * If the packet (e.g. Rx frame) originated from uCode,
1052 * there is no command buffer to reclaim.
1053 * uCode should set the SEQ_RX_FRAME bit if ucode-originated,
1054 * but apparently a few don't get set; catch them here. */
1055 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1059 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1060 if (trans_pcie->no_reclaim_cmds[i] ==
1068 sequence = le16_to_cpu(pkt->hdr.sequence);
1069 index = SEQ_TO_INDEX(sequence);
1070 cmd_index = get_cmd_index(&txq->q, index);
1073 iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1076 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1080 kzfree(txq->entries[cmd_index].free_buf);
1081 txq->entries[cmd_index].free_buf = NULL;
1085 * After here, we should always check rxcb._page_stolen,
1086 * if it is true then one of the handlers took the page.
1090 /* Invoke any callbacks, transfer the buffer to caller,
1091 * and fire off the (possibly) blocking
1092 * iwl_trans_send_cmd()
1093 * as we reclaim the driver command queue */
1094 if (!rxcb._page_stolen)
1095 iwl_pcie_hcmd_complete(trans, &rxcb);
1097 IWL_WARN(trans, "Claim null rxb?\n");
1100 page_stolen |= rxcb._page_stolen;
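/* Several packets can be packed into one receive page; advance to the
* next aligned packet within the same buffer */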
1101 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1104 /* page was stolen from us -- free our reference */
1106 __free_pages(rxb->page, trans_pcie->rx_page_order);
1110 /* Reuse the page if possible. For notification packets and
1111 * SKBs that fail to Rx correctly, add them back into the
1112 * rx_free list for reuse later. */
1113 if (rxb->page != NULL) {
1115 dma_map_page(trans->dev, rxb->page, 0,
1116 PAGE_SIZE << trans_pcie->rx_page_order,
1118 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1120 * free the page(s) as well to not break
1121 * the invariant that the items on the used
1122 * list have no page(s)
1124 __free_pages(rxb->page, trans_pcie->rx_page_order);
1126 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1128 list_add_tail(&rxb->list, &rxq->rx_free);
1132 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1136 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1138 static void iwl_pcie_rx_handle(struct iwl_trans *trans)
1140 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1141 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
1142 u32 r, i, j, count = 0;
1143 bool emergency = false;
1146 spin_lock(&rxq->lock);
1147 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1148 * buffer that the driver may process (last buffer filled by ucode). */
1149 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
1152 /* Rx interrupt, but nothing sent from uCode */
1154 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
1157 struct iwl_rx_mem_buffer *rxb;
1159 if (unlikely(rxq->used_count == rxq->queue_size / 2))
1162 if (trans->cfg->mq_rx_supported) {
1164 * used_bd entries are 32 bits wide, but only 12 bits are used to retrieve the vid
1167 u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]);
1169 rxb = trans_pcie->global_table[vid];
1171 rxb = rxq->queue[i];
1172 rxq->queue[i] = NULL;
1175 IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
1176 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
1178 i = (i + 1) & (rxq->queue_size - 1);
1180 /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1181 * try to claim the pre-allocated buffers from the allocator */
1182 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
1183 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1184 struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
1186 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
1188 /* Add the remaining 6 empty RBDs for allocator use */
1191 spin_lock(&rba->lock);
1192 list_splice_tail_init(&rxq->rx_used,
1194 spin_unlock(&rba->lock);
1197 /* If not ready - continue, will try to reclaim later.
1198 * No need to reschedule work - allocator exits only on success */
1200 if (!iwl_pcie_rx_allocator_get(trans, out)) {
1201 /* If success - then RX_CLAIM_REQ_ALLOC
1202 * buffers were retrieved and should be added to rx_free */
1204 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
1205 for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
1206 list_add_tail(&out[j]->list,
1216 if (rxq->used_count < rxq->queue_size / 3)
1218 spin_unlock(&rxq->lock);
1219 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1220 spin_lock(&rxq->lock);
1223 /* handle restock for three cases, can be all of them at once:
1224 * - we just pulled buffers from the allocator
1225 * - we have 8+ unstolen pages accumulated
1226 * - we are in emergency and allocated buffers
1228 if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
1230 spin_unlock(&rxq->lock);
1231 if (trans->cfg->mq_rx_supported)
1232 iwl_pcie_rxq_mq_restock(trans, rxq);
1234 iwl_pcie_rxq_restock(trans, rxq);
1239 /* Backtrack one entry */
1241 spin_unlock(&rxq->lock);
1244 * handle a case where in emergency there are some unallocated RBDs.
1245 * those RBDs are in the used list, but are not tracked by the queue's
1246 * used_count which counts allocator owned RBDs.
1247 * unallocated emergency RBDs must be allocated on exit, otherwise
1248 * when called again the function may not be in emergency mode and
1249 * they will be handed to the allocator with no tracking in the RBD
1250 * allocator counters, which will lead to them never being claimed back
* by the queue.
1252 * By allocating them here, they are now in the queue free list, and
1253 * will be restocked by the next call of iwl_pcie_rxq_restock.
1255 if (unlikely(emergency && count))
1256 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1259 napi_gro_flush(&rxq->napi, false);
1263 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1265 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1267 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1270 /* W/A for WiFi/WiMAX coex, where WiMAX owns the RF */
1271 if (trans->cfg->internal_wimax_coex &&
1272 !trans->cfg->apmg_not_supported &&
1273 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1274 APMS_CLK_VAL_MRB_FUNC_MODE) ||
1275 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1276 APMG_PS_CTRL_VAL_RESET_REQ))) {
1277 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1278 iwl_op_mode_wimax_active(trans->op_mode);
1279 wake_up(&trans_pcie->wait_command_queue);
1283 iwl_pcie_dump_csr(trans);
1284 iwl_dump_fh(trans, NULL);
1287 /* The STATUS_FW_ERROR bit is set in this function. This must happen
1288 * before we wake up the command caller, to ensure a proper cleanup. */
1289 iwl_trans_fw_error(trans);
1292 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
1293 del_timer(&trans_pcie->txq[i].stuck_timer);
1295 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1296 wake_up(&trans_pcie->wait_command_queue);
1299 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1303 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1305 trace_iwlwifi_dev_irq(trans->dev);
1307 /* Discover which interrupts are active/pending */
1308 inta = iwl_read32(trans, CSR_INT);
1310 /* the thread will service interrupts and re-enable them */
1314 /* a device (PCI-E) page is 4096 bytes long */
1315 #define ICT_SHIFT 12
1316 #define ICT_SIZE (1 << ICT_SHIFT)
1317 #define ICT_COUNT (ICT_SIZE / sizeof(u32))
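/* With a 4096-byte table and 4-byte entries this yields 1024 ICT entries */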
1319 /* Interrupt handler using the ICT table. With this mechanism the driver
1320 * stops using the INTA register to get the device's interrupts, since
1321 * reading that register is expensive. Instead, the device writes its
1322 * interrupt causes into the ICT DRAM table, increments the index and fires
1323 * an interrupt. The driver ORs all ICT table entries from the current index
1324 * up to the first entry with a 0 value - the result is the interrupt causes
* to service. The driver then sets the consumed entries back to 0 and
* updates the index.
1327 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1329 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1334 trace_iwlwifi_dev_irq(trans->dev);
1336 /* Ignore interrupt if there's nothing in NIC to service.
1337 * This may be due to IRQ shared with another device,
1338 * or due to sporadic interrupts thrown from our NIC. */
1339 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1340 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1345 * Collect all entries up to the first 0, starting from ict_index;
1346 * note we already read at ict_index.
1350 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1351 trans_pcie->ict_index, read);
1352 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1353 trans_pcie->ict_index =
1354 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1356 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1357 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1361 /* We should not get this value, just ignore it. */
1362 if (val == 0xffffffff)
1366 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
1367 * (bit 15 before shifting it to 31) to clear when using interrupt
1368 * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
1369 * so we use them to decide on the real state of the Rx bit.
1370 * In other words, bit 15 is set if bit 18 or bit 19 is set.
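* As implied by the transform below, each ICT entry carries CSR_INT bits 0-7
* in its low byte, while its second byte is shifted up to become CSR_INT
* bits 24-31.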
1375 inta = (0xff & val) | ((0xff00 & val) << 16);
1379 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1381 struct iwl_trans *trans = dev_id;
1382 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1383 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1387 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1389 spin_lock(&trans_pcie->irq_lock);
1391 /* dram interrupt table not set yet,
1392 * use legacy interrupt.
1394 if (likely(trans_pcie->use_ict))
1395 inta = iwl_pcie_int_cause_ict(trans);
1397 inta = iwl_pcie_int_cause_non_ict(trans);
1399 if (iwl_have_debug_level(IWL_DL_ISR)) {
1400 IWL_DEBUG_ISR(trans,
1401 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1402 inta, trans_pcie->inta_mask,
1403 iwl_read32(trans, CSR_INT_MASK),
1404 iwl_read32(trans, CSR_FH_INT_STATUS));
1405 if (inta & (~trans_pcie->inta_mask))
1406 IWL_DEBUG_ISR(trans,
1407 "We got a masked interrupt (0x%08x)\n",
1408 inta & (~trans_pcie->inta_mask));
1411 inta &= trans_pcie->inta_mask;
1414 * Ignore interrupt if there's nothing in NIC to service.
1415 * This may be due to IRQ shared with another device,
1416 * or due to sporadic interrupts thrown from our NIC.
1418 if (unlikely(!inta)) {
1419 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1421 * Re-enable interrupts here since we don't
1422 * have anything to service
1424 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1425 iwl_enable_interrupts(trans);
1426 spin_unlock(&trans_pcie->irq_lock);
1427 lock_map_release(&trans->sync_cmd_lockdep_map);
1431 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1433 * Hardware disappeared. It might have
1434 * already raised an interrupt.
1436 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1437 spin_unlock(&trans_pcie->irq_lock);
1441 /* Ack/clear/reset pending uCode interrupts.
1442 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1444 /* There is a hardware bug in the interrupt mask function that some
1445 * interrupts (e.g. CSR_INT_BIT_SCD) can still be generated even if
1446 * they are disabled in the CSR_INT_MASK register. Furthermore the
1447 * ICT interrupt handling mechanism has another bug that might cause
1448 * these unmasked interrupts to fail to be detected. We work around the
1449 * hardware bugs here by ACKing all the possible interrupts so that
1450 * interrupt coalescing can still be achieved.
1452 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1454 if (iwl_have_debug_level(IWL_DL_ISR))
1455 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1456 inta, iwl_read32(trans, CSR_INT_MASK));
1458 spin_unlock(&trans_pcie->irq_lock);
1460 /* Now service all interrupt bits discovered above. */
1461 if (inta & CSR_INT_BIT_HW_ERR) {
1462 IWL_ERR(trans, "Hardware error detected. Restarting.\n");
1464 /* Tell the device to stop sending interrupts */
1465 iwl_disable_interrupts(trans);
1468 iwl_pcie_irq_handle_error(trans);
1470 handled |= CSR_INT_BIT_HW_ERR;
1475 if (iwl_have_debug_level(IWL_DL_ISR)) {
1476 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1477 if (inta & CSR_INT_BIT_SCD) {
1478 IWL_DEBUG_ISR(trans,
1479 "Scheduler finished to transmit the frame/frames.\n");
1483 /* Alive notification via Rx interrupt will do the real work */
1484 if (inta & CSR_INT_BIT_ALIVE) {
1485 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1490 /* Safely ignore these bits for debug checks below */
1491 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1493 /* HW RF KILL switch toggled */
1494 if (inta & CSR_INT_BIT_RF_KILL) {
1497 hw_rfkill = iwl_is_rfkill_set(trans);
1498 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1499 hw_rfkill ? "disable radio" : "enable radio");
1501 isr_stats->rfkill++;
1503 mutex_lock(&trans_pcie->mutex);
1504 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1505 mutex_unlock(&trans_pcie->mutex);
1507 set_bit(STATUS_RFKILL, &trans->status);
1508 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1510 IWL_DEBUG_RF_KILL(trans,
1511 "Rfkill while SYNC HCMD in flight\n");
1512 wake_up(&trans_pcie->wait_command_queue);
1514 clear_bit(STATUS_RFKILL, &trans->status);
1517 handled |= CSR_INT_BIT_RF_KILL;
1520 /* Chip got too hot and stopped itself */
1521 if (inta & CSR_INT_BIT_CT_KILL) {
1522 IWL_ERR(trans, "Microcode CT kill error detected.\n");
1523 isr_stats->ctkill++;
1524 handled |= CSR_INT_BIT_CT_KILL;
1527 /* Error detected by uCode */
1528 if (inta & CSR_INT_BIT_SW_ERR) {
1529 IWL_ERR(trans, "Microcode SW error detected. "
1530 " Restarting 0x%X.\n", inta);
1532 iwl_pcie_irq_handle_error(trans);
1533 handled |= CSR_INT_BIT_SW_ERR;
1536 /* uCode wakes up after power-down sleep */
1537 if (inta & CSR_INT_BIT_WAKEUP) {
1538 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1539 iwl_pcie_rxq_check_wrptr(trans);
1540 iwl_pcie_txq_check_wrptrs(trans);
1542 isr_stats->wakeup++;
1544 handled |= CSR_INT_BIT_WAKEUP;
1547 /* All uCode command responses, including Tx command responses,
1548 * Rx "responses" (frame-received notification), and other
1549 * notifications from uCode come through here */
1550 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1551 CSR_INT_BIT_RX_PERIODIC)) {
1552 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1553 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1554 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1555 iwl_write32(trans, CSR_FH_INT_STATUS,
1556 CSR_FH_INT_RX_MASK);
1558 if (inta & CSR_INT_BIT_RX_PERIODIC) {
1559 handled |= CSR_INT_BIT_RX_PERIODIC;
1561 CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1563 /* Sending an RX interrupt requires many steps to be done in the device:
1565 * 1- write interrupt to current index in ICT table.
* 2- dma RX frame.
1567 * 3- update RX shared data to indicate last write index.
1568 * 4- send interrupt.
1569 * This could lead to an RX race: the driver could receive an RX interrupt
1570 * but the shared data changes do not reflect this;
1571 * periodic interrupt will detect any dangling Rx activity.
1574 /* Disable periodic interrupt; we use it as just a one-shot. */
1575 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1576 CSR_INT_PERIODIC_DIS);
1579 * Enable periodic interrupt in 8 msec only if we received
1580 * real RX interrupt (instead of just periodic int), to catch
1581 * any dangling Rx interrupt. If it was just the periodic
1582 * interrupt, there was no dangling Rx activity, and no need
1583 * to extend the periodic interrupt; one-shot is enough.
1585 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1586 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1587 CSR_INT_PERIODIC_ENA);
1592 iwl_pcie_rx_handle(trans);
1596 /* This "Tx" DMA channel is used only for loading uCode */
1597 if (inta & CSR_INT_BIT_FH_TX) {
1598 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1599 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1601 handled |= CSR_INT_BIT_FH_TX;
1602 /* Wake up uCode load routine, now that load is complete */
1603 trans_pcie->ucode_write_complete = true;
1604 wake_up(&trans_pcie->ucode_write_waitq);
1607 if (inta & ~handled) {
1608 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1609 isr_stats->unhandled++;
1612 if (inta & ~(trans_pcie->inta_mask)) {
1613 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1614 inta & ~trans_pcie->inta_mask);
1617 /* Re-enable all interrupts */
1618 /* only Re-enable if disabled by irq */
1619 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1620 iwl_enable_interrupts(trans);
1621 /* Re-enable RF_KILL if it occurred */
1622 else if (handled & CSR_INT_BIT_RF_KILL)
1623 iwl_enable_rfkill_int(trans);
1626 lock_map_release(&trans->sync_cmd_lockdep_map);
1630 /******************************************************************************
1634 ******************************************************************************/
1636 /* Free dram table */
1637 void iwl_pcie_free_ict(struct iwl_trans *trans)
1639 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1641 if (trans_pcie->ict_tbl) {
1642 dma_free_coherent(trans->dev, ICT_SIZE,
1643 trans_pcie->ict_tbl,
1644 trans_pcie->ict_tbl_dma);
1645 trans_pcie->ict_tbl = NULL;
1646 trans_pcie->ict_tbl_dma = 0;
1651 * Allocate the shared DRAM table; it is an aligned memory
1652 * block of ICT_SIZE.
1653 * Also reset all data related to the ICT table interrupt.
1655 int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1657 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1659 trans_pcie->ict_tbl =
1660 dma_zalloc_coherent(trans->dev, ICT_SIZE,
1661 &trans_pcie->ict_tbl_dma,
1663 if (!trans_pcie->ict_tbl)
1666 /* just an API sanity check ... it is guaranteed to be aligned */
1667 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
1668 iwl_pcie_free_ict(trans);
1675 /* Device is going up - inform it about using the ICT interrupt table,
1676 * and tell the driver to start using the ICT interrupt.
1678 void iwl_pcie_reset_ict(struct iwl_trans *trans)
1680 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1683 if (!trans_pcie->ict_tbl)
1686 spin_lock(&trans_pcie->irq_lock);
1687 iwl_disable_interrupts(trans);
1689 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
1691 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
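/* The ICT table base is programmed in units of 4 KiB (ICT_SHIFT) pages,
* which is why iwl_pcie_alloc_ict() checks its alignment */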
1693 val |= CSR_DRAM_INT_TBL_ENABLE |
1694 CSR_DRAM_INIT_TBL_WRAP_CHECK |
1695 CSR_DRAM_INIT_TBL_WRITE_POINTER;
1697 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
1699 iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
1700 trans_pcie->use_ict = true;
1701 trans_pcie->ict_index = 0;
1702 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
1703 iwl_enable_interrupts(trans);
1704 spin_unlock(&trans_pcie->irq_lock);
1707 /* Device is going down - disable ICT interrupt usage */
1708 void iwl_pcie_disable_ict(struct iwl_trans *trans)
1710 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1712 spin_lock(&trans_pcie->irq_lock);
1713 trans_pcie->use_ict = false;
1714 spin_unlock(&trans_pcie->irq_lock);
1717 irqreturn_t iwl_pcie_isr(int irq, void *data)
1719 struct iwl_trans *trans = data;
1724 /* Disable (but don't clear!) interrupts here to avoid
1725 * back-to-back ISRs and sporadic interrupts from our NIC.
1726 * If we have something to service, the tasklet will re-enable ints.
1727 * If we *don't* have something, we'll re-enable before leaving here.
1729 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1731 return IRQ_WAKE_THREAD;