/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-op-mode.h"
/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index. If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 * ...
 */
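
/*
 * Worked example of the index arithmetic above (illustrative, not from the
 * original source): with RX_QUEUE_SIZE = 256, READ = 10 and WRITE = 9 the
 * queue is empty (WRITE == READ - 1); READ == WRITE == 10 means full. When
 * the firmware fills slot 10 it advances READ to 11; the driver processes
 * slot 10 and, once that slot is re-armed with a fresh buffer, advances
 * WRITE to 10.
 */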
/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
				   struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
/**
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
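
/*
 * Illustration (not from the original source): the RBD is a 32-bit word,
 * but the DMA address may be up to 36 bits wide. Because every receive
 * buffer is 256-byte aligned, the low 8 bits are always zero; e.g.
 * 0x123456700 >> 8 == 0x01234567 fits in 32 bits and the device shifts it
 * back. The BUG_ON checks in iwl_rx_allocate() enforce exactly these two
 * constraints (<= 36 bits, 256-byte aligned).
 */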
/*
 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_rx_queue_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - there is no need to try to add
	 * buffers... This can happen when we stop the device and still have
	 * an interrupt pending. We stop the APM before we sync the
	 * interrupts / tasklets because we have to (see comment there). On
	 * the other hand, since the APM is stopped, we cannot access the HW
	 * (in particular not prph). So don't try to restock if the APM has
	 * already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(trans, rxq);
	}
}
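
/*
 * Example of the multiple-of-8 rule above (illustrative only): if
 * rxq->write has advanced to 13, the device is told about 8 (13 & ~0x7);
 * the remaining 5 buffers are only announced once write crosses 16. This
 * batches the MMIO writes of the write pointer.
 */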
/*
 * iwl_rx_allocate - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_rx_queue_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					       "order: %d\n",
					       trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s. "
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
/*
 * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
void iwl_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_rx_allocate(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_rx_queue_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
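
/*
 * iwl_rx_replenish_now() below is the atomic counterpart of
 * iwl_rx_replenish(): it is called from the Rx handling path itself, so it
 * allocates with GFP_ATOMIC and must not sleep.
 */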
static void iwl_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_rx_allocate(trans, GFP_ATOMIC);

	iwl_rx_queue_restock(trans);
}
void iwl_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_rx_replenish(trans_pcie->trans);
}
static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			rxcb._offset,
			trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
			pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim) {
			struct iwl_pcie_tx_queue_entry *ent;
			ent = &txq->entries[cmd_index];
			cmd = ent->copy_cmd;
			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
		} else {
			cmd = NULL;
		}

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			/* The original command isn't needed any more */
			kfree(txq->entries[cmd_index].copy_cmd);
			txq->entries[cmd_index].copy_cmd = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_tx_cmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}
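
/*
 * Note on iwl_rx_handle_rxbuf() above (explanatory comment, not from the
 * original source): one receive page can carry several packets back to
 * back, which is why the handler walks 'offset' forward until it finds
 * FH_RSCSR_FRAME_INVALID or runs out of room. Each frame is padded to
 * FH_RSCSR_FRAME_ALIGN; e.g. a 100-byte frame (status word included)
 * advances offset by ALIGN(100, 64) = 128 bytes, assuming the usual
 * alignment of 0x40.
 */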
/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate how many frames need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_rx_handle_rxbuf(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_rx_replenish_now(trans);
	else
		iwl_rx_queue_restock(trans);
}
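
/*
 * Worked example for the batching in iwl_rx_handle() (illustrative only):
 * with RX_QUEUE_SIZE = 256, r = 5 and rxq->write_actual = 250 gives
 * total_empty = 5 - 250 + 256 = 11, so fill_rx stays 0 and restocking is
 * deferred until after the loop. If more than half the queue were empty,
 * fill_rx would make the loop call iwl_rx_replenish_now() at least every
 * 8 handled buffers so the firmware does not starve for RBDs mid-burst.
 */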
/**
 * iwl_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_irq_handle_error(struct iwl_trans *trans)
{
	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans->wait_command_queue);
		return;
	}

	iwl_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	iwl_op_mode_nic_error(trans->op_mode);
}
/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* just for debug */
		inta_mask = iwl_read32(trans, CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, inta_mask);
	}
#endif

	/* saved interrupt in inta variable now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			"Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_txq_update_write_ptr(trans,
						 &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt before the shared data changes reflect it;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		iwl_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
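
/*
 * Sizing example (illustrative only): with a 4096-byte device page,
 * ICT_SHIFT = 12 gives ICT_SIZE = 4096 bytes and ICT_COUNT =
 * 4096 / sizeof(u32) = 1024 entries -- one 32-bit slot per interrupt event.
 */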
/* Free dram table */
void iwl_free_isr_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_alloc_isr_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_free_isr_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}
/* Device is going up: inform it about using ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 */
void iwl_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* Device is going down: disable ict interrupt usage */
void iwl_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);

unplugged:
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}
/* interrupt handler using ict table, with this interrupt driver will
 * stop using INTA register to get device's interrupt, reading this register
 * is expensive, device will write interrupts in ICT dram table, increment
 * index then will fire interrupt to driver, driver will OR all ICT table
 * entries from current index up to table entry with 0 value. the result is
 * the interrupt we need to service, driver will set the entries back to 0 and
 * set index.
 */
irqreturn_t iwl_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
		      inta, inta_mask, val);
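
	/*
	 * Example of the mapping above (illustrative only): the ICT entry
	 * keeps CSR_INT bits 0-7 in its low byte and bits 24-31 in its
	 * second byte, so val = 0x8001 expands to inta = 0x80000001,
	 * i.e. bit 31 (CSR_INT_BIT_FH_RX) plus bit 0.
	 */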
	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta) {
		/* Re-enable interrupts only if they were disabled by this
		 * handler and no tasklet was scheduled; if a tasklet is
		 * pending, it will re-enable them itself.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}