From 990aa6d7b28d26bf22171410b49f191e8e9b09fc Mon Sep 17 00:00:00 2001
From: Emmanuel Grumbach
Date: Wed, 14 Nov 2012 12:39:52 +0200
Subject: [PATCH] iwlwifi: rename functions in transport layer

1) s/tx_queue/txq
   for the sake of consistency.
2) s/rx_queue/rxq
   for the sake of consistency.
3) Make all functions begin with iwl_pcie_
   iwl_queue_init and iwl_queue_space are an exception since they are
   not PCIE specific, although they live in the pcie subdir.
4) s/trans_pcie_get_cmd_string/get_cmd_string
   it is much shorter and is used in debug prints, which are long lines.
5) s/iwl_bg_rx_replenish/iwl_pcie_rx_replenish_work
   this better emphasizes that it is a work item.
6) remove invalid kernel-doc markers

pcie/tx.c and pcie/trans.c still need to be cleaned up.

Signed-off-by: Emmanuel Grumbach
Signed-off-by: Johannes Berg
---
 drivers/net/wireless/iwlwifi/pcie/internal.h |  88 ++++++-----
 drivers/net/wireless/iwlwifi/pcie/rx.c       | 148 +++++++++----------
 drivers/net/wireless/iwlwifi/pcie/trans.c    | 116 +++++++--------
 drivers/net/wireless/iwlwifi/pcie/tx.c       | 108 +++++++-------
 4 files changed, 218 insertions(+), 242 deletions(-)

diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index ebf3aa0fedf2..d058ddaebd93 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -73,7 +73,7 @@ struct isr_statistics {
 };
 
 /**
- * struct iwl_rx_queue - Rx queue
+ * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
  * @pool:
@@ -91,7 +91,7 @@ struct isr_statistics {
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
-struct iwl_rx_queue {
+struct iwl_rxq {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
@@ -157,8 +157,8 @@ struct iwl_cmd_meta {
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
- * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
- * in the same struct) have 256.
+ * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
+ * the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 | ... | 31 |
@@ -182,7 +182,7 @@ struct iwl_queue {
 #define TFD_TX_CMD_SLOTS 256
 #define TFD_CMD_SLOTS 32
 
-struct iwl_pcie_tx_queue_entry {
+struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct iwl_device_cmd *copy_cmd;
	struct sk_buff *skb;
@@ -192,7 +192,7 @@ struct iwl_pcie_tx_queue_entry {
 };
 
 /**
- * struct iwl_tx_queue - Tx Queue for DMA
+ * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @entries: transmit entries (driver state)
@@ -205,10 +205,10 @@ struct iwl_pcie_tx_queue_entry {
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
-struct iwl_tx_queue {
+struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
-	struct iwl_pcie_tx_queue_entry *entries;
+	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
@@ -238,7 +238,7 @@ struct iwl_tx_queue {
 * @wd_timeout: queue watchdog timeout (jiffies)
 */
 struct iwl_trans_pcie {
-	struct iwl_rx_queue rxq;
+	struct iwl_rxq rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;
	struct iwl_drv *drv;
@@ -260,7 +260,7 @@ struct iwl_trans_pcie {
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;
 
-	struct iwl_tx_queue *txq;
+	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 
@@ -323,51 +323,47 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
 /*****************************************************
 * RX
 ******************************************************/
-void iwl_bg_rx_replenish(struct work_struct *data);
-void iwl_irq_tasklet(struct iwl_trans *trans);
-void iwl_rx_replenish(struct iwl_trans *trans);
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
-				   struct iwl_rx_queue *q);
+void iwl_pcie_rx_replenish_work(struct work_struct *data);
+void iwl_pcie_rx_replenish(struct iwl_trans *trans);
+void iwl_pcie_tasklet(struct iwl_trans *trans);
+void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q);
 
 /*****************************************************
-* ICT
+* ICT - interrupt handling
 ******************************************************/
-void iwl_reset_ict(struct iwl_trans *trans);
-void iwl_disable_ict(struct iwl_trans *trans);
-int iwl_alloc_isr_ict(struct iwl_trans *trans);
-void iwl_free_isr_ict(struct iwl_trans *trans);
-irqreturn_t iwl_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+int iwl_pcie_alloc_ict(struct iwl_trans *trans);
+void iwl_pcie_free_ict(struct iwl_trans *trans);
+void iwl_pcie_reset_ict(struct iwl_trans *trans);
+void iwl_pcie_disable_ict(struct iwl_trans *trans);
 
 /*****************************************************
 * TX / HCMD
 ******************************************************/
-void iwl_txq_update_write_ptr(struct iwl_trans *trans,
-			      struct iwl_tx_queue *txq);
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
-				 struct iwl_tx_queue *txq,
-				 dma_addr_t addr, u16 len, u8 reset);
-int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
-int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-void iwl_tx_cmd_complete(struct iwl_trans *trans,
-			 struct iwl_rx_cmd_buffer *rxb, int handler_status);
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
-				       struct iwl_tx_queue *txq,
-				       u16 byte_cnt);
-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
-			       int sta_id, int tid, int frame_limit, u16 ssn);
-void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-		      enum dma_data_direction dma_dir);
-int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_pcie_tx_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+			  dma_addr_t addr, u16 len, u8 reset);
+int iwl_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
+void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+				      struct iwl_txq *txq, u16 byte_cnt);
+void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+			 int sta_id, int tid, int frame_limit, u16 ssn);
+void iwl_pcie_txq_disable(struct iwl_trans *trans, int queue);
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+			   enum dma_data_direction dma_dir);
+int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs);
-void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id);
+void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id);
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
 int iwl_queue_space(const struct iwl_queue *q);
 
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf);
-void iwl_dump_csr(struct iwl_trans *trans);
+int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
+void iwl_pcie_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
 * Helpers
@@ -403,7 +399,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
 }
 
 static inline void iwl_wake_queue(struct iwl_trans *trans,
-				  struct iwl_tx_queue *txq)
+				  struct iwl_txq *txq)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -414,7 +410,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
 }
 
 static inline void iwl_stop_queue(struct iwl_trans *trans,
-				  struct iwl_tx_queue *txq)
+				  struct iwl_txq *txq)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -438,8 +434,8 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
	return index & (q->n_window - 1);
 }
 
-static inline const char *
-trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd)
+static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
+					 u8 cmd)
 {
	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
		return "UNKNOWN";
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 11a93eddc84f..087d022bc93a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -76,7 +76,7 @@
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
@@ -89,28 +89,28 @@
 *
 * Driver sequence:
 *
- * iwl_rx_queue_alloc()   Allocates rx_free
- * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                        iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * iwl_rxq_alloc()            Allocates rx_free
+ * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
+ *                            iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index.  If insufficient rx_free buffers
- *                        are available, schedules iwl_rx_replenish
+ *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
- * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
+ * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
- *                        Calls iwl_rx_queue_restock to refill any empty
+ *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                        slots.
 * ...
 *
 */
 
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
+/*
+ * iwl_rxq_space - Return number of free slots available in queue.
 */
-static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+static int iwl_rxq_space(const struct iwl_rxq *q)
 {
	int s = q->read - q->write;
	if (s <= 0)
@@ -122,11 +122,10 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
	return s;
 }
 
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+/*
+ * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
-				   struct iwl_rx_queue *q)
+void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
 {
	unsigned long flags;
	u32 reg;
@@ -176,7 +175,7 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
	spin_unlock_irqrestore(&q->lock, flags);
 }
 
-/**
+/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
 static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
@@ -184,8 +183,8 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
	return cpu_to_le32((u32)(dma_addr >> 8));
 }
 
-/**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
@@ -195,10 +194,10 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
-static void iwl_rx_queue_restock(struct iwl_trans *trans)
+static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
 
@@ -214,7 +213,7 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
		return;
 
	spin_lock_irqsave(&rxq->lock, flags);
-	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);
@@ -242,23 +241,23 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
-		iwl_rx_queue_update_write_ptr(trans, rxq);
+		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
 }
 
 /*
- * iwl_rx_allocate - allocate a page for each used RBD
+ * iwl_pcie_rx_allocate - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
- * iwl_rx_queue_restock. The latter function will update the HW to use the newly
+ * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
-static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
@@ -333,46 +332,46 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 }
 
 /*
- * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
+ * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free an page is allocated for the slot.
 *
- * Also restock the Rx queue via iwl_rx_queue_restock.
+ * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
-void iwl_rx_replenish(struct iwl_trans *trans)
+void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
 
-	iwl_rx_allocate(trans, GFP_KERNEL);
+	iwl_pcie_rx_allocate(trans, GFP_KERNEL);
 
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-	iwl_rx_queue_restock(trans);
+	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 }
 
-static void iwl_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
 {
-	iwl_rx_allocate(trans, GFP_ATOMIC);
+	iwl_pcie_rx_allocate(trans, GFP_ATOMIC);
 
-	iwl_rx_queue_restock(trans);
+	iwl_pcie_rxq_restock(trans);
 }
 
-void iwl_bg_rx_replenish(struct work_struct *data)
+void iwl_pcie_rx_replenish_work(struct work_struct *data)
 {
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-	iwl_rx_replenish(trans_pcie->trans);
+	iwl_pcie_rx_replenish(trans_pcie->trans);
 }
 
-static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
+static void iwl_pcie_rx_handle_rxbuf(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
@@ -402,8 +401,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
			break;
 
		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
-			rxcb._offset,
-			trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
+			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
			pkt->hdr.cmd);
 
		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
@@ -435,7 +433,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
		cmd_index = get_cmd_index(&txq->q, index);
 
		if (reclaim) {
-			struct iwl_pcie_tx_queue_entry *ent;
+			struct iwl_pcie_txq_entry *ent;
			ent = &txq->entries[cmd_index];
			cmd = ent->copy_cmd;
			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
@@ -465,7 +463,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
-				iwl_tx_cmd_complete(trans, &rxcb, err);
+				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}
@@ -496,17 +494,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
	spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
-/**
- * iwl_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
+/*
+ * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
-static void iwl_rx_handle(struct iwl_trans *trans)
+static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
@@ -537,7 +531,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
-		iwl_rx_handle_rxbuf(trans, rxb);
+		iwl_pcie_rx_handle_rxbuf(trans, rxb);
 
		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
@@ -546,7 +540,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
			count++;
			if (count >= 8) {
				rxq->read = i;
-				iwl_rx_replenish_now(trans);
+				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
@@ -555,15 +549,15 @@ static void iwl_rx_handle(struct iwl_trans *trans)
	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
-		iwl_rx_replenish_now(trans);
+		iwl_pcie_rx_replenish_now(trans);
	else
-		iwl_rx_queue_restock(trans);
+		iwl_pcie_rxq_restock(trans);
 }
 
-/**
- * iwl_irq_handle_error - called for HW or SW error interrupt from card
+/*
+ * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
-static void iwl_irq_handle_error(struct iwl_trans *trans)
+static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -579,8 +573,8 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
		return;
	}
 
-	iwl_dump_csr(trans);
-	iwl_dump_fh(trans, NULL);
+	iwl_pcie_dump_csr(trans);
+	iwl_pcie_dump_fh(trans, NULL);
 
	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
@@ -590,7 +584,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
 }
 
 /* tasklet for iwlagn interrupt */
-void iwl_irq_tasklet(struct iwl_trans *trans)
+void iwl_pcie_tasklet(struct iwl_trans *trans)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@@ -642,7 +636,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
		iwl_disable_interrupts(trans);
 
		isr_stats->hw++;
-		iwl_irq_handle_error(trans);
+		iwl_pcie_irq_handle_error(trans);
 
		handled |= CSR_INT_BIT_HW_ERR;
 
@@ -705,17 +699,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
		IWL_ERR(trans, "Microcode SW error detected. "
" " Restarting 0x%X.\n", inta); isr_stats->sw++; - iwl_irq_handle_error(trans); + iwl_pcie_irq_handle_error(trans); handled |= CSR_INT_BIT_SW_ERR; } /* uCode wakes up after power-down sleep */ if (inta & CSR_INT_BIT_WAKEUP) { IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); - iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq); + iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq); for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) - iwl_txq_update_write_ptr(trans, - &trans_pcie->txq[i]); + iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]); isr_stats->wakeup++; @@ -753,7 +746,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_DIS); - iwl_rx_handle(trans); + iwl_pcie_rx_handle(trans); /* * Enable periodic interrupt in 8 msec only if we received @@ -811,7 +804,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) #define ICT_COUNT (ICT_SIZE / sizeof(u32)) /* Free dram table */ -void iwl_free_isr_ict(struct iwl_trans *trans) +void iwl_pcie_free_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -824,13 +817,12 @@ void iwl_free_isr_ict(struct iwl_trans *trans) } } - /* * allocate dram shared table, it is an aligned memory * block of ICT_SIZE. * also reset all data related to ICT table interrupt. */ -int iwl_alloc_isr_ict(struct iwl_trans *trans) +int iwl_pcie_alloc_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -843,7 +835,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans) /* just an API sanity check ... it is guaranteed to be aligned */ if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { - iwl_free_isr_ict(trans); + iwl_pcie_free_ict(trans); return -EINVAL; } @@ -864,7 +856,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans) /* Device is going up inform it about using ICT interrupt table, * also we need to tell the driver to start using ICT interrupt. */ -void iwl_reset_ict(struct iwl_trans *trans) +void iwl_pcie_reset_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 val; @@ -894,7 +886,7 @@ void iwl_reset_ict(struct iwl_trans *trans) } /* Device is going down disable ict interrupt usage */ -void iwl_disable_ict(struct iwl_trans *trans) +void iwl_pcie_disable_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned long flags; @@ -905,7 +897,7 @@ void iwl_disable_ict(struct iwl_trans *trans) } /* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */ -static irqreturn_t iwl_isr(int irq, void *data) +static irqreturn_t iwl_pcie_isr(int irq, void *data) { struct iwl_trans *trans = data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -952,7 +944,7 @@ static irqreturn_t iwl_isr(int irq, void *data) #endif trans_pcie->inta |= inta; - /* iwl_irq_tasklet() will service interrupts and re-enable them */ + /* iwl_pcie_tasklet() will service interrupts and re-enable them */ if (likely(inta)) tasklet_schedule(&trans_pcie->irq_tasklet); else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && @@ -977,7 +969,7 @@ none: * the interrupt we need to service, driver will set the entries back to 0 and * set index. */ -irqreturn_t iwl_isr_ict(int irq, void *data) +irqreturn_t iwl_pcie_isr_ict(int irq, void *data) { struct iwl_trans *trans = data; struct iwl_trans_pcie *trans_pcie; @@ -997,7 +989,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data) * use legacy interrupt. 
	 */
	if (unlikely(!trans_pcie->use_ict)) {
-		irqreturn_t ret = iwl_isr(irq, data);
+		irqreturn_t ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}
@@ -1062,7 +1054,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;
 
-	/* iwl_irq_tasklet() will service interrupts and re-enable them */
+	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 7eb5f483f77d..1eed9882b7b8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -84,7 +84,7 @@
 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;
 
	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
@@ -120,7 +120,7 @@ err_bd:
 static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;
 
	/* Fill the rx_used queue with _all_ of the Rx buffers */
@@ -139,8 +139,7 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
	}
 }
 
-static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
-				 struct iwl_rx_queue *rxq)
+static void iwl_trans_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
@@ -189,7 +188,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
 static int iwl_rx_init(struct iwl_trans *trans)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;
 
@@ -216,13 +215,13 @@ static int iwl_rx_init(struct iwl_trans *trans)
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
 
-	iwl_rx_replenish(trans);
+	iwl_pcie_rx_replenish(trans);
 
	iwl_trans_rx_hw_init(trans, rxq);
 
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
-	iwl_rx_queue_update_write_ptr(trans, rxq);
+	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
	return 0;
@@ -231,7 +230,7 @@ static int iwl_rx_init(struct iwl_trans *trans)
 static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;
 
	/*if rxq->bd is NULL, it means that nothing has been allocated,
@@ -295,7 +294,7 @@ static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
 
 static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
 {
-	struct iwl_tx_queue *txq = (void *)data;
+	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -359,7 +358,7 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
 }
 
 static int iwl_trans_txq_alloc(struct iwl_trans *trans,
-			       struct iwl_tx_queue *txq, int slots_num,
+			       struct iwl_txq *txq, int slots_num,
			       u32 txq_id)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -376,7 +375,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
	txq->q.n_window = slots_num;
 
	txq->entries = kcalloc(slots_num,
-			       sizeof(struct iwl_pcie_tx_queue_entry),
+			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);
 
	if (!txq->entries)
@@ -413,7 +412,7 @@ error:
 
 }
 
-static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			      int slots_num, u32 txq_id)
 {
	int ret;
@@ -443,12 +442,12 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 }
 
 /*
- * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
+ * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
-void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
+void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;
 
@@ -465,31 +464,31 @@ void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
-		iwl_txq_free_tfd(trans, txq, dma_dir);
+		iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
 }
 
-/**
- * iwl_tx_queue_free - Deallocate DMA queue.
+/*
+ * iwl_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
-static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
+static void iwl_txq_free(struct iwl_trans *trans, int txq_id)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;
 
	if (WARN_ON(!txq))
		return;
 
-	iwl_tx_queue_unmap(trans, txq_id);
+	iwl_pcie_txq_unmap(trans, txq_id);
 
	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
@@ -515,7 +514,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
	memset(txq, 0, sizeof(*txq));
 }
 
-/**
+/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
@@ -529,7 +528,7 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
-			iwl_tx_queue_free(trans, txq_id);
+			iwl_txq_free(trans, txq_id);
	}
 
	kfree(trans_pcie->txq);
@@ -540,12 +539,9 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
 }
 
-/**
+/*
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
 */
 static int iwl_trans_tx_alloc(struct iwl_trans *trans)
 {
@@ -578,7 +574,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
	}
 
	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
-				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
+				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = ENOMEM;
@@ -1146,11 +1142,11 @@ static void iwl_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 
 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 {
-	iwl_reset_ict(trans);
+	iwl_pcie_reset_ict(trans);
	iwl_tx_start(trans, scd_addr);
 }
 
-/**
+/*
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
 static int iwl_trans_tx_stop(struct iwl_trans *trans)
@@ -1188,7 +1184,7 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
-		iwl_tx_queue_unmap(trans, txq_id);
+		iwl_pcie_txq_unmap(trans, txq_id);
 
	return 0;
 }
@@ -1204,7 +1200,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
	/* device going down, Stop using ICT table */
-	iwl_disable_ict(trans);
+	iwl_pcie_disable_ict(trans);
 
	/*
	 * If a HW restart happens during firmware loading,
@@ -1274,7 +1270,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
-	struct iwl_tx_queue *txq;
+	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
@@ -1370,10 +1366,9 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
	}
 
	/* Attach buffers to TFD */
-	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
+	iwl_pcie_tx_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
-		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
-					     secondlen, 0);
+		iwl_pcie_tx_build_tfd(trans, txq, phys_addr, secondlen, 0);
 
	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);
@@ -1389,7 +1384,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
 
	/* Set up entry for this TFD in Tx byte-count array */
-	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);
@@ -1409,7 +1404,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_txq_update_write_ptr(trans, txq);
+	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
	/*
	 * At this point the frame is "transmitted" successfully
@@ -1420,7 +1415,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
-			iwl_txq_update_write_ptr(trans, txq);
+			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
@@ -1442,19 +1437,20 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
-			     iwl_irq_tasklet, (unsigned long)trans);
+			     iwl_pcie_tasklet, (unsigned long)trans);
 
-		iwl_alloc_isr_ict(trans);
+		iwl_pcie_alloc_ict(trans);
 
-		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
-				  DRV_NAME, trans);
+		err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
+				  IRQF_SHARED, DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}
 
-		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
+		INIT_WORK(&trans_pcie->rx_replenish,
+			  iwl_pcie_rx_replenish_work);
 
		trans_pcie->irq_requested = true;
	}
@@ -1478,7 +1474,7 @@ err_free_irq:
	trans_pcie->irq_requested = false;
	free_irq(trans_pcie->irq, trans);
 error:
-	iwl_free_isr_ict(trans);
+	iwl_pcie_free_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
 }
@@ -1522,7 +1518,7 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			     struct sk_buff_head *skbs)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
 
@@ -1531,7 +1527,7 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
				   txq_id, txq->q.read_ptr, tfd_num, ssn);
-		iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
+		iwl_pcie_txq_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}
@@ -1590,7 +1586,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 
	if (trans_pcie->irq_requested == true) {
		free_irq(trans_pcie->irq, trans);
-		iwl_free_isr_ict(trans);
+		iwl_pcie_free_ict(trans);
	}
 
	pci_disable_msi(trans_pcie->pci_dev);
@@ -1636,10 +1632,10 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
 
 #define IWL_FLUSH_WAIT_MS	2000
 
-static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq;
+	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
@@ -1683,7 +1679,7 @@ static const char *get_fh_string(int cmd)
 #undef IWL_CMD
 }
 
-int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
 {
	int i;
	static const u32 fh_tbl[] = {
@@ -1762,7 +1758,7 @@ static const char *get_csr_string(int cmd)
 #undef IWL_CMD
 }
 
-void iwl_dump_csr(struct iwl_trans *trans)
+void iwl_pcie_dump_csr(struct iwl_trans *trans)
 {
	int i;
	static const u32 csr_tbl[] = {
@@ -1852,7 +1848,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
 {
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq;
+	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
@@ -1889,7 +1885,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 {
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);
@@ -2008,7 +2004,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;
 
-	iwl_dump_csr(trans);
+	iwl_pcie_dump_csr(trans);
 
	return count;
 }
@@ -2022,7 +2018,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
	int pos = 0;
	ssize_t ret = -EFAULT;
 
-	ret = pos = iwl_dump_fh(trans, &buf);
+	ret = pos = iwl_pcie_dump_fh(trans, &buf);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
@@ -2091,17 +2087,17 @@ static const struct iwl_trans_ops trans_ops_pcie = {
 
	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
 
-	.send_cmd = iwl_trans_pcie_send_cmd,
+	.send_cmd = iwl_pcie_send_cmd,
 
	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,
 
-	.txq_disable = iwl_trans_pcie_txq_disable,
-	.txq_enable = iwl_trans_pcie_txq_enable,
+	.txq_disable = iwl_pcie_txq_disable,
+	.txq_enable = iwl_pcie_txq_enable,
 
	.dbgfs_register = iwl_trans_pcie_dbgfs_register,
 
-	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
+	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
 
 #ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index dcc7e1256e39..eac0481a9c71 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -42,12 +42,11 @@
 #define IWL_TX_CRC_SIZE 4
 #define IWL_TX_DELIMITER_SIZE 4
 
-/**
- * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+/*
+ * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
-				       struct iwl_tx_queue *txq,
-				       u16 byte_cnt)
+void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+				      struct iwl_txq *txq, u16 byte_cnt)
 {
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -88,10 +87,10 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
		tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
 }
 
-/**
- * iwl_txq_update_write_ptr - Send new write index to hardware
+/*
+ * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
-void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
+void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
 {
	u32 reg = 0;
	int txq_id = txq->q.id;
@@ -206,8 +205,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
	tfd->num_tbs = 0;
 }
 
-/**
- * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+/*
+ * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @dma_dir - the direction of the DMA mapping
@@ -215,8 +214,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-		      enum dma_data_direction dma_dir)
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+			   enum dma_data_direction dma_dir)
 {
	struct iwl_tfd *tfd_tmp = txq->tfds;
 
@@ -247,10 +246,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	}
 }
 
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
-				 struct iwl_tx_queue *txq,
-				 dma_addr_t addr, u16 len,
-				 u8 reset)
+int iwl_pcie_tx_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+			  dma_addr_t addr, u16 len, u8 reset)
 {
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
@@ -322,7 +319,7 @@ int iwl_queue_space(const struct iwl_queue *q)
	return s;
 }
 
-/**
+/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
 int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
@@ -355,7 +352,7 @@ int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
 }
 
 static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
-					  struct iwl_tx_queue *txq)
+					  struct iwl_txq *txq)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -415,8 +412,8 @@ static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
 }
 
-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
-			       int sta_id, int tid, int frame_limit, u16 ssn)
+void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+			 int sta_id, int tid, int frame_limit, u16 ssn)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -477,7 +474,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
		    txq_id, fifo, ssn & 0xff);
 }
 
-void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
+void iwl_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
@@ -494,14 +491,14 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
	_iwl_write_targ_mem_dwords(trans, stts_addr,
				   zero_val, ARRAY_SIZE(zero_val));
 
-	iwl_tx_queue_unmap(trans, txq_id);
+	iwl_pcie_txq_unmap(trans, txq_id);
 
	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
 
 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
 
-/**
+/*
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data point
 * @cmd: a point to the ucode command structure
@@ -513,7 +510,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
@@ -576,8 +573,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
-		 trans_pcie_get_cmd_string(trans_pcie, cmd->id),
-		 cmd->id, copy_size)) {
+		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}
@@ -640,7 +636,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
-		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
@@ -654,7 +650,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);
 
-	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
+	iwl_pcie_tx_build_tfd(trans, txq, phys_addr, copy_size, 1);
 
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		const void *data = cmd->data[i];
@@ -676,8 +672,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
			goto out;
		}
 
-		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
-					     cmd->len[i], 0);
+		iwl_pcie_tx_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
	}
 
	out_meta->flags = cmd->flags;
@@ -696,7 +691,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_txq_update_write_ptr(trans, txq);
+	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
 out:
	spin_unlock_bh(&txq->lock);
@@ -707,7 +702,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 }
 
 static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
-				      struct iwl_tx_queue *txq)
+				      struct iwl_txq *txq)
 {
	if (!trans_pcie->wd_timeout)
		return;
@@ -722,7 +717,7 @@ static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
 }
 
-/**
+/*
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
@@ -733,7 +728,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;
 
@@ -761,8 +756,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
	iwl_queue_progress(trans_pcie, txq);
 }
 
-/**
- * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+/*
+ * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
@@ -771,8 +766,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
-void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
-			 int handler_status)
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
 {
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -782,7 +777,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
 
	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
@@ -820,13 +815,11 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
	if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
		IWL_WARN(trans,
			 "HCMD_ACTIVE already clear for command %s\n",
-			 trans_pcie_get_cmd_string(trans_pcie,
-						   cmd->hdr.cmd));
+			 get_cmd_string(trans_pcie, cmd->hdr.cmd));
	}
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
-		       trans_pcie_get_cmd_string(trans_pcie,
-						 cmd->hdr.cmd));
+		       get_cmd_string(trans_pcie, cmd->hdr.cmd));
	wake_up(&trans_pcie->wait_command_queue);
 }
 
@@ -851,7 +844,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
@@ -864,17 +857,17 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
	int ret;
 
	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
-		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+		       get_cmd_string(trans_pcie, cmd->id));
 
	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+			get_cmd_string(trans_pcie, cmd->id));
		return -EIO;
	}
 
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
-		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+		       get_cmd_string(trans_pcie, cmd->id));
 
	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
@@ -882,7 +875,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
 
@@ -892,13 +885,13 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
-			struct iwl_tx_queue *txq =
+			struct iwl_txq *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;
 
			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
-				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
+				get_cmd_string(trans_pcie, cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 
			IWL_ERR(trans,
@@ -908,8 +901,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans,
				       "Clearing HCMD_ACTIVE for command %s\n",
-				       trans_pcie_get_cmd_string(trans_pcie,
-								 cmd->id));
+				       get_cmd_string(trans_pcie, cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
@@ -917,7 +909,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}
@@ -930,7 +922,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}
@@ -957,7 +949,7 @@ cancel:
	return ret;
 }
 
-int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+int iwl_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -975,11 +967,11 @@ int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 }
 
 /* Frees buffers until index _not_ inclusive */
-int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
 {
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;
@@ -1019,7 +1011,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
 
		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
 
-		iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+		iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
		freed++;
	}
-- 
2.39.5
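
Editor's note: several of the renamed helpers (iwl_rxq_space, iwl_queue_space)
share one ring-buffer accounting idiom that the hunks above only show in
fragments. The standalone C sketch below illustrates that idiom under stated
assumptions: QUEUE_SIZE, struct ring, and ring_space are hypothetical names
invented for this example and are not the driver's code; the slack handling
mirrors the pattern visible in iwl_rxq_space, not the exact driver source.

	/* Sketch of read/write ring-index accounting, assuming a
	 * power-of-two ring of QUEUE_SIZE entries. Not driver code. */
	#include <assert.h>
	#include <stdio.h>

	#define QUEUE_SIZE 256	/* assumed ring size, like RX_QUEUE_SIZE */

	struct ring {
		unsigned int read;	/* next entry software will process */
		unsigned int write;	/* next entry software will post */
	};

	/* Free slots between write and read; hold back two slots so a
	 * completely full ring is never mistaken for an empty one. */
	static int ring_space(const struct ring *q)
	{
		int s = q->read - q->write;

		if (s <= 0)
			s += QUEUE_SIZE;	/* indexes wrapped around */
		s -= 2;				/* slack against full==empty */
		if (s < 0)
			s = 0;
		return s;
	}

	int main(void)
	{
		struct ring q = { .read = 4, .write = 250 };

		/* write (250) to read (4) wraps: 10 slots in flight,
		 * so 10 minus 2 slack slots remain postable. */
		printf("space = %d\n", ring_space(&q));
		assert(ring_space(&q) == 8);
		return 0;
	}

The same signed-difference-then-wrap computation is what lets the driver call
these helpers without ever comparing raw indexes for equality on both the full
and empty cases.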