-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
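- /*
-  * Note: the split "<< 16 << 16" below is deliberate; a direct
-  * "<< 32" would trigger a shift-count warning when dma_addr_t
-  * is only 32 bits wide, even though the branch is dead then.
-  */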
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
- return addr;
-}
-
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
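-
-/*
- * Layout recap (a worked example; the values are illustrative, not
- * taken from this driver): a 36-bit bus address and a 12-bit length
- * share the two TFD fields. For addr = 0x987654321 and len = 0x123:
- *
- *	tb->lo       = 0x87654321	(address bits 0..31)
- *	tb->hi_n_len = 0x1239		(len << 4 | address bits 32..35)
- */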
-
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
- tb->hi_n_len = cpu_to_le16(hi_n_len);
-
- tfd->num_tbs = idx + 1;
-}
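-
-/*
- * Minimal round-trip sketch (illustrative only; "dma_addr" and "skb"
- * are assumed to exist in the caller, they are not defined here):
- *
- *	iwl_tfd_set_tb(tfd, 0, dma_addr, skb_headlen(skb));
- *	WARN_ON(iwl_tfd_tb_get_addr(tfd, 0) != dma_addr);
- *	WARN_ON(iwl_tfd_tb_get_len(tfd, 0) != skb_headlen(skb));
- */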
-
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
- return tfd->num_tbs & 0x1f;
-}
-
-/**
- * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv: driver private data
- * @txq: tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within the circular buffer)
- */
-static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
- struct iwl_tfd *tfd;
- struct pci_dev *dev = priv->pci_dev;
- int index = txq->q.read_ptr;
- int i;
- int num_tbs;
-
- tfd = &tfd_tmp[index];
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_tfd_get_num_tbs(tfd);
-
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERROR("Too many chunks: %i\n", num_tbs);
- /* @todo issue fatal error, this is quite a serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (num_tbs)
- pci_unmap_single(dev,
- pci_unmap_addr(&txq->cmd[index]->meta, mapping),
- pci_unmap_len(&txq->cmd[index]->meta, len),
- PCI_DMA_BIDIRECTIONAL);
-
- /* Unmap chunks, if any. */
- for (i = 1; i < num_tbs; i++) {
- pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
- iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
-
- if (txq->txb) {
- dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
- txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
- }
- }
-}
-
-static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tfd *tfd,
- dma_addr_t addr, u16 len)
-{
- u32 num_tbs = iwl_tfd_get_num_tbs(tfd);
-
- /* Each TFD can point to a maximum of 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERROR("Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
- return -EINVAL;
- }
-
- BUG_ON(addr & ~DMA_BIT_MASK(36));
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERROR("Unaligned address = %llx\n",
- (unsigned long long)addr);
-
- iwl_tfd_set_tb(tfd, num_tbs, addr, len);
-
- return 0;
-}
-
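-/*
- * Typical call pattern (a hedged sketch, not code from this driver;
- * "skb" and "ret" are assumptions): map the payload with the PCI DMA
- * API used elsewhere in this file, then hand the bus address to the
- * TFD.
- *
- *	dma_addr_t phys = pci_map_single(priv->pci_dev, skb->data,
- *					 skb->len, PCI_DMA_TODEVICE);
- *	ret = iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys, skb->len);
- */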