diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 9b50b1052b09fe6f750c1693b12dfafca4b3f2f6..d82823b5c8abce08948fad392cd9e8c3368aabf5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -208,11 +208,12 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
  * Free all buffers.
  * 0-fill, but do not free "txq" descriptor structure.
  */
-static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 {
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct pci_dev *dev = priv->pci_dev;
-       int len;
+       int i, slots_num, len;
 
        if (q->n_bd == 0)
                return;
@@ -227,7 +228,12 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
                len += IWL_MAX_SCAN_SIZE;
 
        /* De-alloc array of command/tx buffers */
-       pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
+       slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
+                       TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+       for (i = 0; i < slots_num; i++)
+               kfree(txq->cmd[i]);
+       if (txq_id == IWL_CMD_QUEUE_NUM)
+               kfree(txq->cmd[slots_num]);
 
        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
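
Note on the data-structure change behind this hunk: the free path now walks an array of per-slot command buffers instead of releasing one PCI-coherent block, which implies struct iwl_tx_queue carries per-slot pointers. The following layout sketch is inferred from the hunks in this patch, not copied from the driver headers, so field order and exact sizes are assumptions:

/* Rough queue layout implied by the per-slot kfree() calls above (illustrative). */
struct iwl_tx_queue_sketch {
	struct iwl_queue q;                     /* read/write pointers, n_bd, n_window */
	struct iwl_tfd_frame *bd;               /* TFD circular buffer (DMA-coherent)  */
	struct iwl_tx_info *txb;                /* per-entry driver data (skb pointers) */
	struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS];  /* one kmalloc'd buffer per slot; the
	                                         * command queue also fills the extra
	                                         * slot at index TFD_CMD_SLOTS with an
	                                         * oversized buffer for scan commands */
	int need_update;
};
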
@@ -400,8 +406,7 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
                             struct iwl_tx_queue *txq,
                             int slots_num, u32 txq_id)
 {
-       struct pci_dev *dev = priv->pci_dev;
-       int len;
+       int i, len;
        int rc = 0;
 
        /*
@@ -412,17 +417,25 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
         * For normal Tx queues (all other queues), no super-size command
         * space is needed.
         */
-       len = sizeof(struct iwl_cmd) * slots_num;
-       if (txq_id == IWL_CMD_QUEUE_NUM)
-               len +=  IWL_MAX_SCAN_SIZE;
-       txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
-       if (!txq->cmd)
-               return -ENOMEM;
+       len = sizeof(struct iwl_cmd);
+       for (i = 0; i <= slots_num; i++) {
+               if (i == slots_num) {
+                       if (txq_id == IWL_CMD_QUEUE_NUM)
+                               len += IWL_MAX_SCAN_SIZE;
+                       else
+                               continue;
+               }
+
+               txq->cmd[i] = kmalloc(len, GFP_KERNEL | GFP_DMA);
+               if (!txq->cmd[i])
+                       return -ENOMEM;
+       }
 
        /* Alloc driver data array and TFD circular buffer */
        rc = iwl_tx_queue_alloc(priv, txq, txq_id);
        if (rc) {
-               pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
+               for (i = 0; i < slots_num; i++)
+                       kfree(txq->cmd[i]);
 
                return -ENOMEM;
        }
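
Taken together with the free path above, queue setup now allocates one command buffer per slot with kmalloc(GFP_KERNEL | GFP_DMA), and only the command queue gets the extra, oversized slot at index slots_num for huge (scan) commands. A minimal self-contained sketch of that pattern, assuming the driver's iwl_cmd, IWL_CMD_QUEUE_NUM and IWL_MAX_SCAN_SIZE names; the helper and the explicit unwind of partially allocated slots are illustrative, not part of this patch:

/* Per-slot allocation sketch (needs <linux/slab.h> for kmalloc/kfree). */
static int example_alloc_cmd_slots(struct iwl_cmd **cmd, int slots_num,
				   u32 txq_id)
{
	size_t len = sizeof(struct iwl_cmd);
	int i;

	for (i = 0; i <= slots_num; i++) {
		if (i == slots_num) {
			if (txq_id != IWL_CMD_QUEUE_NUM)
				break;	/* only the command queue has a huge slot */
			len += IWL_MAX_SCAN_SIZE;
		}
		cmd[i] = kmalloc(len, GFP_KERNEL | GFP_DMA);
		if (!cmd[i])
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		kfree(cmd[i]);
	return -ENOMEM;
}
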
@@ -451,7 +464,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 
        /* Tx queues */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-               iwl_tx_queue_free(priv, &priv->txq[txq_id]);
+               iwl_tx_queue_free(priv, txq_id);
 
        /* Keep-warm buffer */
        iwl_kw_free(priv);
@@ -480,7 +493,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
        /* Alloc keep-warm buffer */
        ret = iwl_kw_alloc(priv);
        if (ret) {
-               IWL_ERROR("Keep Warm allocation failed");
+               IWL_ERROR("Keep Warm allocation failed\n");
                goto error_kw;
        }
        spin_lock_irqsave(&priv->lock, flags);
@@ -751,20 +764,19 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_tfd_frame *tfd;
-       u32 *control_flags;
-       int txq_id = skb_get_queue_mapping(skb);
-       struct iwl_tx_queue *txq = NULL;
-       struct iwl_queue *q = NULL;
+       struct iwl_tx_queue *txq;
+       struct iwl_queue *q;
+       struct iwl_cmd *out_cmd;
+       struct iwl_tx_cmd *tx_cmd;
+       int swq_id, txq_id;
        dma_addr_t phys_addr;
        dma_addr_t txcmd_phys;
        dma_addr_t scratch_phys;
-       struct iwl_cmd *out_cmd = NULL;
-       struct iwl_tx_cmd *tx_cmd;
        u16 len, idx, len_org;
        u16 seq_number = 0;
-       u8 id, hdr_len, unicast;
-       u8 sta_id;
        __le16 fc;
+       u8 hdr_len, unicast;
+       u8 sta_id;
        u8 wait_write_ptr = 0;
        u8 tid = 0;
        u8 *qc = NULL;
@@ -789,7 +801,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        }
 
        unicast = !is_multicast_ether_addr(hdr->addr1);
-       id = 0;
 
        fc = hdr->frame_control;
 
@@ -827,14 +838,16 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
        IWL_DEBUG_TX("station Id %d\n", sta_id);
 
+       swq_id = skb_get_queue_mapping(skb);
+       txq_id = swq_id;
        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                tid = qc[0] & 0xf;
-               seq_number = priv->stations[sta_id].tid[tid].seq_number &
-                               IEEE80211_SCTL_SEQ;
-               hdr->seq_ctrl = cpu_to_le16(seq_number) |
-                       (hdr->seq_ctrl &
-                               __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
+               seq_number = priv->stations[sta_id].tid[tid].seq_number;
+               seq_number &= IEEE80211_SCTL_SEQ;
+               hdr->seq_ctrl = hdr->seq_ctrl &
+                               __constant_cpu_to_le16(IEEE80211_SCTL_FRAG);
+               hdr->seq_ctrl |= cpu_to_le16(seq_number);
                seq_number += 0x10;
                /* aggregation is on for this <sta,tid> */
                if (info->flags & IEEE80211_TX_CTL_AMPDU)
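
The rewritten QoS block keeps the fragment bits of seq_ctrl and replaces only the sequence-number bits. In <linux/ieee80211.h>, IEEE80211_SCTL_FRAG is 0x000F (fragment number, bits 0-3) and IEEE80211_SCTL_SEQ is 0xFFF0 (sequence number, bits 4-15); the driver applies the same masks to __le16 fields via cpu_to_le16(). A small worked example on plain u16 values:

/* Worked example of the seq_ctrl composition above. */
static u16 compose_seq_ctrl(u16 old_seq_ctrl, u16 sta_tid_seq)
{
	u16 seq = sta_tid_seq & IEEE80211_SCTL_SEQ;	   /* keep bits 4..15 */

	return (old_seq_ctrl & IEEE80211_SCTL_FRAG) | seq; /* keep bits 0..3  */
}
/* e.g. old_seq_ctrl = 0xabc3, sta_tid_seq = 0x0150  ->  0x0153; the later
 * "seq_number += 0x10" then advances the sequence field by exactly one. */
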
@@ -851,7 +864,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        /* Set up first empty TFD within this queue's circular TFD buffer */
        tfd = &txq->bd[q->write_ptr];
        memset(tfd, 0, sizeof(*tfd));
-       control_flags = (u32 *) tfd;
        idx = get_cmd_index(q, q->write_ptr, 0);
 
        /* Set up driver data for this TFD */
@@ -859,7 +871,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        txq->txb[q->write_ptr].skb[0] = skb;
 
        /* Set up first empty entry in queue's array of Tx/cmd buffers */
-       out_cmd = &txq->cmd[idx];
+       out_cmd = txq->cmd[idx];
        tx_cmd = &out_cmd->cmd.tx;
        memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
        memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
@@ -899,14 +911,15 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
-       txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
-                    offsetof(struct iwl_cmd, hdr);
+       txcmd_phys = pci_map_single(priv->pci_dev, out_cmd,
+                               sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);
+       txcmd_phys += offsetof(struct iwl_cmd, hdr);
 
        /* Add buffer containing Tx command and MAC(!) header to TFD's
         * first entry */
        iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
 
-       if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
+       if (info->control.hw_key)
                iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
 
        /* Set up TFD's 2nd entry to point directly to remainder of skb,
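
Rather than deriving the command's bus address from one coherent allocation (the removed txq->dma_addr_cmd arithmetic), the patch streaming-maps the per-slot buffer with pci_map_single() and then offsets to the command header inside it. A hedged sketch of that pattern; the helper name is made up, and the mapping-error check is only noted since the hunk above does not include one:

/* Sketch: map a per-slot command buffer for device reads and return the bus
 * address of its header (pci_map_single()/PCI_DMA_TODEVICE are the pre-DMA-API
 * names this driver used at the time). */
static dma_addr_t example_map_cmd(struct pci_dev *pdev, struct iwl_cmd *out_cmd)
{
	dma_addr_t phys = pci_map_single(pdev, out_cmd,
					 sizeof(struct iwl_cmd),
					 PCI_DMA_TODEVICE);

	/* A robust caller would verify the mapping succeeded before use. */
	return phys + offsetof(struct iwl_cmd, hdr);
}
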
@@ -962,16 +975,15 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        if (ret)
                return ret;
 
-       if ((iwl_queue_space(q) < q->high_mark)
-           && priv->mac80211_registered) {
+       if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
                if (wait_write_ptr) {
                        spin_lock_irqsave(&priv->lock, flags);
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(priv, txq);
                        spin_unlock_irqrestore(&priv->lock, flags);
+               } else {
+                       ieee80211_stop_queue(priv->hw, swq_id);
                }
-
-               ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
        }
 
        return 0;
@@ -999,13 +1011,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
        struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
        struct iwl_queue *q = &txq->q;
        struct iwl_tfd_frame *tfd;
-       u32 *control_flags;
        struct iwl_cmd *out_cmd;
-       u32 idx;
-       u16 fix_size;
        dma_addr_t phys_addr;
-       int ret;
        unsigned long flags;
+       int len, ret;
+       u32 idx;
+       u16 fix_size;
 
        cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
        fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
@@ -1031,10 +1042,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
        tfd = &txq->bd[q->write_ptr];
        memset(tfd, 0, sizeof(*tfd));
 
-       control_flags = (u32 *) tfd;
 
        idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
-       out_cmd = &txq->cmd[idx];
+       out_cmd = txq->cmd[idx];
 
        out_cmd->hdr.cmd = cmd->id;
        memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
@@ -1048,9 +1058,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                        INDEX_TO_SEQ(q->write_ptr));
        if (out_cmd->meta.flags & CMD_SIZE_HUGE)
                out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
-
-       phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
-                       offsetof(struct iwl_cmd, hdr);
+       len = (idx == TFD_CMD_SLOTS) ?
+                       IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
+       phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
+                                               PCI_DMA_TODEVICE);
+       phys_addr += offsetof(struct iwl_cmd, hdr);
        iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
 
        IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
@@ -1115,6 +1127,9 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 {
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
+       struct iwl_tfd_frame *bd = &txq->bd[index];
+       dma_addr_t dma_addr;
+       int is_odd, buf_len;
        int nfreed = 0;
 
        if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
@@ -1132,6 +1147,19 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
                                        q->write_ptr, q->read_ptr);
                        queue_work(priv->workqueue, &priv->restart);
                }
+               is_odd = (index/2) & 0x1;
+               if (is_odd) {
+                       dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
+                                       (IWL_GET_BITS(bd->pa[index],
+                                                       tb2_addr_hi20) << 16);
+                       buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
+               } else {
+                       dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
+                       buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
+               }
+
+               pci_unmap_single(priv->pci_dev, dma_addr, buf_len,
+                                PCI_DMA_TODEVICE);
                nfreed++;
        }
 }
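
Since every command buffer is now mapped at submit time, the reclaim path must balance that with a pci_unmap_single() of the same length and direction once the device has consumed the buffer; the bus address and length are read back from the TFD entry, which packs even- and odd-numbered buffers differently. A standalone restatement of the unmap step, using the same IWL_GET_BITS accessors as the hunk above:

/* Sketch: recover address/length from a TFD entry and undo the earlier
 * pci_map_single(), mirroring iwl_hcmd_queue_reclaim() above. */
static void example_reclaim_unmap(struct iwl_priv *priv,
				  struct iwl_tfd_frame *bd, int index)
{
	dma_addr_t dma_addr;
	int buf_len;

	if ((index / 2) & 0x1) {		/* odd buffer: tb2 fields */
		dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
			   (IWL_GET_BITS(bd->pa[index], tb2_addr_hi20) << 16);
		buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
	} else {				/* even buffer: tb1 fields */
		dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
		buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
	}
	pci_unmap_single(priv->pci_dev, dma_addr, buf_len, PCI_DMA_TODEVICE);
}
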
@@ -1163,7 +1191,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
        BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
 
        cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
-       cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
+       cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
 
        /* Input error checking is done when commands are added to queue. */
        if (cmd->meta.flags & CMD_WANT_SKB) {
@@ -1391,7 +1419,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
        /* For each frame attempted in aggregation,
         * update driver's record of tx frame's status. */
        for (i = 0; i < agg->frame_count ; i++) {
-               ack = bitmap & (1 << i);
+               ack = bitmap & (1ULL << i);
                successes += !!ack;
                IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
                        ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
@@ -1435,7 +1463,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
        u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
 
        if (scd_flow >= priv->hw_params.max_txq_num) {
-               IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
+               IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
                return;
        }