ath10k: use pre-allocated DMA buffer in Tx
author    Peter Oh <poh@qca.qualcomm.com>      Mon, 5 Oct 2015 14:56:40 +0000 (17:56 +0300)
committer Kalle Valo <kvalo@qca.qualcomm.com>  Tue, 6 Oct 2015 12:04:12 +0000 (15:04 +0300)
The ath10k driver currently calls dma_pool_alloc for each Tx packet and
the corresponding dma_pool_free at Tx completion. Using a pre-allocated
DMA buffer in Tx instead saves about 5% of CPU resources, at the cost
of roughly 56 KB of additional memory.

Signed-off-by: Peter Oh <poh@qca.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/txrx.c

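For context, here is a minimal sketch of the pattern this patch adopts, using
hypothetical names (txbuf_pool, txbuf_slot) rather than the driver's real
structures: one coherent DMA region is allocated at setup, each in-flight
frame is handed one slot addressed by its msdu_id, and the whole region is
freed once at teardown. Per-packet dma_pool_alloc/dma_pool_free round-trips
become plain index arithmetic.

/*
 * Sketch only; hypothetical names. The actual patch sizes the region as
 * max_num_pending_tx * sizeof(struct ath10k_htt_txbuf) and passes GFP_DMA.
 */
#include <linux/dma-mapping.h>
#include <linux/types.h>

struct txbuf_slot {
	u8 data[40];			/* stand-in for ath10k_htt_txbuf */
};

struct txbuf_pool {
	struct txbuf_slot *vaddr;	/* CPU address of the region */
	dma_addr_t paddr;		/* device address of the region */
	int num_slots;
};

static int txbuf_pool_init(struct device *dev, struct txbuf_pool *pool,
			   int num_slots)
{
	size_t size = num_slots * sizeof(struct txbuf_slot);

	pool->vaddr = dma_alloc_coherent(dev, size, &pool->paddr, GFP_KERNEL);
	if (!pool->vaddr)
		return -ENOMEM;
	pool->num_slots = num_slots;
	return 0;
}

/* Per packet: no allocator call, only index arithmetic on the region. */
static struct txbuf_slot *txbuf_slot_vaddr(struct txbuf_pool *pool,
					   int msdu_id)
{
	return &pool->vaddr[msdu_id];
}

static dma_addr_t txbuf_slot_paddr(struct txbuf_pool *pool, int msdu_id)
{
	return pool->paddr + msdu_id * sizeof(struct txbuf_slot);
}

static void txbuf_pool_destroy(struct device *dev, struct txbuf_pool *pool)
{
	if (pool->vaddr)
		dma_free_coherent(dev,
				  pool->num_slots * sizeof(struct txbuf_slot),
				  pool->vaddr, pool->paddr);
}

With illustrative figures of roughly 1400 pending-Tx slots at about 40 bytes
each, the region works out to around 56 KB, consistent with the memory
trade-off stated in the commit message.
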
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 5a8e4eae7a9cbe817c7d62107f8bdc30dd89b449..db0a99b4e9d9356a1bb412d1413206ec8805df53 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1488,7 +1488,6 @@ struct ath10k_htt {
        int num_pending_mgmt_tx;
        struct idr pending_tx;
        wait_queue_head_t empty_tx_wq;
-       struct dma_pool *tx_pool;
 
        /* set if host-fw communication goes haywire
         * used to avoid further failures */
@@ -1509,6 +1508,11 @@ struct ath10k_htt {
                dma_addr_t paddr;
                struct htt_msdu_ext_desc *vaddr;
        } frag_desc;
+
+       struct {
+               dma_addr_t paddr;
+               struct ath10k_htt_txbuf *vaddr;
+       } txbuf;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index a31d508fc397d93a29e169c0f8b7c82aa0503f28..555174756eb176ade8bc4d5bf40b1b1b77e13d8d 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -108,9 +108,12 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
        spin_lock_init(&htt->tx_lock);
        idr_init(&htt->pending_tx);
 
-       htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
-                                      sizeof(struct ath10k_htt_txbuf), 4, 0);
-       if (!htt->tx_pool) {
+       size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
+       htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
+                                                 &htt->txbuf.paddr,
+                                                 GFP_DMA);
+       if (!htt->txbuf.vaddr) {
+               ath10k_err(ar, "failed to alloc tx buffer\n");
                ret = -ENOMEM;
                goto free_idr_pending_tx;
        }
@@ -125,14 +128,17 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
        if (!htt->frag_desc.vaddr) {
                ath10k_warn(ar, "failed to alloc fragment desc memory\n");
                ret = -ENOMEM;
-               goto free_tx_pool;
+               goto free_txbuf;
        }
 
 skip_frag_desc_alloc:
        return 0;
 
-free_tx_pool:
-       dma_pool_destroy(htt->tx_pool);
+free_txbuf:
+       size = htt->max_num_pending_tx *
+                         sizeof(struct ath10k_htt_txbuf);
+       dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
+                         htt->txbuf.paddr);
 free_idr_pending_tx:
        idr_destroy(&htt->pending_tx);
        return ret;
@@ -160,7 +166,13 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
 
        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
        idr_destroy(&htt->pending_tx);
-       dma_pool_destroy(htt->tx_pool);
+
+       if (htt->txbuf.vaddr) {
+               size = htt->max_num_pending_tx *
+                                 sizeof(struct ath10k_htt_txbuf);
+               dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
+                                 htt->txbuf.paddr);
+       }
 
        if (htt->frag_desc.vaddr) {
                size = htt->max_num_pending_tx *
@@ -521,7 +533,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
-       dma_addr_t paddr = 0;
        u32 frags_paddr = 0;
        struct htt_msdu_ext_desc *ext_desc = NULL;
        bool limit_mgmt_desc = false;
@@ -550,13 +561,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);
 
-       skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
-                                          &paddr);
-       if (!skb_cb->htt.txbuf) {
-               res = -ENOMEM;
-               goto err_free_msdu_id;
-       }
-       skb_cb->htt.txbuf_paddr = paddr;
+       skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
+       skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
+               (sizeof(struct ath10k_htt_txbuf) * msdu_id);
 
        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
@@ -574,7 +581,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
-               goto err_free_txbuf;
+               goto err_free_msdu_id;
        }
 
        switch (skb_cb->txmode) {
@@ -706,10 +713,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
 err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
-err_free_txbuf:
-       dma_pool_free(htt->tx_pool,
-                     skb_cb->htt.txbuf,
-                     skb_cb->htt.txbuf_paddr);
 err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 7db7d501726b9b7605e955ba03163a83ed65188c..6d1105ab4592b1cb6434b9fb6b98702288324b5f 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -92,11 +92,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        skb_cb = ATH10K_SKB_CB(msdu);
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
-       if (skb_cb->htt.txbuf)
-               dma_pool_free(htt->tx_pool,
-                             skb_cb->htt.txbuf,
-                             skb_cb->htt.txbuf_paddr);
-
        ath10k_report_offchan_tx(htt->ar, msdu);
 
        info = IEEE80211_SKB_CB(msdu);
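
Continuing the sketch above (hypothetical names), the completion path that
this txrx.c hunk simplifies now only has to unmap the frame's streaming DMA
mapping: there is no per-packet free, because the slot's lifetime follows
the msdu_id and the whole region is released in txbuf_pool_destroy().

#include <linux/skbuff.h>

/* Completion-path sketch: unmap the frame; the txbuf slot needs no
 * explicit free, since it lives in the pre-allocated region until
 * txbuf_pool_destroy() runs at driver teardown. */
static void tx_complete(struct device *dev, struct sk_buff *msdu,
			dma_addr_t frame_paddr)
{
	dma_unmap_single(dev, frame_paddr, msdu->len, DMA_TO_DEVICE);
	/* no dma_pool_free() counterpart here anymore */
}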