git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
net: bcmgenet: simplify __bcmgenet_tx_reclaim()
author: Petri Gynther <pgynther@google.com>
Wed, 4 Mar 2015 22:30:01 +0000 (14:30 -0800)
committer: David S. Miller <davem@davemloft.net>
Thu, 5 Mar 2015 04:54:54 +0000 (23:54 -0500)
1. Use c_index and ring->c_index to determine how many TxCBs/TxBDs are
   ready for cleanup
   - c_index = the current value of TDMA_CONS_INDEX
   - TDMA_CONS_INDEX is HW-incremented and auto-wraparound (0x0-0xFFFF)
   - ring->c_index = __bcmgenet_tx_reclaim() cleaned up to this point on
     the previous invocation

2. Add bcmgenet_tx_ring->clean_ptr
   - index of the next TxCB to be cleaned
   - incremented as TxCBs/TxBDs are processed
   - value always in range [ring->cb_ptr, ring->end_ptr]

3. Fix incrementing of dev->stats.tx_packets
   - should be incremented only when tx_cb_ptr->skb != NULL

These changes simplify __bcmgenet_tx_reclaim(). Furthermore, Tx ring size
can now be any value.

With the old code, Tx ring size had to be a power-of-2:
   num_tx_bds = ring->size;
   c_index &= (num_tx_bds - 1);
   last_c_index &= (num_tx_bds - 1);

Signed-off-by: Petri Gynther <pgynther@google.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h

index 84feb241d60b05ad11ede04752f22ed768ecc781..9137187063f7db90e99b38d58ccecbb3f4df7dbd 100644 (file)
@@ -978,39 +978,32 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
                                          struct bcmgenet_tx_ring *ring)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
-       int last_tx_cn, last_c_index, num_tx_bds;
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
        unsigned int pkts_compl = 0;
-       unsigned int bds_compl;
        unsigned int c_index;
+       unsigned int txbds_ready;
+       unsigned int txbds_processed = 0;
 
        /* Compute how many buffers are transmitted since last xmit call */
        c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
-       txq = netdev_get_tx_queue(dev, ring->queue);
-
-       last_c_index = ring->c_index;
-       num_tx_bds = ring->size;
-
-       c_index &= (num_tx_bds - 1);
+       c_index &= DMA_C_INDEX_MASK;
 
-       if (c_index >= last_c_index)
-               last_tx_cn = c_index - last_c_index;
+       if (likely(c_index >= ring->c_index))
+               txbds_ready = c_index - ring->c_index;
        else
-               last_tx_cn = num_tx_bds - last_c_index + c_index;
+               txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
 
        netif_dbg(priv, tx_done, dev,
-                 "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
-                 __func__, ring->index,
-                 c_index, last_tx_cn, last_c_index);
+                 "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+                 __func__, ring->index, ring->c_index, c_index, txbds_ready);
 
        /* Reclaim transmitted buffers */
-       while (last_tx_cn-- > 0) {
-               tx_cb_ptr = ring->cbs + last_c_index;
-               bds_compl = 0;
+       while (txbds_processed < txbds_ready) {
+               tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
                if (tx_cb_ptr->skb) {
                        pkts_compl++;
-                       bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
+                       dev->stats.tx_packets++;
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
                                         dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -1026,20 +1019,23 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
                                       DMA_TO_DEVICE);
                        dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
                }
-               dev->stats.tx_packets++;
-               ring->free_bds += bds_compl;
 
-               last_c_index++;
-               last_c_index &= (num_tx_bds - 1);
+               txbds_processed++;
+               if (likely(ring->clean_ptr < ring->end_ptr))
+                       ring->clean_ptr++;
+               else
+                       ring->clean_ptr = ring->cb_ptr;
        }
 
+       ring->free_bds += txbds_processed;
+       ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+
        if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+               txq = netdev_get_tx_queue(dev, ring->queue);
                if (netif_tx_queue_stopped(txq))
                        netif_tx_wake_queue(txq);
        }
 
-       ring->c_index = c_index;
-
        return pkts_compl;
 }
 
@@ -1734,6 +1730,7 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
        }
        ring->cbs = priv->tx_cbs + start_ptr;
        ring->size = size;
+       ring->clean_ptr = start_ptr;
        ring->c_index = 0;
        ring->free_bds = size;
        ring->write_ptr = start_ptr;
index 016bd12bf493e514b4b82a58857a34f3f3a5e025..548b7e9347271586dfa73baada1d6315eefda938 100644 (file)
@@ -525,6 +525,7 @@ struct bcmgenet_tx_ring {
        unsigned int    queue;          /* queue index */
        struct enet_cb  *cbs;           /* tx ring buffer control block*/
        unsigned int    size;           /* size of each tx ring */
+       unsigned int    clean_ptr;      /* Tx ring clean pointer */
        unsigned int    c_index;        /* last consumer index of each ring*/
        unsigned int    free_bds;       /* # of free bds for each ring */
        unsigned int    write_ptr;      /* Tx ring write pointer SW copy */