git.karo-electronics.de Git - linux-beck.git/commitdiff
mv643xx_eth: rework receive skb cache alignment
author: Lennert Buytenhek <buytenh@wantstofly.org>
Wed, 6 May 2009 03:01:22 +0000 (03:01 +0000)
committer: David S. Miller <davem@davemloft.net>
Wed, 6 May 2009 22:33:39 +0000 (15:33 -0700)
On the platforms that mv643xx_eth is used on, the manual skb->data
alignment logic in mv643xx_eth can be simplified, as the only case we
need to handle is where NET_SKB_PAD is not a multiple of the cache
line size.  If this is the case, the extra padding we need can be
computed at compile time, while if NET_SKB_PAD _is_ a multiple of
the cache line size, the code can be optimised out entirely.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/mv643xx_eth.c

index cc16f3e4d89c961914869cbd0e1c78bcb209acfc..05bb1c55da665653ffef8b5cacc68b463b5fd6b4 100644 (file)
@@ -174,6 +174,7 @@ static char mv643xx_eth_driver_version[] = "1.4";
  */
 #define DEFAULT_RX_QUEUE_SIZE  128
 #define DEFAULT_TX_QUEUE_SIZE  256
+#define SKB_DMA_REALIGN                ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
 
 
 /*
@@ -649,23 +650,20 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
        refilled = 0;
        while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
                struct sk_buff *skb;
-               int unaligned;
                int rx;
                struct rx_desc *rx_desc;
 
                skb = __skb_dequeue(&mp->rx_recycle);
                if (skb == NULL)
-                       skb = dev_alloc_skb(mp->skb_size +
-                                           dma_get_cache_alignment() - 1);
+                       skb = dev_alloc_skb(mp->skb_size);
 
                if (skb == NULL) {
                        mp->oom = 1;
                        goto oom;
                }
 
-               unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
-               if (unaligned)
-                       skb_reserve(skb, dma_get_cache_alignment() - unaligned);
+               if (SKB_DMA_REALIGN)
+                       skb_reserve(skb, SKB_DMA_REALIGN);
 
                refilled++;
                rxq->rx_desc_count++;
@@ -964,8 +962,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                if (skb != NULL) {
                        if (skb_queue_len(&mp->rx_recycle) <
                                        mp->rx_ring_size &&
-                           skb_recycle_check(skb, mp->skb_size +
-                                       dma_get_cache_alignment() - 1))
+                           skb_recycle_check(skb, mp->skb_size))
                                __skb_queue_head(&mp->rx_recycle, skb);
                        else
                                dev_kfree_skb(skb);
@@ -2336,6 +2333,14 @@ static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
         * size field are ignored by the hardware.
         */
        mp->skb_size = (skb_size + 7) & ~7;
+
+       /*
+        * If NET_SKB_PAD is smaller than a cache line,
+        * netdev_alloc_skb() will cause skb->data to be misaligned
+        * to a cache line boundary.  If this is the case, include
+        * some extra space to allow re-aligning the data area.
+        */
+       mp->skb_size += SKB_DMA_REALIGN;
 }
 
 static int mv643xx_eth_open(struct net_device *dev)