igb: update code to better handle incrementing page count
author Alexander Duyck <alexander.h.duyck@intel.com>
Wed, 14 Dec 2016 23:05:34 +0000 (15:05 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 15 Dec 2016 00:04:08 +0000 (16:04 -0800)
Update the driver code so that we do bulk updates of the page reference
count instead of incrementing it by one reference at a time.  The
advantage of doing this is that we cut down on atomic operations, which
in turn should give us a slight improvement in cycles per packet.  In
addition, if we eventually move this over to using build_skb the gains
will be more noticeable.
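
The accounting behind this is visible in the diff below: pagecnt_bias counts
how many references to the page the driver still owns, handing a fragment to
the stack consumes one bias reference instead of touching the atomic page
count, and only when the bias drains to its last reference does the driver
restock with a single page_ref_add().  A minimal userspace sketch of that
invariant (not part of the patch; a plain integer stands in for the atomic
page refcount):

/*
 * Userspace sketch of the pagecnt_bias scheme, NOT kernel code: plain
 * integers stand in for the atomic page refcount, and the names mirror
 * the driver only for readability.
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

static unsigned int refcount = 1;        /* page_ref_count() right after alloc */
static unsigned short pagecnt_bias = 1;  /* references the driver still owns */

/* Hand one fragment to the stack: transfer a driver-owned reference by
 * decrementing the bias rather than incrementing the page refcount.
 */
static int reuse_page(void)
{
	unsigned int bias = pagecnt_bias--;  /* pre-decrement value, as in the patch */

	/* sole owner only if every previously handed-out reference was dropped */
	if (refcount != bias)
		return 0;

	/* bias drained to its last reference: restock both counts in bulk,
	 * one atomic add in place of USHRT_MAX individual increments
	 */
	if (bias == 1) {
		refcount += USHRT_MAX;       /* models page_ref_add(page, USHRT_MAX) */
		pagecnt_bias = USHRT_MAX;
	}
	return 1;
}

int main(void)
{
	for (int i = 0; i < 200000; i++) {
		assert(reuse_page());
		refcount--;   /* the stack frees its skb, dropping the page ref */
	}

	/* teardown: release the driver's remaining references in one shot,
	 * mirroring __page_frag_drain(page, 0, pagecnt_bias) in the patch
	 */
	refcount -= pagecnt_bias;
	printf("refcount after drain: %u\n", refcount);  /* prints 0 */
	return 0;
}

Over a full cycle this costs one bulk atomic add per USHRT_MAX reuses
instead of one page_ref_inc() per reuse.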

Link: http://lkml.kernel.org/r/20161110113616.76501.17072.stgit@ahduyck-blue-test.jf.intel.com
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Cc: Helge Deller <deller@gmx.de>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Keguang Zhang <keguang.zhang@gmail.com>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Tobias Klauser <tklauser@distanz.ch>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c

diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index d11093dce1b98dc3c892ba7a0507358b92bc13d6..acbc3abe2dddfc7bdf87a3724a95b136237a519e 100644
@@ -210,7 +210,12 @@ struct igb_tx_buffer {
 struct igb_rx_buffer {
        dma_addr_t dma;
        struct page *page;
-       unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+       __u32 page_offset;
+#else
+       __u16 page_offset;
+#endif
+       __u16 pagecnt_bias;
 };
 
 struct igb_tx_queue_stats {
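
The conditional width above follows from how the offset is used further
down: with PAGE_SIZE < 8192 the driver flips page_offset between the two
buffers carved out of one page (see the "flip page offset" hunk below), so
the value always fits in a __u16, while 64-bit or 64 KiB-page builds can
advance the offset past 65535.  pagecnt_bias is a __u16 because USHRT_MAX
is exactly the value used to restock it.  A standalone illustration of the
flip, assuming the driver's 2 KiB small-page buffer size (IGB_RX_BUFSZ is
an assumption here; it does not appear in this diff):

/* Illustrative only: alternating page_offset between the two receive
 * buffers in a 4 KiB page via XOR. IGB_RX_BUFSZ is assumed to be the
 * driver's 2 KiB buffer size; it is not shown in this diff.
 */
#include <stdio.h>

#define IGB_RX_BUFSZ 2048

int main(void)
{
	unsigned short page_offset = 0;  /* a __u16 is plenty on 4 KiB pages */
	int i;

	for (i = 0; i < 4; i++) {
		printf("rx buffer at offset %u\n", page_offset);
		page_offset ^= IGB_RX_BUFSZ;  /* 0 -> 2048 -> 0 -> ... */
	}
	return 0;
}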
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 60f122531e29886ae7d3f1b69eace6afd6d7be85..a761001308dcc190d8c9bad57dad40f09dc49caa 100644
@@ -3962,7 +3962,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
                                     PAGE_SIZE,
                                     DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
-               __free_page(buffer_info->page);
+               __page_frag_drain(buffer_info->page, 0,
+                                 buffer_info->pagecnt_bias);
 
                buffer_info->page = NULL;
        }
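
__page_frag_drain() comes from the same patch series as this change; it
drops a whole batch of page references in one atomic operation and frees
the page once none remain, which is what lets the driver retire all of its
pagecnt_bias references here and in igb_fetch_rx_buffer() below.  A rough
sketch of the helper's effect (an approximation, not the mm/page_alloc.c
implementation):

/* Approximate behaviour of __page_frag_drain(page, order, count):
 * drop 'count' references with one atomic sub-and-test and free the
 * page at the given order when the last reference goes. Sketch only.
 */
#include <linux/gfp.h>
#include <linux/page_ref.h>

static void page_frag_drain_sketch(struct page *page, unsigned int order,
				   unsigned int count)
{
	if (page_ref_sub_and_test(page, count))
		__free_pages(page, order);  /* refcount hit zero */
}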
@@ -6834,13 +6835,15 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
                                  struct page *page,
                                  unsigned int truesize)
 {
+       unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
        /* avoid re-using remote pages */
        if (unlikely(igb_page_is_reserved(page)))
                return false;
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely(page_count(page) != 1))
+       if (unlikely(page_ref_count(page) != pagecnt_bias))
                return false;
 
        /* flip page offset to other buffer */
@@ -6853,10 +6856,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
                return false;
 #endif
 
-       /* Even if we own the page, we are not allowed to use atomic_set()
-        * This would break get_page_unless_zero() users.
+       /* If we have drained the page fragment pool we need to update
+        * the pagecnt_bias and page count so that we fully restock the
+        * number of references the driver holds.
         */
-       page_ref_inc(page);
+       if (unlikely(pagecnt_bias == 1)) {
+               page_ref_add(page, USHRT_MAX);
+               rx_buffer->pagecnt_bias = USHRT_MAX;
+       }
 
        return true;
 }
@@ -6908,7 +6915,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
                        return true;
 
                /* this page cannot be reused so discard it */
-               __free_page(page);
                return false;
        }
 
@@ -6979,10 +6985,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                /* hand second half of page back to the ring */
                igb_reuse_rx_page(rx_ring, rx_buffer);
        } else {
-               /* we are not reusing the buffer so unmap it */
+               /* We are not reusing the buffer so unmap it and free
+                * any references we are holding to it
+                */
                dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                                     PAGE_SIZE, DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
+               __page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
        }
 
        /* clear contents of rx_buffer */
@@ -7256,6 +7265,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
        bi->dma = dma;
        bi->page = page;
        bi->page_offset = 0;
+       bi->pagecnt_bias = 1;
 
        return true;
 }