sfc: Properly sync RX DMA buffer when it is not the last in the page
author    Ben Hutchings <bhutchings@solarflare.com>
          Thu, 20 Dec 2012 18:48:20 +0000 (18:48 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 28 Mar 2013 19:06:01 +0000 (12:06 -0700)
[ Upstream commit 3a68f19d7afb80f548d016effbc6ed52643a8085 ]

We may currently allocate two RX DMA buffers to a page, and only unmap
the page when the second is completed.  We do not sync the first RX
buffer to be completed; this can result in packet loss or corruption
if the last RX buffer completed in a NAPI poll is the first in a page
and is not DMA-coherent.  (In the middle of a NAPI poll, we will
handle the following RX completion and unmap the page *before* looking
at the content of the first buffer.)
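
To make the failure mode concrete, the sketch below shows the pattern the
patch establishes, in driver-agnostic form.  The struct layout and the
names rx_buf, page_refcnt and rx_buf_complete() are illustrative, not the
actual sfc code (which tracks this via struct efx_rx_page_state); only
dma_unmap_page() and dma_sync_single_for_cpu() are the real kernel DMA API
calls involved.

/* Illustrative only: two RX buffers share one page; the streaming DMA
 * mapping covers the whole page and is torn down only when the second
 * buffer completes.  A completed buffer whose page mapping is still
 * live must have its used bytes synced before the CPU reads them.
 */
struct rx_buf {
	struct page *page;	   /* backing page, shared by two buffers */
	dma_addr_t page_dma;	   /* DMA address of the whole page */
	dma_addr_t dma_addr;	   /* DMA address of this buffer's slice */
	unsigned int *page_refcnt; /* buffers still outstanding on page */
};

static void rx_buf_complete(struct device *dev, struct rx_buf *buf,
			    unsigned int used_len)
{
	if (--*buf->page_refcnt == 0) {
		/* Last buffer in the page: unmapping syncs the whole
		 * mapping back to the CPU as a side effect. */
		dma_unmap_page(dev, buf->page_dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	} else if (used_len) {
		/* The mapping stays live for the sibling buffer, so
		 * explicitly hand the received bytes back to the CPU. */
		dma_sync_single_for_cpu(dev, buf->dma_addr, used_len,
					DMA_FROM_DEVICE);
	}
}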

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
[bwh: Backported to 3.0: adjust context]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 4004fc2477bed97766d3b4c4c101c90d0a7f3e64..ce6f53fc48ac73e89edfce910f708f11ab8e56a9 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -245,7 +245,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-                               struct efx_rx_buffer *rx_buf)
+                               struct efx_rx_buffer *rx_buf,
+                               unsigned int used_len)
 {
        if (rx_buf->is_page && rx_buf->u.page) {
                struct efx_rx_page_state *state;
@@ -256,6 +257,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
                                       PCI_DMA_FROMDEVICE);
+               } else if (used_len) {
+                       dma_sync_single_for_cpu(&efx->pci_dev->dev,
+                                               rx_buf->dma_addr, used_len,
+                                               DMA_FROM_DEVICE);
                }
        } else if (!rx_buf->is_page && rx_buf->u.skb) {
                pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
@@ -278,7 +283,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
 {
-       efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+       efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -549,10 +554,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                goto out;
        }
 
-       /* Release card resources - assumes all RX buffers consumed in-order
-        * per RX queue
+       /* Release and/or sync DMA mapping - assumes all RX buffers
+        * consumed in-order per RX queue
         */
-       efx_unmap_rx_buffer(efx, rx_buf);
+       efx_unmap_rx_buffer(efx, rx_buf, len);
 
        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
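
A note on the calling convention that results from the hunks above: the
completion fast path in efx_rx_packet() passes the received length, so
only the bytes the NIC actually wrote are synced back to the CPU, while
teardown via efx_fini_rx_buffer() passes 0, since the buffer contents are
about to be discarded and no sync is needed; the used_len == 0 case in
efx_unmap_rx_buffer() skips the sync accordingly.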