git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc...
author David S. Miller <davem@davemloft.net>
Wed, 18 Jul 2012 16:08:36 +0000 (09:08 -0700)
committer David S. Miller <davem@davemloft.net>
Wed, 18 Jul 2012 16:08:36 +0000 (09:08 -0700)
Ben Hutchings says:

====================
1. Fix potential badness when running a self-test with SR-IOV enabled.
2. Fix calculation of some interface statistics that could run backward.
3. Miscellaneous cleanup.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
1  2 
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c

index a1965c07d1e35244c565f745f353954f29d3d599,55be2fdb0e620bd88527ce5590552619489f3caa..cd9c0a989692b5a547c74136c9be28e208f6951c
@@@ -68,6 -68,8 +68,8 @@@
  #define EFX_TXQ_TYPES         4
  #define EFX_MAX_TX_QUEUES     (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
  
+ struct efx_self_tests;
  /**
   * struct efx_special_buffer - An Efx special buffer
   * @addr: CPU base address of the buffer
@@@ -100,7 -102,7 +102,7 @@@ struct efx_special_buffer 
   * @len: Length of this fragment.
   *    This field is zero when the queue slot is empty.
   * @continuation: True if this fragment is not the end of a packet.
-  * @unmap_single: True if pci_unmap_single should be used.
+  * @unmap_single: True if dma_unmap_single should be used.
   * @unmap_len: Length of this fragment to unmap
   */
  struct efx_tx_buffer {
@@@ -527,7 -529,7 +529,7 @@@ struct efx_phy_operations 
  };
  
  /**
 - * @enum efx_phy_mode - PHY operating mode flags
 + * enum efx_phy_mode - PHY operating mode flags
   * @PHY_MODE_NORMAL: on and should pass traffic
   * @PHY_MODE_TX_DISABLED: on with TX disabled
   * @PHY_MODE_LOW_POWER: set to low power through MDIO
@@@ -901,7 -903,8 +903,8 @@@ static inline unsigned int efx_port_num
   * @get_wol: Get WoL configuration from driver state
   * @set_wol: Push WoL configuration to the NIC
   * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
-  * @test_registers: Test read/write functionality of control registers
+  * @test_chip: Test registers.  Should use efx_nic_test_registers(), and is
+  *    expected to reset the NIC.
   * @test_nvram: Test validity of NVRAM contents
   * @revision: Hardware architecture revision
   * @mem_map_size: Memory BAR mapped size
@@@ -946,7 -949,7 +949,7 @@@ struct efx_nic_type 
        void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
        int (*set_wol)(struct efx_nic *efx, u32 type);
        void (*resume_wol)(struct efx_nic *efx);
-       int (*test_registers)(struct efx_nic *efx);
+       int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
        int (*test_nvram)(struct efx_nic *efx);
  
        int revision;
index fca61fea38e05dd6a063c66932f656cf3d943a04,6d1c6cfd6ba8fb8ccdbbb2c6487b6f3512871cfc..719319b89d7a8086f315559a1c68cfe28a82acd3
@@@ -155,11 -155,11 +155,11 @@@ static int efx_init_rx_buffers_skb(stru
                rx_buf->len = skb_len - NET_IP_ALIGN;
                rx_buf->flags = 0;
  
-               rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+               rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
                                                  skb->data, rx_buf->len,
-                                                 PCI_DMA_FROMDEVICE);
-               if (unlikely(pci_dma_mapping_error(efx->pci_dev,
-                                                  rx_buf->dma_addr))) {
+                                                 DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+                                              rx_buf->dma_addr))) {
                        dev_kfree_skb_any(skb);
                        rx_buf->u.skb = NULL;
                        return -EIO;
@@@ -200,10 -200,10 +200,10 @@@ static int efx_init_rx_buffers_page(str
                                   efx->rx_buffer_order);
                if (unlikely(page == NULL))
                        return -ENOMEM;
-               dma_addr = pci_map_page(efx->pci_dev, page, 0,
+               dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
                                        efx_rx_buf_size(efx),
-                                       PCI_DMA_FROMDEVICE);
-               if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
+                                       DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
                        __free_pages(page, efx->rx_buffer_order);
                        return -EIO;
                }
@@@ -247,14 -247,14 +247,14 @@@ static void efx_unmap_rx_buffer(struct 
  
                state = page_address(rx_buf->u.page);
                if (--state->refcnt == 0) {
-                       pci_unmap_page(efx->pci_dev,
+                       dma_unmap_page(&efx->pci_dev->dev,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
-                                      PCI_DMA_FROMDEVICE);
+                                      DMA_FROM_DEVICE);
                }
        } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
-               pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
-                                rx_buf->len, PCI_DMA_FROMDEVICE);
+               dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
+                                rx_buf->len, DMA_FROM_DEVICE);
        }
  }
  
@@@ -336,7 -336,6 +336,7 @@@ static void efx_recycle_rx_buffer(struc
  /**
   * efx_fast_push_rx_descriptors - push new RX descriptors quickly
   * @rx_queue:         RX descriptor queue
 + *
   * This will aim to fill the RX descriptor queue up to
   * @rx_queue->@max_fill. If there is insufficient atomic
   * memory to do so, a slow fill will be scheduled.