include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index d77961fc75f90c8eda86139f6885b5384fe47f56..8f461d5cee7775da632822317da4151ac02a2ff4 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -29,6 +29,7 @@
 
 #include <linux/types.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
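
This hunk is the ixgbe_ethtool.c piece of the tree-wide include cleanup named above: once slab.h stops being pulled in implicitly through other headers, every file that calls the slab allocators has to include it directly. This file still does, for example in the loopback-test ring setup (call quoted from the era's ixgbe_setup_desc_rings() as an assumption):

	/* compiles only if <linux/slab.h> is included explicitly */
	tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
	                                  sizeof(struct ixgbe_tx_buffer),
	                                  GFP_KERNEL);
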
@@ -441,10 +442,8 @@ static int ixgbe_set_tso(struct net_device *netdev, u32 data)
                netdev->features |= NETIF_F_TSO;
                netdev->features |= NETIF_F_TSO6;
        } else {
-               netif_tx_stop_all_queues(netdev);
                netdev->features &= ~NETIF_F_TSO;
                netdev->features &= ~NETIF_F_TSO6;
-               netif_tx_start_all_queues(netdev);
        }
        return 0;
 }
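
Turning TSO off no longer bounces every Tx queue around the flag change; the netif_tx_stop_all_queues()/netif_tx_start_all_queues() pair is simply dropped. The likely rationale (inferred, not stated in the diff): frames already queued with TSO complete safely regardless, and blindly restarting all queues here could wake queues that were stopped for unrelated reasons such as a full ring or link down. The resulting helper, assembled from the hunk (the opening if (data) line is inferred from the braces):

static int ixgbe_set_tso(struct net_device *netdev, u32 data)
{
	if (data) {
		netdev->features |= NETIF_F_TSO;
		netdev->features |= NETIF_F_TSO6;
	} else {
		netdev->features &= ~NETIF_F_TSO;
		netdev->features &= ~NETIF_F_TSO6;
	}
	return 0;
}
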
@@ -834,8 +833,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_ring *tx_ring = adapter->tx_ring;
-       struct ixgbe_ring *rx_ring = adapter->rx_ring;
+       struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+       struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
 
        ring->rx_max_pending = IXGBE_MAX_RXD;
        ring->tx_max_pending = IXGBE_MAX_TXD;
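
Most of the mechanical churn below follows from one structural change in struct ixgbe_adapter: tx_ring and rx_ring turn from a single contiguous array of ring structs into an array of per-ring pointers. Every adapter->tx_ring[i].field becomes adapter->tx_ring[i]->field, and the old queue-0 shorthand adapter->tx_ring becomes adapter->tx_ring[0]. A sketch of the two layouts (the real declarations live in ixgbe.h; the array bound shown is an assumption):

	/* before: one kcalloc'd block, rings addressed as tx_ring[i] */
	struct ixgbe_ring *tx_ring;
	/* after: per-queue pointers, rings addressed as tx_ring[i]-> */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES];
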
@@ -867,8 +866,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
        new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
        new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
 
-       if ((new_tx_count == adapter->tx_ring->count) &&
-           (new_rx_count == adapter->rx_ring->count)) {
+       if ((new_tx_count == adapter->tx_ring[0]->count) &&
+           (new_rx_count == adapter->rx_ring[0]->count)) {
                /* nothing to do */
                return 0;
        }
@@ -878,25 +877,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 
        if (!netif_running(adapter->netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i].count = new_tx_count;
+                       adapter->tx_ring[i]->count = new_tx_count;
                for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i].count = new_rx_count;
+                       adapter->rx_ring[i]->count = new_rx_count;
                adapter->tx_ring_count = new_tx_count;
                adapter->rx_ring_count = new_rx_count;
-               goto err_setup;
+               goto clear_reset;
        }
 
-       temp_tx_ring = kcalloc(adapter->num_tx_queues,
-                              sizeof(struct ixgbe_ring), GFP_KERNEL);
+       temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
        if (!temp_tx_ring) {
                err = -ENOMEM;
-               goto err_setup;
+               goto clear_reset;
        }
 
        if (new_tx_count != adapter->tx_ring_count) {
-               memcpy(temp_tx_ring, adapter->tx_ring,
-                      adapter->num_tx_queues * sizeof(struct ixgbe_ring));
                for (i = 0; i < adapter->num_tx_queues; i++) {
+                       memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+                              sizeof(struct ixgbe_ring));
                        temp_tx_ring[i].count = new_tx_count;
                        err = ixgbe_setup_tx_resources(adapter,
                                                       &temp_tx_ring[i]);
@@ -904,28 +902,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                                while (i) {
                                        i--;
                                        ixgbe_free_tx_resources(adapter,
-                                                               &temp_tx_ring[i]);
+                                                             &temp_tx_ring[i]);
                                }
-                               goto err_setup;
+                               goto clear_reset;
                        }
                }
                need_update = true;
        }
 
-       temp_rx_ring = kcalloc(adapter->num_rx_queues,
-                              sizeof(struct ixgbe_ring), GFP_KERNEL);
-       if ((!temp_rx_ring) && (need_update)) {
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
-               kfree(temp_tx_ring);
+       temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+       if (!temp_rx_ring) {
                err = -ENOMEM;
                goto err_setup;
        }
 
        if (new_rx_count != adapter->rx_ring_count) {
-               memcpy(temp_rx_ring, adapter->rx_ring,
-                      adapter->num_rx_queues * sizeof(struct ixgbe_ring));
                for (i = 0; i < adapter->num_rx_queues; i++) {
+                       memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+                              sizeof(struct ixgbe_ring));
                        temp_rx_ring[i].count = new_rx_count;
                        err = ixgbe_setup_rx_resources(adapter,
                                                       &temp_rx_ring[i]);
@@ -947,22 +941,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 
                /* tx */
                if (new_tx_count != adapter->tx_ring_count) {
-                       kfree(adapter->tx_ring);
-                       adapter->tx_ring = temp_tx_ring;
-                       temp_tx_ring = NULL;
+                       for (i = 0; i < adapter->num_tx_queues; i++) {
+                               ixgbe_free_tx_resources(adapter,
+                                                       adapter->tx_ring[i]);
+                               memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+                                      sizeof(struct ixgbe_ring));
+                       }
                        adapter->tx_ring_count = new_tx_count;
                }
 
                /* rx */
                if (new_rx_count != adapter->rx_ring_count) {
-                       kfree(adapter->rx_ring);
-                       adapter->rx_ring = temp_rx_ring;
-                       temp_rx_ring = NULL;
+                       for (i = 0; i < adapter->num_rx_queues; i++) {
+                               ixgbe_free_rx_resources(adapter,
+                                                       adapter->rx_ring[i]);
+                               memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+                                      sizeof(struct ixgbe_ring));
+                       }
                        adapter->rx_ring_count = new_rx_count;
                }
                ixgbe_up(adapter);
        }
+
+       vfree(temp_rx_ring);
 err_setup:
+       vfree(temp_tx_ring);
+clear_reset:
        clear_bit(__IXGBE_RESETTING, &adapter->state);
        return err;
 }
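
The relabelled exits form a staged unwind: clear_reset only drops the __IXGBE_RESETTING bit, err_setup additionally frees the Tx staging array, and the success path frees the Rx staging array and falls through both; vfree(NULL) being a no-op is what keeps the fall-through safe. Reduced to its control flow (assembled from the hunks above):

	temp_tx_ring = vmalloc(...);
	if (!temp_tx_ring)
		goto clear_reset;	/* nothing staged yet */
	/* ... stage resized Tx rings ... */
	temp_rx_ring = vmalloc(...);
	if (!temp_rx_ring)
		goto err_setup;		/* drop the Tx staging array */
	/* ... stage resized Rx rings, commit both on success ... */
	vfree(temp_rx_ring);
err_setup:
	vfree(temp_tx_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;

One reading worth noting: if the Rx vmalloc fails after Tx descriptor resources were already set up into the staging array, err_setup frees only the array itself, which looks like a small leak of those staged resources (an observation from these hunks alone, not from any changelog).
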
@@ -974,6 +978,9 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
                return IXGBE_TEST_LEN;
        case ETH_SS_STATS:
                return IXGBE_STATS_LEN;
+       case ETH_SS_NTUPLE_FILTERS:
+               return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
+                       ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
        default:
                return -EOPNOTSUPP;
        }
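
The new ETH_SS_NTUPLE_FILTERS case tells the ethtool core how many strings to budget when reporting programmed n-tuple filters back to userspace: up to ETHTOOL_MAX_NTUPLE_LIST_ENTRY filters, each rendered as ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY strings. A sketch of how the count is consumed (the core's exact allocation call is an assumption; ETH_GSTRING_LEN is the standard 32-byte string slot):

	/* ETHTOOL_GSTRINGS sizes its table from the sset count */
	data = kmalloc(count * ETH_GSTRING_LEN, GFP_USER);
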
@@ -1007,13 +1014,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
        for (j = 0; j < adapter->num_tx_queues; j++) {
-               queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+               queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
                for (k = 0; k < stat_count; k++)
                        data[i + k] = queue_stat[k];
                i += k;
        }
        for (j = 0; j < adapter->num_rx_queues; j++) {
-               queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+               queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
                for (k = 0; k < stat_count; k++)
                        data[i + k] = queue_stat[k];
                i += k;
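
The per-queue stats loops now reach the counters through the ring pointers. The cast-and-iterate trick only works because the queue stats struct is nothing but u64 counters, e.g. (declaration assumed from the era's ixgbe.h):

	struct ixgbe_queue_stats {
		u64 packets;
		u64 bytes;
	};	/* stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64) */
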
@@ -1627,7 +1634,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
        reg_data |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               int j = adapter->rx_ring[0].reg_idx;
+               int j = adapter->rx_ring[0]->reg_idx;
                u32 k;
                for (k = 0; k < 10; k++) {
                        if (IXGBE_READ_REG(&adapter->hw,
@@ -1847,6 +1854,26 @@ static void ixgbe_diag_test(struct net_device *netdev,
                if (ixgbe_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
+               if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+                       int i;
+                       for (i = 0; i < adapter->num_vfs; i++) {
+                               if (adapter->vfinfo[i].clear_to_send) {
+                                       netdev_warn(netdev, "%s",
+                                                   "offline diagnostic is not "
+                                                   "supported when VFs are "
+                                                   "present\n");
+                                       data[0] = 1;
+                                       data[1] = 1;
+                                       data[2] = 1;
+                                       data[3] = 1;
+                                       eth_test->flags |= ETH_TEST_FL_FAILED;
+                                       clear_bit(__IXGBE_TESTING,
+                                                 &adapter->state);
+                                       goto skip_ol_tests;
+                               }
+                       }
+               }
+
                if (if_running)
                        /* indicate we're in test mode */
                        dev_close(netdev);
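
New guard in the hunk above: with SR-IOV enabled, any VF that has completed its mailbox handshake (clear_to_send) vetoes the offline self-test before the dev_close()/reset cycle can pull the device out from under running VFs. All four offline results are marked failed and control jumps over the whole offline block to the new skip_ol_tests label. The guard, condensed:

	/* any active VF vetoes the destructive offline tests */
	for (i = 0; i < adapter->num_vfs; i++) {
		if (adapter->vfinfo[i].clear_to_send) {
			data[0] = data[1] = data[2] = data[3] = 1; /* failed */
			eth_test->flags |= ETH_TEST_FL_FAILED;
			goto skip_ol_tests;
		}
	}
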
@@ -1867,11 +1894,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
                if (ixgbe_intr_test(adapter, &data[2]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
+               /* If SRIOV or VMDq is enabled then skip MAC
+                * loopback diagnostic. */
+               if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+                                     IXGBE_FLAG_VMDQ_ENABLED)) {
+                       DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
+                               "mode\n");
+                       data[3] = 0;
+                       goto skip_loopback;
+               }
+
                ixgbe_reset(adapter);
                DPRINTK(HW, INFO, "loopback testing starting\n");
                if (ixgbe_loopback_test(adapter, &data[3]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
+skip_loopback:
                ixgbe_reset(adapter);
 
                clear_bit(__IXGBE_TESTING, &adapter->state);
@@ -1891,6 +1929,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
 
                clear_bit(__IXGBE_TESTING, &adapter->state);
        }
+skip_ol_tests:
        msleep_interruptible(4 * 1000);
 }
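
With both new labels in place, the offline path of ixgbe_diag_test() has three outcomes (summary assembled from the hunks above):

	/*
	 * active VF present      -> skip_ol_tests: all four tests flagged failed
	 * SR-IOV/VMDq (VT) mode  -> skip_loopback: MAC loopback skipped,
	 *                           data[3] = 0 reported as passing
	 * otherwise              -> register, eeprom, interrupt, loopback and
	 *                           link tests run in full
	 */
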
 
@@ -2000,7 +2039,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+       ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
 
        /* only valid if in constant ITR mode */
        switch (adapter->rx_itr_setting) {
@@ -2053,7 +2092,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
                return -EINVAL;
 
        if (ec->tx_max_coalesced_frames_irq)
-               adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+               adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
 
        if (ec->rx_coalesce_usecs > 1) {
                /* check the limits */
@@ -2134,23 +2173,124 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 static int ixgbe_set_flags(struct net_device *netdev, u32 data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       bool need_reset = false;
 
        ethtool_op_set_flags(netdev, data);
 
-       if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
-               return 0;
-
        /* if state changes we need to update adapter->flags and reset */
        if ((!!(data & ETH_FLAG_LRO)) != 
            (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
                adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+               need_reset = true;
+       }
+
+       /*
+        * Check if Flow Director n-tuple support was enabled or disabled.  If
+        * the state changed, we need to reset.
+        */
+       if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
+           (!(data & ETH_FLAG_NTUPLE))) {
+               /* turn off Flow Director perfect, set hash and reset */
+               adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               need_reset = true;
+       } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
+                  (data & ETH_FLAG_NTUPLE)) {
+               /* turn off Flow Director hash, enable perfect and reset */
+               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+               need_reset = true;
+       } else {
+               /* no state change */
+       }
+
+       if (need_reset) {
                if (netif_running(netdev))
                        ixgbe_reinit_locked(adapter);
                else
                        ixgbe_reset(adapter);
        }
+
        return 0;
+}
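
ixgbe_set_flags() now accumulates need_reset instead of resetting inside the LRO branch, and the early return for non-RSC hardware is gone so the new n-tuple handling runs regardless of RSC capability. The ETH_FLAG_NTUPLE transitions keep Flow Director's two modes mutually exclusive: hardware ATR hash sampling when n-tuple is off, software-programmed perfect filters when it is on. Spelled out (flag names from the hunk):

	/*
	 * NTUPLE turned on:  FDIR_HASH_CAPABLE off, FDIR_PERFECT_CAPABLE on
	 * NTUPLE turned off: FDIR_PERFECT_CAPABLE off, FDIR_HASH_CAPABLE on
	 * either transition, or an LRO toggle, sets need_reset so the device
	 * is reinitialised exactly once at the end of the handler
	 */
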
+
+static int ixgbe_set_rx_ntuple(struct net_device *dev,
+                               struct ethtool_rx_ntuple *cmd)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
+       struct ixgbe_atr_input input_struct;
+       struct ixgbe_atr_input_masks input_masks;
+       int target_queue;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+               return -EOPNOTSUPP;
+
+       /*
+        * Don't allow programming if the action is a queue greater than
+        * the number of online Tx queues.
+        */
+       if ((fs.action >= adapter->num_tx_queues) ||
+           (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+               return -EINVAL;
+
+       memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+       memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+
+       input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
+       input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
+       input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
+       input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
+       input_masks.vlan_id_mask = fs.vlan_tag_mask;
+       /* only use the lowest 2 bytes for flex bytes */
+       input_masks.data_mask = (fs.data_mask & 0xffff);
 
+       switch (fs.flow_type) {
+       case TCP_V4_FLOW:
+               ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+               break;
+       case UDP_V4_FLOW:
+               ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+               break;
+       case SCTP_V4_FLOW:
+               ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+               break;
+       default:
+               return -1;
+       }
+
+       /* Mask bits from the inputs based on user-supplied mask */
+       ixgbe_atr_set_src_ipv4_82599(&input_struct,
+                   (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
+       ixgbe_atr_set_dst_ipv4_82599(&input_struct,
+                   (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
+       /* 82599 expects these to be byte-swapped for perfect filtering */
+       ixgbe_atr_set_src_port_82599(&input_struct,
+              ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
+       ixgbe_atr_set_dst_port_82599(&input_struct,
+              ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+
+       /* VLAN and Flex bytes are either completely masked or not */
+       if (!fs.vlan_tag_mask)
+               ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+
+       if (!input_masks.data_mask)
+               /* make sure we only use the first 2 bytes of user data */
+               ixgbe_atr_set_flex_byte_82599(&input_struct,
+                                             (fs.data & 0xffff));
+
+       /* determine if we need to drop or route the packet */
+       if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+               target_queue = MAX_RX_QUEUES - 1;
+       else
+               target_queue = fs.action;
+
+       spin_lock(&adapter->fdir_perfect_lock);
+       ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
+                                           &input_masks, 0, target_queue);
+       spin_unlock(&adapter->fdir_perfect_lock);
+
+       return 0;
 }
 
 static const struct ethtool_ops ixgbe_ethtool_ops = {
@@ -2188,6 +2328,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
        .set_coalesce           = ixgbe_set_coalesce,
        .get_flags              = ethtool_op_get_flags,
        .set_flags              = ixgbe_set_flags,
+       .set_rx_ntuple          = ixgbe_set_rx_ntuple,
 };
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)