Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author David S. Miller <davem@davemloft.net>
Thu, 5 May 2011 21:59:02 +0000 (14:59 -0700)
committer David S. Miller <davem@davemloft.net>
Thu, 5 May 2011 21:59:02 +0000 (14:59 -0700)
Conflicts:
drivers/net/tg3.c

13 files changed:
MAINTAINERS
drivers/net/bnx2.c
drivers/net/mii.c
drivers/net/tg3.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/wireless/iwlegacy/iwl4965-base.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
net/core/dev.c
net/ipv4/fib_trie.c
net/ipv6/addrconf.c
net/ipv6/esp6.c

diff --combined MAINTAINERS
index 17c0917a26ea566083c5f680e6a207fefbb026d5,eb4f996d39b7278408f7692198cfa5c39a552cad..e653a99ddc4cd424db0fe7774bb31a1df082be5f
@@@ -1225,6 -1225,13 +1225,6 @@@ W:     http://wireless.kernel.org/en/users/
  S:    Supported
  F:    drivers/net/wireless/ath/ath9k/
  
 -ATHEROS AR9170 WIRELESS DRIVER
 -M:    Christian Lamparter <chunkeey@web.de>
 -L:    linux-wireless@vger.kernel.org
 -W:    http://wireless.kernel.org/en/users/Drivers/ar9170
 -S:    Obsolete
 -F:    drivers/net/wireless/ath/ar9170/
 -
  CARL9170 LINUX COMMUNITY WIRELESS DRIVER
  M:    Christian Lamparter <chunkeey@googlemail.com>
  L:    linux-wireless@vger.kernel.org
@@@ -3349,12 -3356,6 +3349,12 @@@ F:    Documentation/wimax/README.i2400
  F:    drivers/net/wimax/i2400m/
  F:    include/linux/wimax/i2400m.h
  
 +INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
 +M:    Stanislaw Gruszka <sgruszka@redhat.com>
 +L:    linux-wireless@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/wireless/iwlegacy/
 +
  INTEL WIRELESS WIFI LINK (iwlwifi)
  M:    Wey-Yi Guy <wey-yi.w.guy@intel.com>
  M:    Intel Linux Wireless <ilw@linux.intel.com>
@@@ -4372,7 -4373,6 +4372,7 @@@ S:      Maintaine
  F:    net/ipv4/
  F:    net/ipv6/
  F:    include/net/ip*
 +F:    arch/x86/net/*
  
  NETWORKING [LABELED] (NetLabel, CIPSO, Labeled IPsec, SECMARK)
  M:    Paul Moore <paul.moore@hp.com>
@@@ -6551,7 -6551,7 +6551,7 @@@ S:      Maintaine
  F:    drivers/usb/host/uhci*
  
  USB "USBNET" DRIVER FRAMEWORK
- M:    David Brownell <dbrownell@users.sourceforge.net>
+ M:    Oliver Neukum <oneukum@suse.de>
  L:    netdev@vger.kernel.org
  W:    http://www.linux-usb.org/usbnet
  S:    Maintained
diff --combined drivers/net/bnx2.c
index 1bebdfb9679f90f658272c3ee783b70b35c92212,d8383a9af9ad21a756a090bc1fd7342f3af005ee..57d3293c65bd8829638a2b23bb71e2d2b328c026
@@@ -3174,7 -3174,7 +3174,7 @@@ bnx2_rx_int(struct bnx2 *bp, struct bnx
                }
  
                skb_checksum_none_assert(skb);
 -              if (bp->rx_csum &&
 +              if ((bp->dev->features & NETIF_F_RXCSUM) &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {
  
@@@ -6696,16 -6696,17 +6696,16 @@@ bnx2_get_settings(struct net_device *de
  
        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
 -      }
 -      else {
 +      } else {
                cmd->autoneg = AUTONEG_DISABLE;
        }
  
        if (netif_carrier_ok(dev)) {
 -              cmd->speed = bp->line_speed;
 +              ethtool_cmd_speed_set(cmd, bp->line_speed);
                cmd->duplex = bp->duplex;
        }
        else {
 -              cmd->speed = -1;
 +              ethtool_cmd_speed_set(cmd, -1);
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);
@@@ -6757,21 -6758,21 +6757,21 @@@ bnx2_set_settings(struct net_device *de
                advertising |= ADVERTISED_Autoneg;
        }
        else {
 +              u32 speed = ethtool_cmd_speed(cmd);
                if (cmd->port == PORT_FIBRE) {
 -                      if ((cmd->speed != SPEED_1000 &&
 -                           cmd->speed != SPEED_2500) ||
 +                      if ((speed != SPEED_1000 &&
 +                           speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;
  
 -                      if (cmd->speed == SPEED_2500 &&
 +                      if (speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
 -              }
 -              else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
 +              } else if (speed == SPEED_1000 || speed == SPEED_2500)
                        goto err_out_unlock;
  
                autoneg &= ~AUTONEG_SPEED;
 -              req_line_speed = cmd->speed;
 +              req_line_speed = speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }
@@@ -7188,6 -7189,38 +7188,6 @@@ bnx2_set_pauseparam(struct net_device *
        return 0;
  }
  
 -static u32
 -bnx2_get_rx_csum(struct net_device *dev)
 -{
 -      struct bnx2 *bp = netdev_priv(dev);
 -
 -      return bp->rx_csum;
 -}
 -
 -static int
 -bnx2_set_rx_csum(struct net_device *dev, u32 data)
 -{
 -      struct bnx2 *bp = netdev_priv(dev);
 -
 -      bp->rx_csum = data;
 -      return 0;
 -}
 -
 -static int
 -bnx2_set_tso(struct net_device *dev, u32 data)
 -{
 -      struct bnx2 *bp = netdev_priv(dev);
 -
 -      if (data) {
 -              dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
 -              if (CHIP_NUM(bp) == CHIP_NUM_5709)
 -                      dev->features |= NETIF_F_TSO6;
 -      } else
 -              dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
 -                                 NETIF_F_TSO_ECN);
 -      return 0;
 -}
 -
  static struct {
        char string[ETH_GSTRING_LEN];
  } bnx2_stats_str_arr[] = {
@@@ -7462,74 -7495,82 +7462,74 @@@ bnx2_get_ethtool_stats(struct net_devic
  }
  
  static int
 -bnx2_phys_id(struct net_device *dev, u32 data)
 +bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
  {
        struct bnx2 *bp = netdev_priv(dev);
 -      int i;
 -      u32 save;
  
 -      bnx2_set_power_state(bp, PCI_D0);
 +      switch (state) {
 +      case ETHTOOL_ID_ACTIVE:
 +              bnx2_set_power_state(bp, PCI_D0);
 +
 +              bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
 +              REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
 +              return 1;       /* cycle on/off once per second */
 +
 +      case ETHTOOL_ID_ON:
 +              REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
 +                     BNX2_EMAC_LED_1000MB_OVERRIDE |
 +                     BNX2_EMAC_LED_100MB_OVERRIDE |
 +                     BNX2_EMAC_LED_10MB_OVERRIDE |
 +                     BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
 +                     BNX2_EMAC_LED_TRAFFIC);
 +              break;
  
 -      if (data == 0)
 -              data = 2;
 +      case ETHTOOL_ID_OFF:
 +              REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
 +              break;
  
 -      save = REG_RD(bp, BNX2_MISC_CFG);
 -      REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
 +      case ETHTOOL_ID_INACTIVE:
 +              REG_WR(bp, BNX2_EMAC_LED, 0);
 +              REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
  
 -      for (i = 0; i < (data * 2); i++) {
 -              if ((i % 2) == 0) {
 -                      REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
 -              }
 -              else {
 -                      REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
 -                              BNX2_EMAC_LED_1000MB_OVERRIDE |
 -                              BNX2_EMAC_LED_100MB_OVERRIDE |
 -                              BNX2_EMAC_LED_10MB_OVERRIDE |
 -                              BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
 -                              BNX2_EMAC_LED_TRAFFIC);
 -              }
 -              msleep_interruptible(500);
 -              if (signal_pending(current))
 -                      break;
 +              if (!netif_running(dev))
 +                      bnx2_set_power_state(bp, PCI_D3hot);
 +              break;
        }
 -      REG_WR(bp, BNX2_EMAC_LED, 0);
 -      REG_WR(bp, BNX2_MISC_CFG, save);
 -
 -      if (!netif_running(dev))
 -              bnx2_set_power_state(bp, PCI_D3hot);
  
        return 0;
  }
  
 -static int
 -bnx2_set_tx_csum(struct net_device *dev, u32 data)
 +static u32
 +bnx2_fix_features(struct net_device *dev, u32 features)
  {
        struct bnx2 *bp = netdev_priv(dev);
  
 -      if (CHIP_NUM(bp) == CHIP_NUM_5709)
 -              return ethtool_op_set_tx_ipv6_csum(dev, data);
 -      else
 -              return ethtool_op_set_tx_csum(dev, data);
 +      if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
 +              features |= NETIF_F_HW_VLAN_RX;
 +
 +      return features;
  }
  
  static int
 -bnx2_set_flags(struct net_device *dev, u32 data)
 +bnx2_set_features(struct net_device *dev, u32 features)
  {
        struct bnx2 *bp = netdev_priv(dev);
 -      int rc;
 -
 -      if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) &&
 -          !(data & ETH_FLAG_RXVLAN))
 -              return -EINVAL;
  
        /* TSO with VLAN tag won't work with current firmware */
 -      if (!(data & ETH_FLAG_TXVLAN))
 -              return -EINVAL;
 -
 -      rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
 -                                ETH_FLAG_TXVLAN);
 -      if (rc)
 -              return rc;
 +      if (features & NETIF_F_HW_VLAN_TX)
 +              dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
 +      else
 +              dev->vlan_features &= ~NETIF_F_ALL_TSO;
  
 -      if ((!!(data & ETH_FLAG_RXVLAN) !=
 +      if ((!!(features & NETIF_F_HW_VLAN_RX) !=
            !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
            netif_running(dev)) {
                bnx2_netif_stop(bp, false);
 +              dev->features = features;
                bnx2_set_rx_mode(dev);
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
                bnx2_netif_start(bp, false);
 +              return 1;
        }
  
        return 0;
@@@ -7554,11 -7595,18 +7554,11 @@@ static const struct ethtool_ops bnx2_et
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
 -      .get_rx_csum            = bnx2_get_rx_csum,
 -      .set_rx_csum            = bnx2_set_rx_csum,
 -      .set_tx_csum            = bnx2_set_tx_csum,
 -      .set_sg                 = ethtool_op_set_sg,
 -      .set_tso                = bnx2_set_tso,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
 -      .phys_id                = bnx2_phys_id,
 +      .set_phys_id            = bnx2_set_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
 -      .set_flags              = bnx2_set_flags,
 -      .get_flags              = ethtool_op_get_flags,
  };
  
  /* Called with rtnl_lock */
@@@ -8070,6 -8118,8 +8070,6 @@@ bnx2_init_board(struct pci_dev *pdev, s
        bp->tx_ring_size = MAX_TX_DESC_CNT;
        bnx2_set_rx_ring_size(bp, 255);
  
 -      bp->rx_csum = 1;
 -
        bp->tx_quick_cons_trip_int = 2;
        bp->tx_quick_cons_trip = 20;
        bp->tx_ticks_int = 18;
@@@ -8261,14 -8311,17 +8261,14 @@@ static const struct net_device_ops bnx2
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = bnx2_change_mac_addr,
        .ndo_change_mtu         = bnx2_change_mtu,
 +      .ndo_fix_features       = bnx2_fix_features,
 +      .ndo_set_features       = bnx2_set_features,
        .ndo_tx_timeout         = bnx2_tx_timeout,
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = poll_bnx2,
  #endif
  };
  
 -static inline void vlan_features_add(struct net_device *dev, u32 flags)
 -{
 -      dev->vlan_features |= flags;
 -}
 -
  static int __devinit
  bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
        memcpy(dev->dev_addr, bp->mac_addr, 6);
        memcpy(dev->perm_addr, bp->mac_addr, 6);
  
 -      dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
 -                       NETIF_F_RXHASH;
 -      vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
 -      if (CHIP_NUM(bp) == CHIP_NUM_5709) {
 -              dev->features |= NETIF_F_IPV6_CSUM;
 -              vlan_features_add(dev, NETIF_F_IPV6_CSUM);
 -      }
 -      dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 -      dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
 -      vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
 -      if (CHIP_NUM(bp) == CHIP_NUM_5709) {
 -              dev->features |= NETIF_F_TSO6;
 -              vlan_features_add(dev, NETIF_F_TSO6);
 -      }
 +      dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
 +              NETIF_F_TSO | NETIF_F_TSO_ECN |
 +              NETIF_F_RXHASH | NETIF_F_RXCSUM;
 +
 +      if (CHIP_NUM(bp) == CHIP_NUM_5709)
 +              dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
 +
 +      dev->vlan_features = dev->hw_features;
 +      dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 +      dev->features |= dev->hw_features;
 +
        if ((rc = register_netdev(dev))) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto error;
@@@ -8357,6 -8413,8 +8357,8 @@@ bnx2_remove_one(struct pci_dev *pdev
  
        unregister_netdev(dev);
  
+       del_timer_sync(&bp->timer);
        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
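
The bnx2.c hunks above replace the driver's old per-offload ethtool callbacks (get/set_rx_csum, set_tx_csum, set_sg, set_tso, set_flags) with the hw_features model and the ndo_fix_features/ndo_set_features hooks, and convert phys_id to set_phys_id. Below is a minimal, hedged sketch of that offload pattern for a hypothetical driver; the "mydrv" names and the can_keep_vlan field are invented for illustration, while the ndo hooks and NETIF_F_* flags are the ones this diff actually uses (feature masks are still plain u32 in this kernel generation).

#include <linux/netdevice.h>

struct mydrv_priv {
	bool can_keep_vlan;		/* hypothetical capability flag */
};

static u32 mydrv_fix_features(struct net_device *dev, u32 features)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	/* Force bits the hardware cannot toggle, mirroring how
	 * bnx2_fix_features() keeps NETIF_F_HW_VLAN_RX set when the chip
	 * cannot strip VLAN tags selectively. */
	if (!priv->can_keep_vlan)
		features |= NETIF_F_HW_VLAN_RX;

	return features;
}

static int mydrv_set_features(struct net_device *dev, u32 features)
{
	/* Reprogram the hardware for the requested offloads here.
	 * Returning 0 lets the core copy 'features' into dev->features;
	 * returning 1 means the driver already updated dev->features
	 * itself, as bnx2_set_features() does after restarting the NIC. */
	return 0;
}

static const struct net_device_ops mydrv_netdev_ops = {
	.ndo_fix_features	= mydrv_fix_features,
	.ndo_set_features	= mydrv_set_features,
	/* ... remaining ndo callbacks ... */
};

At probe time the driver advertises its toggleable offloads in dev->hw_features and enables them by default with dev->features |= dev->hw_features, as the bnx2_init_one() hunk above does.
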
diff --combined drivers/net/mii.c
index 4fbc816efee28078f2149b09a5af16db39d49267,d4fc00b1ff93442f699844517736a0ac1b6a0bde..c62e7816d54864d317b1c646c00c18bc78b97ead
@@@ -49,6 -49,10 +49,10 @@@ static u32 mii_get_an(struct mii_if_inf
                result |= ADVERTISED_100baseT_Half;
        if (advert & ADVERTISE_100FULL)
                result |= ADVERTISED_100baseT_Full;
+       if (advert & ADVERTISE_PAUSE_CAP)
+               result |= ADVERTISED_Pause;
+       if (advert & ADVERTISE_PAUSE_ASYM)
+               result |= ADVERTISED_Asym_Pause;
  
        return result;
  }
@@@ -58,9 -62,6 +62,9 @@@
   * @mii: MII interface
   * @ecmd: requested ethtool_cmd
   *
 + * The @ecmd parameter is expected to have been cleared before calling
 + * mii_ethtool_gset().
 + *
   * Returns 0 for success, negative on error.
   */
  int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
  
                if (nego & (ADVERTISED_1000baseT_Full |
                            ADVERTISED_1000baseT_Half)) {
 -                      ecmd->speed = SPEED_1000;
 +                      ethtool_cmd_speed_set(ecmd, SPEED_1000);
                        ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full);
                } else if (nego & (ADVERTISED_100baseT_Full |
                                   ADVERTISED_100baseT_Half)) {
 -                      ecmd->speed = SPEED_100;
 +                      ethtool_cmd_speed_set(ecmd, SPEED_100);
                        ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full);
                } else {
 -                      ecmd->speed = SPEED_10;
 +                      ethtool_cmd_speed_set(ecmd, SPEED_10);
                        ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full);
                }
        } else {
                ecmd->autoneg = AUTONEG_DISABLE;
  
 -              ecmd->speed = ((bmcr & BMCR_SPEED1000 &&
 -                              (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 :
 -                             (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10);
 +              ethtool_cmd_speed_set(ecmd,
 +                                    ((bmcr & BMCR_SPEED1000 &&
 +                                      (bmcr & BMCR_SPEED100) == 0) ?
 +                                     SPEED_1000 :
 +                                     ((bmcr & BMCR_SPEED100) ?
 +                                      SPEED_100 : SPEED_10)));
                ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
        }
  
  int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
  {
        struct net_device *dev = mii->dev;
 +      u32 speed = ethtool_cmd_speed(ecmd);
  
 -      if (ecmd->speed != SPEED_10 &&
 -          ecmd->speed != SPEED_100 &&
 -          ecmd->speed != SPEED_1000)
 +      if (speed != SPEED_10 &&
 +          speed != SPEED_100 &&
 +          speed != SPEED_1000)
                return -EINVAL;
        if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
                return -EINVAL;
                return -EINVAL;
        if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
                return -EINVAL;
 -      if ((ecmd->speed == SPEED_1000) && (!mii->supports_gmii))
 +      if ((speed == SPEED_1000) && (!mii->supports_gmii))
                return -EINVAL;
  
        /* ignore supported, maxtxpkt, maxrxpkt */
                bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
                tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
                               BMCR_SPEED1000 | BMCR_FULLDPLX);
 -              if (ecmd->speed == SPEED_1000)
 +              if (speed == SPEED_1000)
                        tmp |= BMCR_SPEED1000;
 -              else if (ecmd->speed == SPEED_100)
 +              else if (speed == SPEED_100)
                        tmp |= BMCR_SPEED100;
                if (ecmd->duplex == DUPLEX_FULL) {
                        tmp |= BMCR_FULLDPLX;
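
The mii.c hunks above add a kerneldoc note that mii_ethtool_gset() expects @ecmd to have been cleared by the caller, and switch all speed accesses to the ethtool_cmd_speed()/ethtool_cmd_speed_set() accessors. A hedged sketch of a caller following that convention is below; "mydrv" is hypothetical, while mii_ethtool_gset(), ethtool_cmd_speed() and struct mii_if_info are the real interfaces touched by this diff.

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/string.h>

struct mydrv_priv {
	struct mii_if_info mii;		/* initialised at probe time */
};

static int mydrv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct mydrv_priv *priv = netdev_priv(dev);
	int err;

	/* mii_ethtool_gset() only fills in the fields it knows about, so
	 * per the note added in this patch the caller starts from a
	 * zeroed command structure. */
	memset(ecmd, 0, sizeof(*ecmd));
	err = mii_ethtool_gset(&priv->mii, ecmd);
	if (err)
		return err;

	/* Read the speed back through the accessor instead of ecmd->speed,
	 * matching the ethtool_cmd_speed{,_set}() conversion above. */
	netdev_dbg(dev, "PHY reports %u Mb/s\n", ethtool_cmd_speed(ecmd));
	return 0;
}
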
diff --combined drivers/net/tg3.c
index 7c7c9a897c09477835bb849452941ee14ec5e29a,7a5daefb6f3311e6e6d169113d7977e908045cbb..ec1953043102285c356aff16e48ccb124b6b8035
  
  #include "tg3.h"
  
 +/* Functions & macros to verify TG3_FLAGS types */
 +
 +static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
 +{
 +      return test_bit(flag, bits);
 +}
 +
 +static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
 +{
 +      set_bit(flag, bits);
 +}
 +
 +static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 +{
 +      clear_bit(flag, bits);
 +}
 +
 +#define tg3_flag(tp, flag)                            \
 +      _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
 +#define tg3_flag_set(tp, flag)                                \
 +      _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
 +#define tg3_flag_clear(tp, flag)                      \
 +      _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
 +
  #define DRV_MODULE_NAME               "tg3"
  #define TG3_MAJ_NUM                   3
 -#define TG3_MIN_NUM                   117
 +#define TG3_MIN_NUM                   118
  #define DRV_MODULE_VERSION    \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
 -#define DRV_MODULE_RELDATE    "January 25, 2011"
 +#define DRV_MODULE_RELDATE    "April 22, 2011"
  
  #define TG3_DEF_MAC_MODE      0
  #define TG3_DEF_RX_MODE               0
  /* length of time before we decide the hardware is borked,
   * and dev->tx_timeout() should be called to fix the problem
   */
 +
  #define TG3_TX_TIMEOUT                        (5 * HZ)
  
  /* hardware minimum and maximum for a single frame's data payload */
  #define TG3_MIN_MTU                   60
  #define TG3_MAX_MTU(tp)       \
 -      ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
 +      (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
  
  /* These numbers seem to be hard coded in the NIC firmware somehow.
   * You can't change the ring sizes, but you can change where you place
   * them in the NIC onboard memory.
   */
  #define TG3_RX_STD_RING_SIZE(tp) \
 -      ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
 -        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
 -       RX_STD_MAX_SIZE_5717 : 512)
 +      (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
 +       TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
  #define TG3_DEF_RX_RING_PENDING               200
  #define TG3_RX_JMB_RING_SIZE(tp) \
 -      ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
 -        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
 -       1024 : 256)
 +      (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
 +       TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
  #define TG3_DEF_RX_JUMBO_RING_PENDING 100
  #define TG3_RSS_INDIR_TBL_SIZE                128
  
  
  #define TG3_RAW_IP_ALIGN 2
  
 -/* number of ETHTOOL_GSTATS u64's */
 -#define TG3_NUM_STATS         (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
 -
 -#define TG3_NUM_TEST          6
 -
  #define TG3_FW_UPDATE_TIMEOUT_SEC     5
  
  #define FIRMWARE_TG3          "tigon/tg3.bin"
@@@ -284,7 -266,6 +284,7 @@@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
 +      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@@ -299,7 -280,7 +299,7 @@@ MODULE_DEVICE_TABLE(pci, tg3_pci_tbl)
  
  static const struct {
        const char string[ETH_GSTRING_LEN];
 -} ethtool_stats_keys[TG3_NUM_STATS] = {
 +} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
 +      { "mbuf_lwm_thresh_hit" },
        { "rx_errors" },
        { "rx_threshold_hit" },
  
        { "nic_tx_threshold_hit" }
  };
  
 +#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
 +
 +
  static const struct {
        const char string[ETH_GSTRING_LEN];
 -} ethtool_test_keys[TG3_NUM_TEST] = {
 +} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "interrupt test (offline)" },
  };
  
 +#define TG3_NUM_TEST  ARRAY_SIZE(ethtool_test_keys)
 +
 +
  static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
  {
        writel(val, tp->regs + off);
@@@ -493,7 -467,8 +493,7 @@@ static u32 tg3_read_indirect_mbox(struc
   */
  static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
  {
 -      if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
 -          (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
 +      if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
  static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
  {
        tp->write32_mbox(tp, off, val);
 -      if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
 -          !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
 +      if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
  }
  
@@@ -521,9 -497,9 +521,9 @@@ static void tg3_write32_tx_mbox(struct 
  {
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
 -      if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
 +      if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
 -      if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
 +      if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
  }
  
@@@ -557,7 -533,7 +557,7 @@@ static void tg3_write_mem(struct tg3 *t
                return;
  
        spin_lock_irqsave(&tp->indirect_lock, flags);
 -      if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
 +      if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
  
@@@ -584,7 -560,7 +584,7 @@@ static void tg3_read_mem(struct tg3 *tp
        }
  
        spin_lock_irqsave(&tp->indirect_lock, flags);
 -      if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
 +      if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
  
@@@ -621,7 -597,7 +621,7 @@@ static int tg3_ape_lock(struct tg3 *tp
        int ret = 0;
        u32 status, req, gnt;
  
 -      if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
 +      if (!tg3_flag(tp, ENABLE_APE))
                return 0;
  
        switch (locknum) {
@@@ -667,7 -643,7 +667,7 @@@ static void tg3_ape_unlock(struct tg3 *
  {
        u32 gnt;
  
 -      if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
 +      if (!tg3_flag(tp, ENABLE_APE))
                return;
  
        switch (locknum) {
@@@ -711,14 -687,14 +711,14 @@@ static void tg3_enable_ints(struct tg3 
                struct tg3_napi *tnapi = &tp->napi[i];
  
                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
 -              if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
 +              if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
  
                tp->coal_now |= tnapi->coal_now;
        }
  
        /* Force an initial interrupt */
 -      if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
 +      if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
@@@ -734,7 -710,9 +734,7 @@@ static inline unsigned int tg3_has_work
        unsigned int work_exists = 0;
  
        /* check for phy events */
 -      if (!(tp->tg3_flags &
 -            (TG3_FLAG_USE_LINKCHG_REG |
 -             TG3_FLAG_POLL_SERDES))) {
 +      if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
@@@ -762,7 -740,8 +762,7 @@@ static void tg3_int_reenable(struct tg3
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
 -      if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
 -          tg3_has_work(tnapi))
 +      if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
  }
@@@ -772,7 -751,8 +772,7 @@@ static void tg3_switch_clocks(struct tg
        u32 clock_ctrl;
        u32 orig_clock_ctrl;
  
 -      if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
 -          (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
 +      if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;
  
        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;
  
 -      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
 +      if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
@@@ -900,104 -880,6 +900,104 @@@ static int tg3_writephy(struct tg3 *tp
        return ret;
  }
  
 +static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
 +{
 +      int err;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
 +                         MII_TG3_MMD_CTRL_DATA_NOINC | devad);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
 +
 +done:
 +      return err;
 +}
 +
 +static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
 +{
 +      int err;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
 +                         MII_TG3_MMD_CTRL_DATA_NOINC | devad);
 +      if (err)
 +              goto done;
 +
 +      err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
 +
 +done:
 +      return err;
 +}
 +
 +static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
 +{
 +      int err;
 +
 +      err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 +      if (!err)
 +              err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
 +
 +      return err;
 +}
 +
 +static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
 +{
 +      int err;
 +
 +      err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 +      if (!err)
 +              err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
 +
 +      return err;
 +}
 +
 +static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
 +{
 +      int err;
 +
 +      err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
 +                         (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
 +                         MII_TG3_AUXCTL_SHDWSEL_MISC);
 +      if (!err)
 +              err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
 +
 +      return err;
 +}
 +
 +static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
 +{
 +      if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
 +              set |= MII_TG3_AUXCTL_MISC_WREN;
 +
 +      return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
 +}
 +
 +#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
 +      tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
 +                           MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
 +                           MII_TG3_AUXCTL_ACTL_TX_6DB)
 +
 +#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
 +      tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
 +                           MII_TG3_AUXCTL_ACTL_TX_6DB);
 +
  static int tg3_bmcr_reset(struct tg3 *tp)
  {
        u32 phy_control;
@@@ -1100,7 -982,7 +1100,7 @@@ static void tg3_mdio_config_5785(struc
                return;
        }
  
 -      if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
 +      if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
 -      if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
 -              if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
 +      if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
 +              if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
 -              if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
 +              if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
 -      if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
 -              if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
 +      if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
 +              if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
 -              if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
 +              if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
@@@ -1151,7 -1033,7 +1151,7 @@@ static void tg3_mdio_start(struct tg3 *
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
 +      if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
  }
@@@ -1162,7 -1044,8 +1162,7 @@@ static int tg3_mdio_init(struct tg3 *tp
        u32 reg;
        struct phy_device *phydev;
  
 -      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 -          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
 +      if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;
  
                tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
  
        tg3_mdio_start(tp);
  
 -      if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
 -          (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
 +      if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;
  
        tp->mdio_bus = mdiobus_alloc();
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
 -              if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
 +              if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
 -              if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
 +              if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
 -              if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
 +              if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                break;
        }
  
 -      tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
 +      tg3_flag_set(tp, MDIOBUS_INITED);
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
  
  static void tg3_mdio_fini(struct tg3 *tp)
  {
 -      if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
 -              tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
 +      if (tg3_flag(tp, MDIOBUS_INITED)) {
 +              tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
  }
  
 -static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
 -{
 -      int err;
 -
 -      err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
 -      if (err)
 -              goto done;
 -
 -      err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
 -      if (err)
 -              goto done;
 -
 -      err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
 -                         MII_TG3_MMD_CTRL_DATA_NOINC | devad);
 -      if (err)
 -              goto done;
 -
 -      err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
 -
 -done:
 -      return err;
 -}
 -
 -static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
 -{
 -      int err;
 -
 -      err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
 -      if (err)
 -              goto done;
 -
 -      err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
 -      if (err)
 -              goto done;
 -
 -      err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
 -                         MII_TG3_MMD_CTRL_DATA_NOINC | devad);
 -      if (err)
 -              goto done;
 -
 -      err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
 -
 -done:
 -      return err;
 -}
 -
  /* tp->lock is held. */
  static inline void tg3_generate_fw_event(struct tg3 *tp)
  {
@@@ -1317,7 -1247,8 +1317,7 @@@ static void tg3_ump_link_report(struct 
        u32 reg;
        u32 val;
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
 -          !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
 +      if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;
  
        tg3_wait_for_event_ack(tp);
@@@ -1377,11 -1308,6 +1377,11 @@@ static void tg3_link_report(struct tg3 
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");
 +
 +              if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
 +                      netdev_info(tp->dev, "EEE is %s\n",
 +                                  tp->setlpicnt ? "enabled" : "disabled");
 +
                tg3_ump_link_report(tp);
        }
  }
@@@ -1447,12 -1373,13 +1447,12 @@@ static void tg3_setup_flow_control(stru
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
 +      if (tg3_flag(tp, USE_PHYLIB))
                autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;
  
 -      if (autoneg == AUTONEG_ENABLE &&
 -          (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
 +      if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
@@@ -1649,6 -1576,28 +1649,6 @@@ static void tg3_phy_fini(struct tg3 *tp
        }
  }
  
 -static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
 -{
 -      int err;
 -
 -      err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 -      if (!err)
 -              err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
 -
 -      return err;
 -}
 -
 -static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
 -{
 -      int err;
 -
 -      err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 -      if (!err)
 -              err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
 -
 -      return err;
 -}
 -
  static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
  {
        u32 phytest;
@@@ -1673,8 -1622,9 +1673,8 @@@ static void tg3_phy_toggle_apd(struct t
  {
        u32 reg;
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
 -          ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 -            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
 +      if (!tg3_flag(tp, 5705_PLUS) ||
 +          (tg3_flag(tp, 5717_PLUS) &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;
  
@@@ -1708,7 -1658,7 +1708,7 @@@ static void tg3_phy_toggle_automdix(str
  {
        u32 phy;
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
 +      if (!tg3_flag(tp, 5705_PLUS) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                return;
  
                        tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
                }
        } else {
 -              phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
 -                    MII_TG3_AUXCTL_SHDWSEL_MISC;
 -              if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
 -                  !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
 +              int ret;
 +
 +              ret = tg3_phy_auxctl_read(tp,
 +                                        MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
 +              if (!ret) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
 -                      phy |= MII_TG3_AUXCTL_MISC_WREN;
 -                      tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
 +                      tg3_phy_auxctl_write(tp,
 +                                           MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
                }
        }
  }
  
  static void tg3_phy_set_wirespeed(struct tg3 *tp)
  {
 +      int ret;
        u32 val;
  
        if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
                return;
  
 -      if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
 -          !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL,
 -                           (val | (1 << 15) | (1 << 4)));
 +      ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
 +      if (!ret)
 +              tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
 +                                   val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
  }
  
  static void tg3_phy_apply_otp(struct tg3 *tp)
  
        otp = tp->phy_otp;
  
 -      /* Enable SM_DSP clock and tx 6dB coding. */
 -      phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
 -            MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
 -            MII_TG3_AUXCTL_ACTL_TX_6DB;
 -      tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
 +      if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
 +              return;
  
        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
  
 -      /* Turn off SM_DSP clock. */
 -      phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
 -            MII_TG3_AUXCTL_ACTL_TX_6DB;
 -      tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
 +      TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
  }
  
  static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
                        case ASIC_REV_5717:
                        case ASIC_REV_5719:
                        case ASIC_REV_57765:
 -                              /* Enable SM_DSP clock and tx 6dB coding. */
 -                              val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
 -                                    MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
 -                                    MII_TG3_AUXCTL_ACTL_TX_6DB;
 -                              tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
 -
 -                              tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
 -
 -                              /* Turn off SM_DSP clock. */
 -                              val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
 -                                    MII_TG3_AUXCTL_ACTL_TX_6DB;
 -                              tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
 +                              if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 +                                      tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
 +                                                       0x0000);
 +                                      TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +                              }
                        }
                        /* Fallthrough */
                case TG3_CL45_D7_EEERES_STAT_LP_100TX:
@@@ -1984,9 -1945,8 +1984,9 @@@ static int tg3_phy_reset_5703_4_5(struc
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));
  
 -              /* Enable SM_DSP_CLOCK and 6dB.  */
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
 +              err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
 +              if (err)
 +                      return err;
  
                /* Block the PHY control access.  */
                tg3_phydsp_write(tp, 0x8005, 0x0800);
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
  
 -      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 -          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 -              /* Set Extended packet length bit for jumbo frames */
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
 -      } else {
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
 -      }
 +      TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
  
        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
  
@@@ -2081,7 -2047,8 +2081,7 @@@ static int tg3_phy_reset(struct tg3 *tp
                }
        }
  
 -      if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 -           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
 +      if (tg3_flag(tp, 5717_PLUS) &&
            (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
                return 0;
  
                tg3_phy_toggle_apd(tp, false);
  
  out:
 -      if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
 +      if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
 +          !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                tg3_phydsp_write(tp, 0x201f, 0x2aaa);
                tg3_phydsp_write(tp, 0x000a, 0x0323);
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
 +              TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
        }
 +
        if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        }
 +
        if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
 -              tg3_phydsp_write(tp, 0x000a, 0x310b);
 -              tg3_phydsp_write(tp, 0x201f, 0x9506);
 -              tg3_phydsp_write(tp, 0x401f, 0x14e2);
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
 +              if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 +                      tg3_phydsp_write(tp, 0x000a, 0x310b);
 +                      tg3_phydsp_write(tp, 0x201f, 0x9506);
 +                      tg3_phydsp_write(tp, 0x401f, 0x14e2);
 +                      TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +              }
        } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
 -              tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
 -              if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
 -                      tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
 -                      tg3_writephy(tp, MII_TG3_TEST1,
 -                                   MII_TG3_TEST1_TRIM_EN | 0x4);
 -              } else
 -                      tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
 +              if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 +                      tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
 +                      if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
 +                              tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
 +                              tg3_writephy(tp, MII_TG3_TEST1,
 +                                           MII_TG3_TEST1_TRIM_EN | 0x4);
 +                      } else
 +                              tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
 +
 +                      TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 +              }
        }
 +
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
 -      } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
 +              tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
 +      } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
                /* Set bit 14 with read-modify-write to preserve other bits */
 -              if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
 -                  !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
 -                      tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
 +              err = tg3_phy_auxctl_read(tp,
 +                                        MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
 +              if (!err)
 +                      tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
 +                                         val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
        }
  
        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
 -      if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
 +      if (tg3_flag(tp, JUMBO_CAPABLE)) {
                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
@@@ -2164,15 -2123,14 +2164,15 @@@ static void tg3_frob_aux_power(struct t
        bool need_vaux = false;
  
        /* The GPIOs do something completely different on 57765. */
 -      if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
 +      if (!tg3_flag(tp, IS_NIC) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                return;
  
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
 -           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) &&
 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
            tp->pdev_peer != tp->pdev) {
                struct net_device *dev_peer;
  
                if (dev_peer) {
                        struct tg3 *tp_peer = netdev_priv(dev_peer);
  
 -                      if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
 +                      if (tg3_flag(tp_peer, INIT_COMPLETE))
                                return;
  
 -                      if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
 -                          (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
 +                      if (tg3_flag(tp_peer, WOL_ENABLE) ||
 +                          tg3_flag(tp_peer, ENABLE_ASF))
                                need_vaux = true;
                }
        }
  
 -      if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
 -          (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
 +      if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
                need_vaux = true;
  
        if (need_vaux) {
@@@ -2345,10 -2304,11 +2345,10 @@@ static void tg3_power_down_phy(struct t
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);
  
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL,
 -                           MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
 -                           MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 -                           MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
 -                           MII_TG3_AUXCTL_PCTL_VREG_11V);
 +              val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 +                    MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
 +                    MII_TG3_AUXCTL_PCTL_VREG_11V;
 +              tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
        }
  
        /* The PHY should not be powered down on some chips because
  /* tp->lock is held. */
  static int tg3_nvram_lock(struct tg3 *tp)
  {
 -      if (tp->tg3_flags & TG3_FLAG_NVRAM) {
 +      if (tg3_flag(tp, NVRAM)) {
                int i;
  
                if (tp->nvram_lock_cnt == 0) {
  /* tp->lock is held. */
  static void tg3_nvram_unlock(struct tg3 *tp)
  {
 -      if (tp->tg3_flags & TG3_FLAG_NVRAM) {
 +      if (tg3_flag(tp, NVRAM)) {
                if (tp->nvram_lock_cnt > 0)
                        tp->nvram_lock_cnt--;
                if (tp->nvram_lock_cnt == 0)
  /* tp->lock is held. */
  static void tg3_enable_nvram_access(struct tg3 *tp)
  {
 -      if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
 -          !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
 +      if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);
  
                tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
  /* tp->lock is held. */
  static void tg3_disable_nvram_access(struct tg3 *tp)
  {
 -      if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
 -          !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
 +      if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);
  
                tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
@@@ -2488,10 -2450,10 +2488,10 @@@ static int tg3_nvram_exec_cmd(struct tg
  
  static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
  {
 -      if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
 -          (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
 -          (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
 -         !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
 +      if (tg3_flag(tp, NVRAM) &&
 +          tg3_flag(tp, NVRAM_BUFFERED) &&
 +          tg3_flag(tp, FLASH) &&
 +          !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
            (tp->nvram_jedecnum == JEDEC_ATMEL))
  
                addr = ((addr / tp->nvram_pagesize) <<
  
  static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
  {
 -      if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
 -          (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
 -          (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
 -         !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
 +      if (tg3_flag(tp, NVRAM) &&
 +          tg3_flag(tp, NVRAM_BUFFERED) &&
 +          tg3_flag(tp, FLASH) &&
 +          !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
            (tp->nvram_jedecnum == JEDEC_ATMEL))
  
                addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
@@@ -2526,7 -2488,7 +2526,7 @@@ static int tg3_nvram_read(struct tg3 *t
  {
        int ret;
  
 -      if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
 +      if (!tg3_flag(tp, NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);
  
        offset = tg3_nvram_phys_addr(tp, offset);
@@@ -2618,7 -2580,7 +2618,7 @@@ static int tg3_power_up(struct tg3 *tp
        pci_set_power_state(tp->pdev, PCI_D0);
  
        /* Switch out of Vaux if it is a NIC */
 -      if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
 +      if (tg3_flag(tp, IS_NIC))
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
  
        return 0;
@@@ -2632,7 -2594,7 +2632,7 @@@ static int tg3_power_down_prepare(struc
        tg3_enable_register_access(tp);
  
        /* Restore the CLKREQ setting. */
 -      if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
 +      if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 lnkctl;
  
                pci_read_config_word(tp->pdev,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
  
        device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
 -                           (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
 +                           tg3_flag(tp, WOL_ENABLE);
  
 -      if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 +      if (tg3_flag(tp, USE_PHYLIB)) {
                do_low_power = false;
                if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
                    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                                      ADVERTISED_Autoneg |
                                      ADVERTISED_10baseT_Half;
  
 -                      if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
 -                          device_should_wake) {
 -                              if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
 +                      if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
 +                              if (tg3_flag(tp, WOL_SPEED_100MB))
                                        advertising |=
                                                ADVERTISED_100baseT_Half |
                                                ADVERTISED_100baseT_Full |
  
                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
 -      } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
 +      } else if (!tg3_flag(tp, ENABLE_ASF)) {
                int i;
                u32 val;
  
                        msleep(1);
                }
        }
 -      if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
 +      if (tg3_flag(tp, WOL_CAP))
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                u32 mac_mode;
  
                if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 -                      if (do_low_power) {
 -                              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
 +                      if (do_low_power &&
 +                          !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 +                              tg3_phy_auxctl_write(tp,
 +                                             MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
 +                                             MII_TG3_AUXCTL_PCTL_WOL_EN |
 +                                             MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 +                                             MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
                                udelay(40);
                        }
  
                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700) {
 -                              u32 speed = (tp->tg3_flags &
 -                                           TG3_FLAG_WOL_SPEED_100MB) ?
 +                              u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }
  
 -              if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
 +              if (!tg3_flag(tp, 5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);
  
                mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
 -              if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
 -                  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
 -                  ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
 -                   (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
 +              if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
 +                  (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
                        mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
  
 -              if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
 +              if (tg3_flag(tp, ENABLE_APE))
                        mac_mode |= MAC_MODE_APE_TX_EN |
                                    MAC_MODE_APE_RX_EN |
                                    MAC_MODE_TDE_ENABLE;
                udelay(10);
        }
  
 -      if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
 +      if (!tg3_flag(tp, WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;
  
                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
 -      } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
 -                 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
 +      } else if (tg3_flag(tp, 5780_CLASS) ||
 +                 tg3_flag(tp, CPMU_PRESENT) ||
                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
                /* do nothing */
 -      } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
 -                   (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
 +      } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
                u32 newbits1, newbits2;
  
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
 -              } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
 +              } else if (tg3_flag(tp, 5705_PLUS)) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);
  
 -              if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 +              if (!tg3_flag(tp, 5705_PLUS)) {
                        u32 newbits3;
  
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                }
        }
  
 -      if (!(device_should_wake) &&
 -          !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
 +      if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
                tg3_power_down_phy(tp, do_low_power);
  
        tg3_frob_aux_power(tp);
  
                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
 -              if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
 +              if (!tg3_flag(tp, ENABLE_ASF)) {
                        int err;
  
                        err = tg3_nvram_lock(tp);
@@@ -2874,7 -2837,7 +2874,7 @@@ static void tg3_power_down(struct tg3 *
  {
        tg3_power_down_prepare(tp);
  
 -      pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
 +      pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
        pci_set_power_state(tp->pdev, PCI_D3hot);
  }
  
@@@ -2938,7 -2901,7 +2938,7 @@@ static void tg3_phy_copper_begin(struc
  
                new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
 -              if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
 +              if (tg3_flag(tp, WOL_SPEED_100MB))
                        new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
  
                tg3_writephy(tp, MII_ADVERTISE, new_adv);
                tw32(TG3_CPMU_EEE_MODE,
                     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
  
 -              /* Enable SM_DSP clock and tx 6dB coding. */
 -              val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
 -                    MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
 -                    MII_TG3_AUXCTL_ACTL_TX_6DB;
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
 +              TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
  
                switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
                case ASIC_REV_5717:
                }
                tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
  
 -              /* Turn off SM_DSP clock. */
 -              val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
 -                    MII_TG3_AUXCTL_ACTL_TX_6DB;
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
 +              TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
        }
  
        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
@@@ -3107,7 -3077,7 +3107,7 @@@ static int tg3_init_5401phy_dsp(struct 
  
        /* Turn off tap power management. */
        /* Set Extended packet length bit */
 -      err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
 +      err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
  
        err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
        err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
@@@ -3170,7 -3140,7 +3170,7 @@@ static int tg3_adv_1000T_flowctrl_ok(st
                if (curadv != reqadv)
                        return 0;
  
 -              if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
 +              if (tg3_flag(tp, PAUSE_AUTONEG))
                        tg3_readphy(tp, MII_LPA, rmtadv);
        } else {
                /* Reprogram the advertisement register, even if it
@@@ -3213,7 -3183,7 +3213,7 @@@ static int tg3_setup_copper_phy(struct 
                udelay(80);
        }
  
 -      tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
 +      tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
  
        /* Some third-party PHYs need to be reset on link going
         * down.
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
 -                  !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
 +                  !tg3_flag(tp, INIT_COMPLETE))
                        bmsr = 0;
  
                if (!(bmsr & BMSR_LSTATUS)) {
        current_duplex = DUPLEX_INVALID;
  
        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
 -              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
 -              tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
 -              if (!(val & (1 << 10))) {
 -                      val |= (1 << 10);
 -                      tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
 +              err = tg3_phy_auxctl_read(tp,
 +                                        MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
 +                                        &val);
 +              if (!err && !(val & (1 << 10))) {
 +                      tg3_phy_auxctl_write(tp,
 +                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
 +                                           val | (1 << 10));
                        goto relink;
                }
        }
@@@ -3417,7 -3385,7 +3417,7 @@@ relink
  
        tg3_phy_eee_adjust(tp, current_link_up);
  
 -      if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
 +      if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
 -          ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
 -           (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
 +          (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
        }
  
        /* Prevent send BD corruption. */
 -      if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
 +      if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 oldlnkctl, newlnkctl;
  
                pci_read_config_word(tp->pdev,
@@@ -3835,7 -3804,7 +3835,7 @@@ static void tg3_init_bcm8002(struct tg
        int i;
  
        /* Reset when initting first time or we have a link. */
 -      if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
 +      if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;
  
@@@ -4096,9 -4065,9 +4096,9 @@@ static int tg3_setup_fiber_phy(struct t
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
 +      if (!tg3_flag(tp, HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
 -          (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
 +          tg3_flag(tp, INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);
  
 -      if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
 +      if (tg3_flag(tp, HW_AUTONEG))
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
@@@ -4328,7 -4297,7 +4328,7 @@@ static int tg3_setup_fiber_mii_phy(stru
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;
 -                      } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 +                      } else if (!tg3_flag(tp, 5780_CLASS)) {
                                /* Link is up via parallel detect */
                        } else {
                                current_link_up = 0;
@@@ -4425,7 -4394,6 +4425,7 @@@ static void tg3_serdes_parallel_detect(
  
  static int tg3_setup_phy(struct tg3 *tp, int force_reset)
  {
 +      u32 val;
        int err;
  
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                err = tg3_setup_copper_phy(tp, force_reset);
  
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
 -              u32 val, scale;
 +              u32 scale;
  
                val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
                if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
                tw32(GRC_MISC_CFG, val);
        }
  
 +      val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 +            (6 << TX_LENGTHS_IPG_SHIFT);
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +              val |= tr32(MAC_TX_LENGTHS) &
 +                     (TX_LENGTHS_JMB_FRM_LEN_MSK |
 +                      TX_LENGTHS_CNT_DWN_VAL_MSK);
 +
        if (tp->link_config.active_speed == SPEED_1000 &&
            tp->link_config.active_duplex == DUPLEX_HALF)
 -              tw32(MAC_TX_LENGTHS,
 -                   ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 -                    (6 << TX_LENGTHS_IPG_SHIFT) |
 -                    (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
 +              tw32(MAC_TX_LENGTHS, val |
 +                   (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
        else
 -              tw32(MAC_TX_LENGTHS,
 -                   ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 -                    (6 << TX_LENGTHS_IPG_SHIFT) |
 -                    (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
 +              tw32(MAC_TX_LENGTHS, val |
 +                   (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 +      if (!tg3_flag(tp, 5705_PLUS)) {
                if (netif_carrier_ok(tp->dev)) {
                        tw32(HOSTCC_STAT_COAL_TICKS,
                             tp->coal.stats_block_coalesce_usecs);
                }
        }
  
 -      if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
 -              u32 val = tr32(PCIE_PWR_MGMT_THRESH);
 +      if (tg3_flag(tp, ASPM_WORKAROUND)) {
 +              val = tr32(PCIE_PWR_MGMT_THRESH);
                if (!netif_carrier_ok(tp->dev))
                        val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
                              tp->pwrmgmt_thresh;
@@@ -4493,123 -4458,6 +4493,123 @@@ static inline int tg3_irq_sync(struct t
        return tp->irq_sync;
  }
  
 +static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
 +{
 +      int i;
 +
 +      dst = (u32 *)((u8 *)dst + off);
 +      for (i = 0; i < len; i += sizeof(u32))
 +              *dst++ = tr32(off + i);
 +}
 +
 +static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
 +{
 +      tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
 +      tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
 +      tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
 +      tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
 +      tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
 +      tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
 +      tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
 +      tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
 +      tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
 +      tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
 +      tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
 +      tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
 +      tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
 +      tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
 +      tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
 +      tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
 +      tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
 +      tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
 +      tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
 +
 +      if (tg3_flag(tp, SUPPORT_MSIX))
 +              tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
 +
 +      tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
 +      tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
 +      tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
 +      tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
 +      tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
 +      tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
 +      tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
 +      tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
 +
 +      if (!tg3_flag(tp, 5705_PLUS)) {
 +              tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
 +              tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
 +              tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
 +      }
 +
 +      tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
 +      tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
 +      tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
 +      tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
 +      tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
 +
 +      if (tg3_flag(tp, NVRAM))
 +              tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
 +}
 +
 +static void tg3_dump_state(struct tg3 *tp)
 +{
 +      int i;
 +      u32 *regs;
 +
 +      regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
 +      if (!regs) {
 +              netdev_err(tp->dev, "Failed allocating register dump buffer\n");
 +              return;
 +      }
 +
 +      if (tg3_flag(tp, PCI_EXPRESS)) {
 +              /* Read up to but not including private PCI registers */
 +              for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
 +                      regs[i / sizeof(u32)] = tr32(i);
 +      } else
 +              tg3_dump_legacy_regs(tp, regs);
 +
 +      for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
 +              if (!regs[i + 0] && !regs[i + 1] &&
 +                  !regs[i + 2] && !regs[i + 3])
 +                      continue;
 +
 +              netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
 +                         i * 4,
 +                         regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
 +      }
 +
 +      kfree(regs);
 +
 +      for (i = 0; i < tp->irq_cnt; i++) {
 +              struct tg3_napi *tnapi = &tp->napi[i];
 +
 +              /* SW status block */
 +              netdev_err(tp->dev,
 +                       "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
 +                         i,
 +                         tnapi->hw_status->status,
 +                         tnapi->hw_status->status_tag,
 +                         tnapi->hw_status->rx_jumbo_consumer,
 +                         tnapi->hw_status->rx_consumer,
 +                         tnapi->hw_status->rx_mini_consumer,
 +                         tnapi->hw_status->idx[0].rx_producer,
 +                         tnapi->hw_status->idx[0].tx_consumer);
 +
 +              netdev_err(tp->dev,
 +              "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
 +                         i,
 +                         tnapi->last_tag, tnapi->last_irq_tag,
 +                         tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
 +                         tnapi->rx_rcb_ptr,
 +                         tnapi->prodring.rx_std_prod_idx,
 +                         tnapi->prodring.rx_std_cons_idx,
 +                         tnapi->prodring.rx_jmb_prod_idx,
 +                         tnapi->prodring.rx_jmb_cons_idx);
 +      }
 +}
 +
  /* This is called whenever we suspect that the system chipset is re-
   * ordering the sequence of MMIO to the tx send mailbox. The symptom
   * is bogus tx completions. We try to recover by setting the
   */
  static void tg3_tx_recover(struct tg3 *tp)
  {
 -      BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
 +      BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);
  
        netdev_warn(tp->dev,
                    "and include system chipset information.\n");
  
        spin_lock(&tp->lock);
 -      tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
 +      tg3_flag_set(tp, TX_RECOVERY_PENDING);
        spin_unlock(&tp->lock);
  }
  
@@@ -4652,7 -4500,7 +4652,7 @@@ static void tg3_tx(struct tg3_napi *tna
        struct netdev_queue *txq;
        int index = tnapi - tp->napi;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
 +      if (tg3_flag(tp, ENABLE_TSS))
                index--;
  
        txq = netdev_get_tx_queue(tp->dev, index);
@@@ -4967,7 -4815,7 +4967,7 @@@ static int tg3_rx(struct tg3_napi *tnap
                        skb = copy_skb;
                }
  
 -              if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
 +              if ((tp->dev->features & NETIF_F_RXCSUM) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
@@@ -5020,7 -4868,7 +5020,7 @@@ next_pkt_nopost
        tw32_rx_mbox(tnapi->consmbox, sw_idx);
  
        /* Refill RX ring(s). */
 -      if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
 +      if (!tg3_flag(tp, ENABLE_RSS)) {
                if (work_mask & RXD_OPAQUE_RING_STD) {
                        tpr->rx_std_prod_idx = std_prod_idx &
                                               tp->rx_std_ring_mask;
  static void tg3_poll_link(struct tg3 *tp)
  {
        /* handle link change and other phy events */
 -      if (!(tp->tg3_flags &
 -            (TG3_FLAG_USE_LINKCHG_REG |
 -             TG3_FLAG_POLL_SERDES))) {
 +      if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                struct tg3_hw_status *sblk = tp->napi[0].hw_status;
  
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        sblk->status = SD_STATUS_UPDATED |
                                       (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
 -                      if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 +                      if (tg3_flag(tp, USE_PHYLIB)) {
                                tw32_f(MAC_STATUS,
                                     (MAC_STATUS_SYNC_CHANGED |
                                      MAC_STATUS_CFG_CHANGED |
@@@ -5207,7 -5057,7 +5207,7 @@@ static int tg3_poll_work(struct tg3_nap
        /* run TX completion thread */
        if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
                tg3_tx(tnapi);
 -              if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
 +              if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        return work_done;
        }
  
        if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_done += tg3_rx(tnapi, budget - work_done);
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
 +      if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
                struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
                int i, err = 0;
                u32 std_prod_idx = dpr->rx_std_prod_idx;
@@@ -5257,7 -5107,7 +5257,7 @@@ static int tg3_poll_msix(struct napi_st
        while (1) {
                work_done = tg3_poll_work(tnapi, work_done, budget);
  
 -              if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
 +              if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;
  
                if (unlikely(work_done >= budget))
@@@ -5291,40 -5141,6 +5291,40 @@@ tx_recovery
        return work_done;
  }
  
 +static void tg3_process_error(struct tg3 *tp)
 +{
 +      u32 val;
 +      bool real_error = false;
 +
 +      if (tg3_flag(tp, ERROR_PROCESSED))
 +              return;
 +
 +      /* Check Flow Attention register */
 +      val = tr32(HOSTCC_FLOW_ATTN);
 +      if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
 +              netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
 +              real_error = true;
 +      }
 +
 +      if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
 +              netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
 +              real_error = true;
 +      }
 +
 +      if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
 +              netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
 +              real_error = true;
 +      }
 +
 +      if (!real_error)
 +              return;
 +
 +      tg3_dump_state(tp);
 +
 +      tg3_flag_set(tp, ERROR_PROCESSED);
 +      schedule_work(&tp->reset_task);
 +}
 +
  static int tg3_poll(struct napi_struct *napi, int budget)
  {
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3_hw_status *sblk = tnapi->hw_status;
  
        while (1) {
 +              if (sblk->status & SD_STATUS_ERROR)
 +                      tg3_process_error(tp);
 +
                tg3_poll_link(tp);
  
                work_done = tg3_poll_work(tnapi, work_done, budget);
  
 -              if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
 +              if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;
  
                if (unlikely(work_done >= budget))
                        break;
  
 -              if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
 +              if (tg3_flag(tp, TAGGED_STATUS)) {
                        /* tp->last_tag is used in tg3_int_reenable() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
@@@ -5513,7 -5326,7 +5513,7 @@@ static irqreturn_t tg3_interrupt(int ir
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
 -              if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
 +              if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
@@@ -5562,7 -5375,7 +5562,7 @@@ static irqreturn_t tg3_interrupt_tagged
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
 -              if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
 +              if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
@@@ -5675,14 -5488,14 +5675,14 @@@ static void tg3_reset_task(struct work_
  
        tg3_full_lock(tp, 1);
  
 -      restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 -      tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 +      restart_timer = tg3_flag(tp, RESTART_TIMER);
 +      tg3_flag_clear(tp, RESTART_TIMER);
  
 -      if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
 +      if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
 -              tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
 -              tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
 +              tg3_flag_set(tp, MBOX_WRITE_REORDER);
 +              tg3_flag_clear(tp, TX_RECOVERY_PENDING);
        }
  
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
                tg3_phy_start(tp);
  }
  
 -static void tg3_dump_short_state(struct tg3 *tp)
 -{
 -      netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
 -                 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
 -      netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
 -                 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
 -}
 -
  static void tg3_tx_timeout(struct net_device *dev)
  {
        struct tg3 *tp = netdev_priv(dev);
  
        if (netif_msg_tx_err(tp)) {
                netdev_err(dev, "transmit timed out, resetting\n");
 -              tg3_dump_short_state(tp);
 +              tg3_dump_state(tp);
        }
  
        schedule_work(&tp->reset_task);
@@@ -5727,7 -5548,7 +5727,7 @@@ static inline int tg3_40bit_overflow_te
                                          int len)
  {
  #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
 -      if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
 +      if (tg3_flag(tp, 40BIT_DMA_BUG))
                return ((u64) mapping + len) > DMA_BIT_MASK(40);
        return 0;
  #else
@@@ -5774,8 -5595,8 +5774,8 @@@ static int tigon3_dma_hwbug_workaround(
                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
 -              } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
 -                          tg3_4g_overflow_test(new_addr, new_skb->len)) {
 +              } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
 +                         tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        pci_unmap_single(tp->pdev, new_addr, new_skb->len,
                                         PCI_DMA_TODEVICE);
                        ret = -1;
@@@ -5842,7 -5663,7 +5842,7 @@@ static void tg3_set_txd(struct tg3_nap
  }
  
  /* hard_start_xmit for devices that don't have any bugs and
 - * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
 + * support TG3_FLAG_HW_TSO_2 and TG3_FLAG_HW_TSO_3 only.
   */
  static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
  
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
 +      if (tg3_flag(tp, ENABLE_TSS))
                tnapi++;
  
        /* We are running in BH disabled context with netif_tx_lock
                        hdrlen = ip_tcp_len + tcp_opt_len;
                }
  
 -              if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
 +              if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdrlen & 0xc) << 12;
                        if (hdrlen & 0x10)
                                base_flags |= 0x00000010;
        tnapi->tx_buffers[entry].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
 +      if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;
  
@@@ -6057,7 -5878,7 +6057,7 @@@ tg3_tso_bug_end
  }
  
  /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 - * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 + * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
   */
  static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                                          struct net_device *dev)
  
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
 +      if (tg3_flag(tp, ENABLE_TSS))
                tnapi++;
  
        /* We are running in BH disabled context with netif_tx_lock
                }
  
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
 -                           (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
 +                  tg3_flag(tp, TSO_BUG))
                        return tg3_tso_bug(tp, skb);
  
                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);
  
 -              if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
 +              if (tg3_flag(tp, HW_TSO_1) ||
 +                  tg3_flag(tp, HW_TSO_2) ||
 +                  tg3_flag(tp, HW_TSO_3)) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                                                                 IPPROTO_TCP,
                                                                 0);
  
 -              if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
 +              if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
 -              } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
 +              } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
 -              else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
 +              else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
 +      if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;
  
  
        would_hit_hwbug = 0;
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
 +      if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
                would_hit_hwbug = 1;
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
 +      if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
            tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
 +      if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
            tg3_40bit_overflow_test(tp, mapping, len))
                would_hit_hwbug = 1;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
 +      if (tg3_flag(tp, 5701_DMA_BUG))
                would_hit_hwbug = 1;
  
        tg3_set_txd(tnapi, entry, mapping, len, base_flags,
                        if (pci_dma_mapping_error(tp->pdev, mapping))
                                goto dma_error;
  
 -                      if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
 +                      if (tg3_flag(tp, SHORT_DMA_BUG) &&
                            len <= 8)
                                would_hit_hwbug = 1;
  
 -                      if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
 +                      if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
                            tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;
  
 -                      if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
 +                      if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
                            tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;
  
 -                      if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 +                      if (tg3_flag(tp, HW_TSO_1) ||
 +                          tg3_flag(tp, HW_TSO_2) ||
 +                          tg3_flag(tp, HW_TSO_3))
                                tg3_set_txd(tnapi, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
@@@ -6309,34 -6126,22 +6309,34 @@@ dma_error
        return NETDEV_TX_OK;
  }
  
 +static u32 tg3_fix_features(struct net_device *dev, u32 features)
 +{
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
 +              features &= ~NETIF_F_ALL_TSO;
 +
 +      return features;
 +}
 +
  static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
                               int new_mtu)
  {
        dev->mtu = new_mtu;
  
        if (new_mtu > ETH_DATA_LEN) {
 -              if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
 -                      tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
 -                      ethtool_op_set_tso(dev, 0);
 +              if (tg3_flag(tp, 5780_CLASS)) {
 +                      netdev_update_features(dev);
 +                      tg3_flag_clear(tp, TSO_CAPABLE);
                } else {
 -                      tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
 +                      tg3_flag_set(tp, JUMBO_RING_ENABLE);
                }
        } else {
 -              if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
 -                      tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
 -              tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
 +              if (tg3_flag(tp, 5780_CLASS)) {
 +                      tg3_flag_set(tp, TSO_CAPABLE);
 +                      netdev_update_features(dev);
 +              }
 +              tg3_flag_clear(tp, JUMBO_RING_ENABLE);
        }
  }
  
@@@ -6390,7 -6195,7 +6390,7 @@@ static void tg3_rx_prodring_free(struc
                        tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
                                        tp->rx_pkt_map_sz);
  
 -              if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
 +              if (tg3_flag(tp, JUMBO_CAPABLE)) {
                        for (i = tpr->rx_jmb_cons_idx;
                             i != tpr->rx_jmb_prod_idx;
                             i = (i + 1) & tp->rx_jmb_ring_mask) {
                tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
                                tp->rx_pkt_map_sz);
  
 -      if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
 -          !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 +      if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
                        tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
                                        TG3_RX_JMB_MAP_SZ);
@@@ -6443,7 -6249,7 +6443,7 @@@ static int tg3_rx_prodring_alloc(struc
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
  
        rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
 -      if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
 +      if (tg3_flag(tp, 5780_CLASS) &&
            tp->dev->mtu > ETH_DATA_LEN)
                rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
        tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
                }
        }
  
 -      if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
 -          (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
 +      if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                goto done;
  
        memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
  
 -      if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
 +      if (!tg3_flag(tp, JUMBO_RING_ENABLE))
                goto done;
  
        for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
@@@ -6550,7 -6357,8 +6550,7 @@@ static int tg3_rx_prodring_init(struct 
        if (!tpr->rx_std)
                goto err_out;
  
 -      if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
 -          !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 +      if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
                                              GFP_KERNEL);
                if (!tpr->rx_jmb_buffers)
@@@ -6748,8 -6556,8 +6748,8 @@@ static int tg3_alloc_consistent(struct 
                /* If multivector TSS is enabled, vector 0 does not handle
                 * tx interrupts.  Don't allocate any resources for it.
                 */
 -              if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
 -                  (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
 +              if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
 +                  (i && tg3_flag(tp, ENABLE_TSS))) {
                        tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
                                                    TG3_TX_RING_SIZE,
                                                    GFP_KERNEL);
                 * If multivector RSS is enabled, vector 0 does not handle
                 * rx or tx interrupts.  Don't allocate any resources for it.
                 */
 -              if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
 +              if (!i && tg3_flag(tp, ENABLE_RSS))
                        continue;
  
                tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
@@@ -6819,7 -6627,7 +6819,7 @@@ static int tg3_stop_block(struct tg3 *t
        unsigned int i;
        u32 val;
  
 -      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
 +      if (tg3_flag(tp, 5705_PLUS)) {
                switch (ofs) {
                case RCVLSC_MODE:
                case DMAC_MODE:
@@@ -6929,7 -6737,7 +6929,7 @@@ static void tg3_ape_send_event(struct t
        u32 apedata;
  
        /* NCSI does not support APE events */
 -      if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
 +      if (tg3_flag(tp, APE_HAS_NCSI))
                return;
  
        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
@@@ -6968,7 -6776,7 +6968,7 @@@ static void tg3_ape_driver_state_change
        u32 event;
        u32 apedata;
  
 -      if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
 +      if (!tg3_flag(tp, ENABLE_APE))
                return;
  
        switch (kind) {
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
  
                if (device_may_wakeup(&tp->pdev->dev) &&
 -                  (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
 +                  tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
@@@ -7026,7 -6834,7 +7026,7 @@@ static void tg3_write_sig_pre_reset(str
        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
  
 -      if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
 +      if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  /* tp->lock is held. */
  static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
  {
 -      if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
 +      if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
  /* tp->lock is held. */
  static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
  {
 -      if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
 +      if (tg3_flag(tp, ENABLE_ASF)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
@@@ -7131,8 -6939,9 +7131,8 @@@ static int tg3_poll_fw(struct tg3 *tp
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
 -      if (i >= 100000 &&
 -          !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
 -              tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
 +      if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
 +              tg3_flag_set(tp, NO_FWARE_REPORTED);
  
                netdev_info(tp->dev, "No firmware running\n");
        }
@@@ -7165,10 -6974,10 +7165,10 @@@ static void tg3_restore_pci_state(struc
        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
 -          (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
 +          tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
 +      if (tg3_flag(tp, ENABLE_APE))
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR |
                       PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
 -              if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
 +              if (tg3_flag(tp, PCI_EXPRESS))
                        pcie_set_readrq(tp->pdev, tp->pcie_readrq);
                else {
                        pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
        }
  
        /* Make sure PCI-X relaxed ordering bit is clear. */
 -      if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
 +      if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;
  
                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }
  
 -      if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
 +      if (tg3_flag(tp, 5780_CLASS)) {
  
                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
 -              if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
 +              if (tg3_flag(tp, USING_MSI)) {
                        u16 ctrl;
  
                        pci_read_config_word(tp->pdev,
@@@ -7243,7 -7052,7 +7243,7 @@@ static int tg3_chip_reset(struct tg3 *t
        tg3_save_pci_state(tp);
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 -          (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
 +          tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);
  
        /*
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
 -      tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
 +      tg3_flag_set(tp, CHIP_RESETTING);
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status) {
        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;
  
 -      if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
 +      if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 -                  !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
 +                  !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
        }
  
        /* Manage gphy power for all CPMU absent PCIe devices. */
 -      if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
 -          !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
 +      if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
  
        tw32(GRC_MISC_CFG, val);
  
        udelay(120);
  
 -      if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
 +      if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
                u16 val16;
  
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                 * Older PCIe devices only support the 128 byte
                 * MPS setting.  Enforce the restriction.
                 */
 -              if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
 +              if (!tg3_flag(tp, CPMU_PRESENT))
                        val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
                pci_write_config_word(tp->pdev,
                                      tp->pcie_cap + PCI_EXP_DEVCTL,
  
        tg3_restore_pci_state(tp);
  
 -      tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
 +      tg3_flag_clear(tp, CHIP_RESETTING);
 +      tg3_flag_clear(tp, ERROR_PROCESSED);
  
        val = 0;
 -      if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
 +      if (tg3_flag(tp, 5780_CLASS))
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
  
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }
  
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
 +      if (tg3_flag(tp, ENABLE_APE))
                tp->mac_mode = MAC_MODE_APE_TX_EN |
                               MAC_MODE_APE_RX_EN |
                               MAC_MODE_TDE_ENABLE;
  
        tg3_mdio_start(tp);
  
 -      if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
 +      if (tg3_flag(tp, PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 -          !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
 +          !tg3_flag(tp, 57765_PLUS)) {
                val = tr32(0x7c00);
  
                tw32(0x7c00, val | (1 << 25));
        }
  
 -      /* Reprobe ASF enable state.  */
 -      tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
 -      tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 +              val = tr32(TG3_CPMU_CLCK_ORIDE);
 +              tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
 +      }
 +
 +      /* Reprobe ASF enable state.  */
 +      tg3_flag_clear(tp, ENABLE_ASF);
 +      tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;
  
                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
 -                      tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
 +                      tg3_flag_set(tp, ENABLE_ASF);
                        tp->last_event_jiffies = jiffies;
 -                      if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
 -                              tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
 +                      if (tg3_flag(tp, 5750_PLUS))
 +                              tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }
        }
  
  /* tp->lock is held. */
  static void tg3_stop_fw(struct tg3 *tp)
  {
 -      if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
 -         !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
 +      if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                /* Wait for RX cpu to ACK the previous event. */
                tg3_wait_for_event_ack(tp);
  
@@@ -7520,7 -7325,8 +7520,7 @@@ static int tg3_halt_cpu(struct tg3 *tp
  {
        int i;
  
 -      BUG_ON(offset == TX_CPU_BASE &&
 -          (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
 +      BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val = tr32(GRC_VCPU_EXT_CTRL);
        }
  
        /* Clear firmware's nvram arbitration. */
 -      if (tp->tg3_flags & TG3_FLAG_NVRAM)
 +      if (tg3_flag(tp, NVRAM))
                tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
  }
@@@ -7573,14 -7379,15 +7573,14 @@@ static int tg3_load_firmware_cpu(struc
        int err, lock_err, i;
        void (*write_op)(struct tg3 *, u32, u32);
  
 -      if (cpu_base == TX_CPU_BASE &&
 -          (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 +      if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
                netdev_err(tp->dev,
                           "%s: Trying to load TX cpu firmware which is 5705\n",
                           __func__);
                return -EINVAL;
        }
  
 -      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
 +      if (tg3_flag(tp, 5705_PLUS))
                write_op = tg3_write_mem;
        else
                write_op = tg3_write_indirect_reg32;
@@@ -7666,6 -7473,8 +7666,6 @@@ static int tg3_load_5701_a0_firmware_fi
        return 0;
  }
  
 -/* 5705 needs a special version of the TSO firmware.  */
 -
  /* tp->lock is held. */
  static int tg3_load_tso_firmware(struct tg3 *tp)
  {
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;
  
 -      if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 +      if (tg3_flag(tp, HW_TSO_1) ||
 +          tg3_flag(tp, HW_TSO_2) ||
 +          tg3_flag(tp, HW_TSO_3))
                return 0;
  
        fw_data = (void *)tp->fw->data;
@@@ -7745,7 -7552,7 +7745,7 @@@ static int tg3_set_mac_addr(struct net_
        if (!netif_running(dev))
                return 0;
  
 -      if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
 +      if (tg3_flag(tp, ENABLE_ASF)) {
                u32 addr0_high, addr0_low, addr1_high, addr1_low;
  
                addr0_high = tr32(MAC_ADDR_0_HIGH);
@@@ -7780,7 -7587,7 +7780,7 @@@ static void tg3_set_bdinfo(struct tg3 *
                      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
                       maxlen_flags);
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 +      if (!tg3_flag(tp, 5705_PLUS))
                tg3_write_mem(tp,
                              (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
                              nic_addr);
@@@ -7791,7 -7598,7 +7791,7 @@@ static void __tg3_set_coalesce(struct t
  {
        int i;
  
 -      if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
 +      if (!tg3_flag(tp, ENABLE_TSS)) {
                tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
                tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
                tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
                tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
        }
  
 -      if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
 +      if (!tg3_flag(tp, ENABLE_RSS)) {
                tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
                tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
                tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
                tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
        }
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 +      if (!tg3_flag(tp, 5705_PLUS)) {
                u32 val = ec->stats_block_coalesce_usecs;
  
                tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
                reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames_irq);
  
 -              if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
 +              if (tg3_flag(tp, ENABLE_TSS)) {
                        reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_coalesce_usecs);
                        reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
                tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
  
 -              if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
 +              if (tg3_flag(tp, ENABLE_TSS)) {
                        tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
                        tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
                        tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
@@@ -7864,9 -7671,10 +7864,9 @@@ static void tg3_rings_reset(struct tg3 
        struct tg3_napi *tnapi = &tp->napi[0];
  
        /* Disable all transmit rings but the first. */
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 +      if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
 -      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 -               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 +      else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
  
  
        /* Disable all receive return rings but the first. */
 -      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 -          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 +      if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
 -      else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 +      else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
        tw32_mailbox_f(tp->napi[0].int_mbox, 1);
  
        /* Zero mailbox registers. */
 -      if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
 +      if (tg3_flag(tp, SUPPORT_MSIX)) {
                for (i = 1; i < tp->irq_max; i++) {
                        tp->napi[i].tx_prod = 0;
                        tp->napi[i].tx_cons = 0;
 -                      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
 +                      if (tg3_flag(tp, ENABLE_TSS))
                                tw32_mailbox(tp->napi[i].prodmbox, 0);
                        tw32_rx_mbox(tp->napi[i].consmbox, 0);
                        tw32_mailbox_f(tp->napi[i].int_mbox, 1);
                }
 -              if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
 +              if (!tg3_flag(tp, ENABLE_TSS))
                        tw32_mailbox(tp->napi[0].prodmbox, 0);
        } else {
                tp->napi[0].tx_prod = 0;
        }
  
        /* Make sure the NIC-based send BD rings are disabled. */
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 +      if (!tg3_flag(tp, 5705_PLUS)) {
                u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
                for (i = 0; i < 16; i++)
                        tw32_tx_mbox(mbox + i * 8, 0);
        }
  }
  
 +static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
 +{
 +      u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
 +
 +      if (!tg3_flag(tp, 5750_PLUS) ||
 +          tg3_flag(tp, 5780_CLASS) ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
 +              bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
 +      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 +               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
 +              bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
 +      else
 +              bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
 +
 +      nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
 +      host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
 +
 +      val = min(nic_rep_thresh, host_rep_thresh);
 +      tw32(RCVBDI_STD_THRESH, val);
 +
 +      if (tg3_flag(tp, 57765_PLUS))
 +              tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
 +
 +      if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
 +              return;
 +
 +      if (!tg3_flag(tp, 5705_PLUS))
 +              bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
 +      else
 +              bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
 +
 +      host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
 +
 +      val = min(bdcache_maxcnt / 2, host_rep_thresh);
 +      tw32(RCVBDI_JUMBO_THRESH, val);
 +
 +      if (tg3_flag(tp, 57765_PLUS))
 +              tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
 +}
 +
  /* tp->lock is held. */
  static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
  {
  
        tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
  
 -      if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
 +      if (tg3_flag(tp, INIT_COMPLETE))
                tg3_abort_hw(tp, 1);
  
        /* Enable MAC control of LPI */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
                        val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
  
 -              if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
 +              if (tg3_flag(tp, ENABLE_APE))
                        val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
  
                tw32_f(TG3_CPMU_EEE_MODE, val);
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }
  
 -      if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
 +      if (tg3_flag(tp, L1PLLPD_EN)) {
                u32 grc_mode = tr32(GRC_MODE);
  
                /* Access the lower 1K of PL PCIE block registers. */
         * other revision.  But do not set this on PCI Express
         * chips and don't even touch the clocks if the CPMU is present.
         */
 -      if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
 -              if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
 +      if (!tg3_flag(tp, CPMU_PRESENT)) {
 +              if (!tg3_flag(tp, PCI_EXPRESS))
                        tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }
  
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
 -          (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
 +          tg3_flag(tp, PCIX_MODE)) {
                val = tr32(TG3PCI_PCISTATE);
                val |= PCISTATE_RETRY_SAME_DMA;
                tw32(TG3PCI_PCISTATE, val);
        }
  
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
 +      if (tg3_flag(tp, ENABLE_APE)) {
                /* Allow reads and writes to the
                 * APE register and memory space.
                 */
        if (err)
                return err;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 +      if (tg3_flag(tp, 57765_PLUS)) {
                val = tr32(TG3PCI_DMA_RW_CTRL) &
                      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
                        val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
 +                      val |= DMA_RWCTRL_TAGGED_STAT_WA;
                tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
        tw32(GRC_MISC_CFG, val);
  
        /* Initialize MBUF/DESC pool. */
 -      if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
 +      if (tg3_flag(tp, 5750_PLUS)) {
                /* Do nothing.  */
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
                tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
                        tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
                tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
                tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
 -      } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
 +      } else if (tg3_flag(tp, TSO_CAPABLE)) {
                int fw_len;
  
                fw_len = tp->fw_len;
        val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                val |= BUFMGR_MODE_NO_TX_UNDERRUN;
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
 +              val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
        tw32(BUFMGR_MODE, val);
        for (i = 0; i < 2000; i++) {
                if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
                return -ENODEV;
        }
  
 -      /* Setup replenish threshold. */
 -      val = tp->rx_pending / 8;
 -      if (val == 0)
 -              val = 1;
 -      else if (val > tp->rx_std_max_post)
 -              val = tp->rx_std_max_post;
 -      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 -              if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
 -                      tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
 +      if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
 +              tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
  
 -              if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
 -                      val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
 -      }
 -
 -      tw32(RCVBDI_STD_THRESH, val);
 +      tg3_setup_rxbd_thresholds(tp);
  
        /* Initialize TG3_BDINFO's at:
         *  RCVDBDI_STD_BD:     standard eth size rx ring
             ((u64) tpr->rx_std_mapping >> 32));
        tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tpr->rx_std_mapping & 0xffffffff));
 -      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
 -          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
 +      if (!tg3_flag(tp, 5717_PLUS))
                tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
                     NIC_SRAM_RX_BUFFER_DESC);
  
        /* Disable the mini ring */
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 +      if (!tg3_flag(tp, 5705_PLUS))
                tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
                     BDINFO_FLAGS_DISABLED);
  
         * blocks on those devices that have them.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 -          ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
 -          !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
 -              /* Setup replenish threshold. */
 -              tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
 +          (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
  
 -              if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
 +              if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
                        tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
                             ((u64) tpr->rx_jmb_mapping >> 32));
                        tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
                             ((u64) tpr->rx_jmb_mapping & 0xffffffff));
 +                      val = TG3_RX_JMB_RING_SIZE(tp) <<
 +                            BDINFO_FLAGS_MAXLEN_SHIFT;
                        tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 -                           (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
 -                           BDINFO_FLAGS_USE_EXT_RECV);
 -                      if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
 +                           val | BDINFO_FLAGS_USE_EXT_RECV);
 +                      if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                                tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
                                     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
                             BDINFO_FLAGS_DISABLED);
                }
  
 -              if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 +              if (tg3_flag(tp, 57765_PLUS)) {
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 -                              val = RX_STD_MAX_SIZE_5705;
 +                              val = TG3_RX_STD_MAX_SIZE_5700;
                        else
 -                              val = RX_STD_MAX_SIZE_5717;
 +                              val = TG3_RX_STD_MAX_SIZE_5717;
                        val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
                        val |= (TG3_RX_STD_DMA_SZ << 2);
                } else
                        val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
        } else
 -              val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
 +              val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
  
        tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
  
        tpr->rx_std_prod_idx = tp->rx_pending;
        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
  
 -      tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
 -                        tp->rx_jumbo_pending : 0;
 +      tpr->rx_jmb_prod_idx =
 +              tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
  
 -      if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 -              tw32(STD_REPLENISH_LWM, 32);
 -              tw32(JMB_REPLENISH_LWM, 16);
 -      }
 -
        tg3_rings_reset(tp);
  
        /* Initialize MAC address and backoff seed. */
        /* The slot time is changed by tg3_setup_phy if we
         * run at gigabit with half duplex.
         */
 -      tw32(MAC_TX_LENGTHS,
 -           (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 -           (6 << TX_LENGTHS_IPG_SHIFT) |
 -           (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
 +      val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 +            (6 << TX_LENGTHS_IPG_SHIFT) |
 +            (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +              val |= tr32(MAC_TX_LENGTHS) &
 +                     (TX_LENGTHS_JMB_FRM_LEN_MSK |
 +                      TX_LENGTHS_CNT_DWN_VAL_MSK);
 +
 +      tw32(MAC_TX_LENGTHS, val);
  
        /* Receive rules. */
        tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
 -              if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
 +              if (tg3_flag(tp, TSO_CAPABLE) &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
                } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
 -                         !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
 +                         !tg3_flag(tp, IS_5788)) {
                        rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
                }
        }
  
 -      if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
 +      if (tg3_flag(tp, PCI_EXPRESS))
                rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
  
 -      if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 +      if (tg3_flag(tp, HW_TSO_1) ||
 +          tg3_flag(tp, HW_TSO_2) ||
 +          tg3_flag(tp, HW_TSO_3))
                rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
  
 -      if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
 +      if (tg3_flag(tp, HW_TSO_3) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
                rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
  
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +              rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
 +
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 -          (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
 +          tg3_flag(tp, 57765_PLUS)) {
                val = tr32(TG3_RDMA_RSRVCTRL_REG);
 -              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                        val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
                                 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
                                 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
                     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
        }
  
 -      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
                tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
                     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
        }
  
        /* Receive/send statistics. */
 -      if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
 +      if (tg3_flag(tp, 5750_PLUS)) {
                val = tr32(RCVLPC_STATS_ENABLE);
                val &= ~RCVLPC_STATSENAB_DACK_FIX;
                tw32(RCVLPC_STATS_ENABLE, val);
        } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
 -                 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
 +                 tg3_flag(tp, TSO_CAPABLE)) {
                val = tr32(RCVLPC_STATS_ENABLE);
                val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
                tw32(RCVLPC_STATS_ENABLE, val);
  
        __tg3_set_coalesce(tp, &tp->coal);
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 +      if (!tg3_flag(tp, 5705_PLUS)) {
                /* Status/statistics block address.  See tg3_timer,
                 * the tg3_periodic_fetch_stats call there, and
                 * tg3_get_stats to see how this works for 5705/5750 chips.
  
        tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
        tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 +      if (!tg3_flag(tp, 5705_PLUS))
                tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
  
        if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                udelay(10);
        }
  
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
 +      if (tg3_flag(tp, ENABLE_APE))
                tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
        else
                tp->mac_mode = 0;
        tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
                MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
 +      if (!tg3_flag(tp, 5705_PLUS) &&
            !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
                tp->mac_mode |= MAC_MODE_LINK_POLARITY;
        udelay(40);
  
        /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
 -       * If TG3_FLG2_IS_NIC is zero, we should read the
 +       * If TG3_FLAG_IS_NIC is zero, we should read the
         * register to preserve the GPIO settings for LOMs. The GPIOs,
         * whether used as inputs or outputs, are set by boot code after
         * reset.
         */
 -      if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
 +      if (!tg3_flag(tp, IS_NIC)) {
                u32 gpio_mask;
  
                gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
                tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
  
                /* GPIO1 must be driven high for eeprom write protect */
 -              if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
 +              if (tg3_flag(tp, EEPROM_WRITE_PROT))
                        tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
                                               GRC_LCLCTRL_GPIO_OUTPUT1);
        }
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(100);
  
 -      if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
 -              tp->irq_cnt > 1) {
 +      if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
                val = tr32(MSGINT_MODE);
                val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
                tw32(MSGINT_MODE, val);
        }
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 +      if (!tg3_flag(tp, 5705_PLUS)) {
                tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
                udelay(40);
        }
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
 -              if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
 +              if (tg3_flag(tp, TSO_CAPABLE) &&
                    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
                     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
                        /* nothing */
                } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
 -                         !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
 +                         !tg3_flag(tp, IS_5788)) {
                        val |= WDMAC_MODE_RX_ACCEL;
                }
        }
  
        /* Enable host coalescing bug fix */
 -      if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
 +      if (tg3_flag(tp, 5755_PLUS))
                val |= WDMAC_MODE_STATUS_TAG_FIX;
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
        tw32_f(WDMAC_MODE, val);
        udelay(40);
  
 -      if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
 +      if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;
  
                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
        udelay(40);
  
        tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 +      if (!tg3_flag(tp, 5705_PLUS))
                tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
        tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
        tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
        val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
 -      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 -          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 +      if (tg3_flag(tp, LRG_PROD_RING_CAP))
                val |= RCVDBDI_MODE_LRG_RING_SZ;
        tw32(RCVDBDI_MODE, val);
        tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
 -      if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 +      if (tg3_flag(tp, HW_TSO_1) ||
 +          tg3_flag(tp, HW_TSO_2) ||
 +          tg3_flag(tp, HW_TSO_3))
                tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
        val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
 +      if (tg3_flag(tp, ENABLE_TSS))
                val |= SNDBDI_MODE_MULTI_TXQ_EN;
        tw32(SNDBDI_MODE, val);
        tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
                        return err;
        }
  
 -      if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
 +      if (tg3_flag(tp, TSO_CAPABLE)) {
                err = tg3_load_tso_firmware(tp);
                if (err)
                        return err;
        }
  
        tp->tx_mode = TX_MODE_ENABLE;
 -      if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
 +
 +      if (tg3_flag(tp, 5755_PLUS) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 +              val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
 +              tp->tx_mode &= ~val;
 +              tp->tx_mode |= tr32(MAC_TX_MODE) & val;
 +      }
 +
        tw32_f(MAC_TX_MODE, tp->tx_mode);
        udelay(100);
  
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
 +      if (tg3_flag(tp, ENABLE_RSS)) {
                u32 reg = MAC_RSS_INDIR_TBL_0;
                u8 *ent = (u8 *)&val;
  
        }
  
        tp->rx_mode = RX_MODE_ENABLE;
 -      if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
 +      if (tg3_flag(tp, 5755_PLUS))
                tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
 +      if (tg3_flag(tp, ENABLE_RSS))
                tp->rx_mode |= RX_MODE_RSS_ENABLE |
                               RX_MODE_RSS_ITBL_HASH_BITS_7 |
                               RX_MODE_RSS_IPV6_HASH_EN |
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
            (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                /* Use hardware link auto-negotiation */
 -              tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
 +              tg3_flag_set(tp, HW_AUTONEG);
        }
  
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        }
  
 -      if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
 +      if (!tg3_flag(tp, USE_PHYLIB)) {
                if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                        tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
                        tp->link_config.speed = tp->link_config.orig_speed;
        tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
        tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
  
 -      if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
 -          !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
 +      if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
                limit = 8;
        else
                limit = 16;
 -      if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
 +      if (tg3_flag(tp, ENABLE_ASF))
                limit -= 4;
        switch (limit) {
        case 16:
                break;
        }
  
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
 +      if (tg3_flag(tp, ENABLE_APE))
                /* Write our heartbeat update interval to APE. */
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
                                APE_HOST_HEARTBEAT_INT_DISABLE);
@@@ -8928,19 -8688,7 +8928,19 @@@ static void tg3_periodic_fetch_stats(st
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
  
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
 -      TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
 +              TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
 +      } else {
 +              u32 val = tr32(HOSTCC_FLOW_ATTN);
 +              val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
 +              if (val) {
 +                      tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
 +                      sp->rx_discards.low += val;
 +                      if (sp->rx_discards.low < val)
 +                              sp->rx_discards.high += 1;
 +              }
 +              sp->mbuf_lwm_thresh_hit = sp->rx_discards;
 +      }
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
  }
  
@@@ -8953,7 -8701,7 +8953,7 @@@ static void tg3_timer(unsigned long __o
  
        spin_lock(&tp->lock);
  
 -      if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
 +      if (!tg3_flag(tp, TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                }
  
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 -                      tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
 +                      tg3_flag_set(tp, RESTART_TIMER);
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
  
        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
 -              if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
 +              if (tg3_flag(tp, 5705_PLUS))
                        tg3_periodic_fetch_stats(tp);
  
                if (tp->setlpicnt && !--tp->setlpicnt) {
                             val | TG3_CPMU_EEEMD_LPI_ENABLE);
                }
  
 -              if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
 +              if (tg3_flag(tp, USE_LINKCHG_REG)) {
                        u32 mac_stat;
                        int phy_event;
  
  
                        if (phy_event)
                                tg3_setup_phy(tp, 0);
 -              } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
 +              } else if (tg3_flag(tp, POLL_SERDES)) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;
  
                                tg3_setup_phy(tp, 0);
                        }
                } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 -                         (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 +                         tg3_flag(tp, 5780_CLASS)) {
                        tg3_serdes_parallel_detect(tp);
                }
  
         * resets.
         */
        if (!--tp->asf_counter) {
 -              if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
 -                  !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
 +              if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                        tg3_wait_for_event_ack(tp);
  
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
@@@ -9086,16 -8835,16 +9086,16 @@@ static int tg3_request_irq(struct tg3 *
                name[IFNAMSIZ-1] = 0;
        }
  
 -      if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
 +      if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
                fn = tg3_msi;
 -              if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
 +              if (tg3_flag(tp, 1SHOT_MSI))
                        fn = tg3_msi_1shot;
 -              flags = IRQF_SAMPLE_RANDOM;
 +              flags = 0;
        } else {
                fn = tg3_interrupt;
 -              if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
 +              if (tg3_flag(tp, TAGGED_STATUS))
                        fn = tg3_interrupt_tagged;
 -              flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
 +              flags = IRQF_SHARED;
        }
  
        return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
@@@ -9119,7 -8868,8 +9119,7 @@@ static int tg3_test_interrupt(struct tg
         * Turn off MSI one shot mode.  Otherwise this test has no
         * observable way to know whether the interrupt was delivered.
         */
 -      if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
 -          (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
 +      if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
                val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
        }
  
        if (intr_ok) {
                /* Reenable MSI one shot mode. */
 -              if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
 -                  (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
 +              if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
                        val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
                        tw32(MSGINT_MODE, val);
                }
@@@ -9179,7 -8930,7 +9179,7 @@@ static int tg3_test_msi(struct tg3 *tp
        int err;
        u16 pci_cmd;
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
 +      if (!tg3_flag(tp, USING_MSI))
                return 0;
  
        /* Turn off SERR reporting in case MSI terminates with Master
  
        pci_disable_msi(tp->pdev);
  
 -      tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 +      tg3_flag_clear(tp, USING_MSI);
        tp->napi[0].irq_vec = tp->pdev->irq;
  
        err = tg3_request_irq(tp, 0);
@@@ -9306,11 -9057,9 +9306,11 @@@ static bool tg3_enable_msix(struct tg3 
        }
  
        if (tp->irq_cnt > 1) {
 -              tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
 -              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
 -                      tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
 +              tg3_flag_set(tp, ENABLE_RSS);
 +
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 +                      tg3_flag_set(tp, ENABLE_TSS);
                        netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
                }
        }
  
  static void tg3_ints_init(struct tg3 *tp)
  {
 -      if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
 -          !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
 +      if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
 +          !tg3_flag(tp, TAGGED_STATUS)) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                goto defcfg;
        }
  
 -      if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
 -              tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
 -      else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
 -               pci_enable_msi(tp->pdev) == 0)
 -              tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
 +      if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
 +              tg3_flag_set(tp, USING_MSIX);
 +      else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
 +              tg3_flag_set(tp, USING_MSI);
  
 -      if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
 +      if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
                u32 msi_mode = tr32(MSGINT_MODE);
 -              if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
 -                  tp->irq_cnt > 1)
 +              if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
  defcfg:
 -      if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
 +      if (!tg3_flag(tp, USING_MSIX)) {
                tp->irq_cnt = 1;
                tp->napi[0].irq_vec = tp->pdev->irq;
                netif_set_real_num_tx_queues(tp->dev, 1);
  
  static void tg3_ints_fini(struct tg3 *tp)
  {
 -      if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
 +      if (tg3_flag(tp, USING_MSIX))
                pci_disable_msix(tp->pdev);
 -      else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 +      else if (tg3_flag(tp, USING_MSI))
                pci_disable_msi(tp->pdev);
 -      tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
 -      tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
 +      tg3_flag_clear(tp, USING_MSI);
 +      tg3_flag_clear(tp, USING_MSIX);
 +      tg3_flag_clear(tp, ENABLE_RSS);
 +      tg3_flag_clear(tp, ENABLE_TSS);
  }
  
  static int tg3_open(struct net_device *dev)
                                return err;
                } else if (err) {
                        netdev_warn(tp->dev, "TSO capability disabled\n");
 -                      tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
 -              } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
 +                      tg3_flag_clear(tp, TSO_CAPABLE);
 +              } else if (!tg3_flag(tp, TSO_CAPABLE)) {
                        netdev_notice(tp->dev, "TSO capability restored\n");
 -                      tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
 +                      tg3_flag_set(tp, TSO_CAPABLE);
                }
        }
  
        tg3_full_lock(tp, 0);
  
        tg3_disable_ints(tp);
 -      tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
 +      tg3_flag_clear(tp, INIT_COMPLETE);
  
        tg3_full_unlock(tp);
  
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
 -              if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
 +              if (tg3_flag(tp, TAGGED_STATUS))
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;
        if (err)
                goto err_out3;
  
 -      if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
 +      if (tg3_flag(tp, USING_MSI)) {
                err = tg3_test_msi(tp);
  
                if (err) {
                        goto err_out2;
                }
  
 -              if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
 -                  (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
 +              if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
                        u32 val = tr32(PCIE_TRANSACTION_CFG);
  
                        tw32(PCIE_TRANSACTION_CFG,
        tg3_full_lock(tp, 0);
  
        add_timer(&tp->timer);
 -      tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
 +      tg3_flag_set(tp, INIT_COMPLETE);
        tg3_enable_ints(tp);
  
        tg3_full_unlock(tp);
@@@ -9527,7 -9277,7 +9527,7 @@@ static int tg3_close(struct net_device 
  
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
 -      tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
 +      tg3_flag_clear(tp, INIT_COMPLETE);
  
        tg3_full_unlock(tp);
  
@@@ -9784,7 -9534,7 +9784,7 @@@ static void __tg3_set_rx_mode(struct ne
        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
 -      if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
 +      if (!tg3_flag(tp, ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
  #endif
  
@@@ -9838,26 -9588,82 +9838,26 @@@ static void tg3_set_rx_mode(struct net_
        tg3_full_unlock(tp);
  }
  
 -#define TG3_REGDUMP_LEN               (32 * 1024)
 -
  static int tg3_get_regs_len(struct net_device *dev)
  {
 -      return TG3_REGDUMP_LEN;
 +      return TG3_REG_BLK_SIZE;
  }
  
  static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
  {
 -      u32 *p = _p;
        struct tg3 *tp = netdev_priv(dev);
 -      u8 *orig_p = _p;
 -      int i;
  
        regs->version = 0;
  
 -      memset(p, 0, TG3_REGDUMP_LEN);
 +      memset(_p, 0, TG3_REG_BLK_SIZE);
  
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return;
  
        tg3_full_lock(tp, 0);
  
 -#define __GET_REG32(reg)      (*(p)++ = tr32(reg))
 -#define GET_REG32_LOOP(base, len)             \
 -do {  p = (u32 *)(orig_p + (base));           \
 -      for (i = 0; i < len; i += 4)            \
 -              __GET_REG32((base) + i);        \
 -} while (0)
 -#define GET_REG32_1(reg)                      \
 -do {  p = (u32 *)(orig_p + (reg));            \
 -      __GET_REG32((reg));                     \
 -} while (0)
 -
 -      GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
 -      GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
 -      GET_REG32_LOOP(MAC_MODE, 0x4f0);
 -      GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
 -      GET_REG32_1(SNDDATAC_MODE);
 -      GET_REG32_LOOP(SNDBDS_MODE, 0x80);
 -      GET_REG32_LOOP(SNDBDI_MODE, 0x48);
 -      GET_REG32_1(SNDBDC_MODE);
 -      GET_REG32_LOOP(RCVLPC_MODE, 0x20);
 -      GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
 -      GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
 -      GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
 -      GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
 -      GET_REG32_1(RCVDCC_MODE);
 -      GET_REG32_LOOP(RCVBDI_MODE, 0x20);
 -      GET_REG32_LOOP(RCVCC_MODE, 0x14);
 -      GET_REG32_LOOP(RCVLSC_MODE, 0x08);
 -      GET_REG32_1(MBFREE_MODE);
 -      GET_REG32_LOOP(HOSTCC_MODE, 0x100);
 -      GET_REG32_LOOP(MEMARB_MODE, 0x10);
 -      GET_REG32_LOOP(BUFMGR_MODE, 0x58);
 -      GET_REG32_LOOP(RDMAC_MODE, 0x08);
 -      GET_REG32_LOOP(WDMAC_MODE, 0x08);
 -      GET_REG32_1(RX_CPU_MODE);
 -      GET_REG32_1(RX_CPU_STATE);
 -      GET_REG32_1(RX_CPU_PGMCTR);
 -      GET_REG32_1(RX_CPU_HWBKPT);
 -      GET_REG32_1(TX_CPU_MODE);
 -      GET_REG32_1(TX_CPU_STATE);
 -      GET_REG32_1(TX_CPU_PGMCTR);
 -      GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
 -      GET_REG32_LOOP(FTQ_RESET, 0x120);
 -      GET_REG32_LOOP(MSGINT_MODE, 0x0c);
 -      GET_REG32_1(DMAC_MODE);
 -      GET_REG32_LOOP(GRC_MODE, 0x4c);
 -      if (tp->tg3_flags & TG3_FLAG_NVRAM)
 -              GET_REG32_LOOP(NVRAM_CMD, 0x24);
 -
 -#undef __GET_REG32
 -#undef GET_REG32_LOOP
 -#undef GET_REG32_1
 +      tg3_dump_legacy_regs(tp, (u32 *)_p);
  
        tg3_full_unlock(tp);
  }
@@@ -9877,7 -9683,7 +9877,7 @@@ static int tg3_get_eeprom(struct net_de
        u32 i, offset, len, b_offset, b_count;
        __be32 val;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
 +      if (tg3_flag(tp, NO_NVRAM))
                return -EINVAL;
  
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
@@@ -9945,7 -9751,7 +9945,7 @@@ static int tg3_set_eeprom(struct net_de
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
 +      if (tg3_flag(tp, NO_NVRAM) ||
            eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;
  
@@@ -9997,7 -9803,7 +9997,7 @@@ static int tg3_get_settings(struct net_
  {
        struct tg3 *tp = netdev_priv(dev);
  
 -      if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 +      if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
  
        cmd->advertising = tp->link_config.advertising;
        if (netif_running(dev)) {
 -              cmd->speed = tp->link_config.active_speed;
 +              ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
                cmd->duplex = tp->link_config.active_duplex;
        } else {
 -              cmd->speed = SPEED_INVALID;
 +              ethtool_cmd_speed_set(cmd, SPEED_INVALID);
                cmd->duplex = DUPLEX_INVALID;
        }
        cmd->phy_address = tp->phy_addr;
  static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  {
        struct tg3 *tp = netdev_priv(dev);
 +      u32 speed = ethtool_cmd_speed(cmd);
  
 -      if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 +      if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                cmd->advertising &= mask;
        } else {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
 -                      if (cmd->speed != SPEED_1000)
 +                      if (speed != SPEED_1000)
                                return -EINVAL;
  
                        if (cmd->duplex != DUPLEX_FULL)
                                return -EINVAL;
                } else {
 -                      if (cmd->speed != SPEED_100 &&
 -                          cmd->speed != SPEED_10)
 +                      if (speed != SPEED_100 &&
 +                          speed != SPEED_10)
                                return -EINVAL;
                }
        }
                tp->link_config.duplex = DUPLEX_INVALID;
        } else {
                tp->link_config.advertising = 0;
 -              tp->link_config.speed = cmd->speed;
 +              tp->link_config.speed = speed;
                tp->link_config.duplex = cmd->duplex;
        }
  
@@@ -10144,12 -9949,14 +10144,12 @@@ static void tg3_get_wol(struct net_devi
  {
        struct tg3 *tp = netdev_priv(dev);
  
 -      if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
 -          device_can_wakeup(&tp->pdev->dev))
 +      if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
                wol->supported = WAKE_MAGIC;
        else
                wol->supported = 0;
        wol->wolopts = 0;
 -      if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
 -          device_can_wakeup(&tp->pdev->dev))
 +      if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
                wol->wolopts = WAKE_MAGIC;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
  }
@@@ -10162,18 -9969,19 +10162,18 @@@ static int tg3_set_wol(struct net_devic
        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
        if ((wol->wolopts & WAKE_MAGIC) &&
 -          !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
 +          !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
                return -EINVAL;
  
        device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
  
        spin_lock_bh(&tp->lock);
        if (device_may_wakeup(dp))
 -              tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
 +              tg3_flag_set(tp, WOL_ENABLE);
        else
 -              tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
 +              tg3_flag_clear(tp, WOL_ENABLE);
        spin_unlock_bh(&tp->lock);
  
 -
        return 0;
  }
  
@@@ -10189,6 -9997,33 +10189,6 @@@ static void tg3_set_msglevel(struct net
        tp->msg_enable = value;
  }
  
 -static int tg3_set_tso(struct net_device *dev, u32 value)
 -{
 -      struct tg3 *tp = netdev_priv(dev);
 -
 -      if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
 -              if (value)
 -                      return -EINVAL;
 -              return 0;
 -      }
 -      if ((dev->features & NETIF_F_IPV6_CSUM) &&
 -          ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
 -           (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
 -              if (value) {
 -                      dev->features |= NETIF_F_TSO6;
 -                      if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
 -                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 -                          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 -                           GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
 -                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 -                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 -                              dev->features |= NETIF_F_TSO_ECN;
 -              } else
 -                      dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
 -      }
 -      return ethtool_op_set_tso(dev, value);
 -}
 -
  static int tg3_nway_reset(struct net_device *dev)
  {
        struct tg3 *tp = netdev_priv(dev);
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                return -EINVAL;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 +      if (tg3_flag(tp, USE_PHYLIB)) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
@@@ -10229,7 -10064,7 +10229,7 @@@ static void tg3_get_ringparam(struct ne
  
        ering->rx_max_pending = tp->rx_std_ring_mask;
        ering->rx_mini_max_pending = 0;
 -      if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
 +      if (tg3_flag(tp, JUMBO_RING_ENABLE))
                ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
        else
                ering->rx_jumbo_max_pending = 0;
  
        ering->rx_pending = tp->rx_pending;
        ering->rx_mini_pending = 0;
 -      if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
 +      if (tg3_flag(tp, JUMBO_RING_ENABLE))
                ering->rx_jumbo_pending = tp->rx_jumbo_pending;
        else
                ering->rx_jumbo_pending = 0;
@@@ -10255,7 -10090,7 +10255,7 @@@ static int tg3_set_ringparam(struct net
            (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
            (ering->tx_pending <= MAX_SKB_FRAGS) ||
 -          ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
 +          (tg3_flag(tp, TSO_BUG) &&
             (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;
  
  
        tp->rx_pending = ering->rx_pending;
  
 -      if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
 +      if (tg3_flag(tp, MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;
@@@ -10296,7 -10131,7 +10296,7 @@@ static void tg3_get_pauseparam(struct n
  {
        struct tg3 *tp = netdev_priv(dev);
  
 -      epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
 +      epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
  
        if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
                epause->rx_pause = 1;
@@@ -10314,7 -10149,7 +10314,7 @@@ static int tg3_set_pauseparam(struct ne
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 +      if (tg3_flag(tp, USE_PHYLIB)) {
                u32 newadv;
                struct phy_device *phydev;
  
                        newadv = 0;
  
                if (epause->autoneg)
 -                      tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 +                      tg3_flag_set(tp, PAUSE_AUTONEG);
                else
 -                      tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
 +                      tg3_flag_clear(tp, PAUSE_AUTONEG);
  
                if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                        u32 oldadv = phydev->advertising &
                tg3_full_lock(tp, irq_sync);
  
                if (epause->autoneg)
 -                      tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 +                      tg3_flag_set(tp, PAUSE_AUTONEG);
                else
 -                      tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
 +                      tg3_flag_clear(tp, PAUSE_AUTONEG);
                if (epause->rx_pause)
                        tp->link_config.flowctrl |= FLOW_CTRL_RX;
                else
        return err;
  }
  
 -static u32 tg3_get_rx_csum(struct net_device *dev)
 -{
 -      struct tg3 *tp = netdev_priv(dev);
 -      return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
 -}
 -
 -static int tg3_set_rx_csum(struct net_device *dev, u32 data)
 -{
 -      struct tg3 *tp = netdev_priv(dev);
 -
 -      if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
 -              if (data != 0)
 -                      return -EINVAL;
 -              return 0;
 -      }
 -
 -      spin_lock_bh(&tp->lock);
 -      if (data)
 -              tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
 -      else
 -              tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
 -      spin_unlock_bh(&tp->lock);
 -
 -      return 0;
 -}
 -
 -static int tg3_set_tx_csum(struct net_device *dev, u32 data)
 -{
 -      struct tg3 *tp = netdev_priv(dev);
 -
 -      if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
 -              if (data != 0)
 -                      return -EINVAL;
 -              return 0;
 -      }
 -
 -      if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
 -              ethtool_op_set_tx_ipv6_csum(dev, data);
 -      else
 -              ethtool_op_set_tx_csum(dev, data);
 -
 -      return 0;
 -}
 -
  static int tg3_get_sset_count(struct net_device *dev, int sset)
  {
        switch (sset) {
@@@ -10438,38 -10317,35 +10438,38 @@@ static void tg3_get_strings(struct net_
        }
  }
  
 -static int tg3_phys_id(struct net_device *dev, u32 data)
 +static int tg3_set_phys_id(struct net_device *dev,
 +                          enum ethtool_phys_id_state state)
  {
        struct tg3 *tp = netdev_priv(dev);
 -      int i;
  
        if (!netif_running(tp->dev))
                return -EAGAIN;
  
 -      if (data == 0)
 -              data = UINT_MAX / 2;
 -
 -      for (i = 0; i < (data * 2); i++) {
 -              if ((i % 2) == 0)
 -                      tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
 -                                         LED_CTRL_1000MBPS_ON |
 -                                         LED_CTRL_100MBPS_ON |
 -                                         LED_CTRL_10MBPS_ON |
 -                                         LED_CTRL_TRAFFIC_OVERRIDE |
 -                                         LED_CTRL_TRAFFIC_BLINK |
 -                                         LED_CTRL_TRAFFIC_LED);
 +      switch (state) {
 +      case ETHTOOL_ID_ACTIVE:
 +              return 1;       /* cycle on/off once per second */
 +
 +      case ETHTOOL_ID_ON:
 +              tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
 +                   LED_CTRL_1000MBPS_ON |
 +                   LED_CTRL_100MBPS_ON |
 +                   LED_CTRL_10MBPS_ON |
 +                   LED_CTRL_TRAFFIC_OVERRIDE |
 +                   LED_CTRL_TRAFFIC_BLINK |
 +                   LED_CTRL_TRAFFIC_LED);
 +              break;
  
 -              else
 -                      tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
 -                                         LED_CTRL_TRAFFIC_OVERRIDE);
 +      case ETHTOOL_ID_OFF:
 +              tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
 +                   LED_CTRL_TRAFFIC_OVERRIDE);
 +              break;
  
 -              if (msleep_interruptible(500))
 -                      break;
 +      case ETHTOOL_ID_INACTIVE:
 +              tw32(MAC_LED_CTRL, tp->led_ctrl);
 +              break;
        }
 -      tw32(MAC_LED_CTRL, tp->led_ctrl);
 +
        return 0;
  }
  
@@@ -10480,80 -10356,6 +10480,80 @@@ static void tg3_get_ethtool_stats(struc
        memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
  }
  
 +static __be32 * tg3_vpd_readblock(struct tg3 *tp)
 +{
 +      int i;
 +      __be32 *buf;
 +      u32 offset = 0, len = 0;
 +      u32 magic, val;
 +
 +      if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
 +              return NULL;
 +
 +      if (magic == TG3_EEPROM_MAGIC) {
 +              for (offset = TG3_NVM_DIR_START;
 +                   offset < TG3_NVM_DIR_END;
 +                   offset += TG3_NVM_DIRENT_SIZE) {
 +                      if (tg3_nvram_read(tp, offset, &val))
 +                              return NULL;
 +
 +                      if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
 +                          TG3_NVM_DIRTYPE_EXTVPD)
 +                              break;
 +              }
 +
 +              if (offset != TG3_NVM_DIR_END) {
 +                      len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
 +                      if (tg3_nvram_read(tp, offset + 4, &offset))
 +                              return NULL;
 +
 +                      offset = tg3_nvram_logical_addr(tp, offset);
 +              }
 +      }
 +
 +      if (!offset || !len) {
 +              offset = TG3_NVM_VPD_OFF;
 +              len = TG3_NVM_VPD_LEN;
 +      }
 +
 +      buf = kmalloc(len, GFP_KERNEL);
 +      if (buf == NULL)
 +              return NULL;
 +
 +      if (magic == TG3_EEPROM_MAGIC) {
 +              for (i = 0; i < len; i += 4) {
 +                      /* The data is in little-endian format in NVRAM.
 +                       * Use the big-endian read routines to preserve
 +                       * the byte order as it exists in NVRAM.
 +                       */
 +                      if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
 +                              goto error;
 +              }
 +      } else {
 +              u8 *ptr;
 +              ssize_t cnt;
 +              unsigned int pos = 0;
 +
 +              ptr = (u8 *)&buf[0];
 +              for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
 +                      cnt = pci_read_vpd(tp->pdev, pos,
 +                                         len - pos, ptr);
 +                      if (cnt == -ETIMEDOUT || cnt == -EINTR)
 +                              cnt = 0;
 +                      else if (cnt < 0)
 +                              goto error;
 +              }
 +              if (pos != len)
 +                      goto error;
 +      }
 +
 +      return buf;
 +
 +error:
 +      kfree(buf);
 +      return NULL;
 +}
 +
  #define NVRAM_TEST_SIZE 0x100
  #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
  #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
@@@ -10567,7 -10369,7 +10567,7 @@@ static int tg3_test_nvram(struct tg3 *t
        __be32 *buf;
        int i, j, k, err = 0, size;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
 +      if (tg3_flag(tp, NO_NVRAM))
                return 0;
  
        if (tg3_nvram_read(tp, 0, &magic) != 0)
        if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;
  
 -      for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
 -              /* The data is in little-endian format in NVRAM.
 -               * Use the big-endian read routines to preserve
 -               * the byte order as it exists in NVRAM.
 -               */
 -              if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &buf[i/4]))
 -                      goto out;
 -      }
 +      kfree(buf);
 +
 +      buf = tg3_vpd_readblock(tp);
 +      if (!buf)
 +              return -ENOMEM;
  
        i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
                             PCI_VPD_LRDT_RO_DATA);
@@@ -10909,9 -10714,9 +10909,9 @@@ static int tg3_test_registers(struct tg
        };
  
        is_5705 = is_5750 = 0;
 -      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
 +      if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
 -              if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
 +              if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }
  
                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;
  
 -              if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
 +              if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;
  
@@@ -11045,15 -10850,16 +11045,15 @@@ static int tg3_test_memory(struct tg3 *
        int err = 0;
        int i;
  
 -      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 -          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 +      if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                mem_tbl = mem_tbl_57765;
 -      else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
 +      else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
 -      else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
 +      else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
        else
                mem_tbl = mem_tbl_570x;
  
  #define TG3_MAC_LOOPBACK      0
  #define TG3_PHY_LOOPBACK      1
 +#define TG3_TSO_LOOPBACK      2
 +
 +#define TG3_TSO_MSS           500
 +
 +#define TG3_TSO_IP_HDR_LEN    20
 +#define TG3_TSO_TCP_HDR_LEN   20
 +#define TG3_TSO_TCP_OPT_LEN   12
 +
 +static const u8 tg3_tso_header[] = {
 +0x08, 0x00,
 +0x45, 0x00, 0x00, 0x00,
 +0x00, 0x00, 0x40, 0x00,
 +0x40, 0x06, 0x00, 0x00,
 +0x0a, 0x00, 0x00, 0x01,
 +0x0a, 0x00, 0x00, 0x02,
 +0x0d, 0x00, 0xe0, 0x00,
 +0x00, 0x00, 0x01, 0x00,
 +0x00, 0x00, 0x02, 0x00,
 +0x80, 0x10, 0x10, 0x00,
 +0x14, 0x09, 0x00, 0x00,
 +0x01, 0x01, 0x08, 0x0a,
 +0x11, 0x11, 0x11, 0x11,
 +0x11, 0x11, 0x11, 0x11,
 +};
  
 -static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 +static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
  {
        u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
 -      u32 desc_idx, coal_now;
 +      u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        tnapi = &tp->napi[0];
        rnapi = &tp->napi[0];
        if (tp->irq_cnt > 1) {
 -              if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
 +              if (tg3_flag(tp, ENABLE_RSS))
                        rnapi = &tp->napi[1];
 -              if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
 +              if (tg3_flag(tp, ENABLE_TSS))
                        tnapi = &tp->napi[1];
        }
        coal_now = tnapi->coal_now | rnapi->coal_now;
                 * all newer ASIC revisions.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
 -                  (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
 +                  tg3_flag(tp, CPMU_PRESENT))
                        return 0;
  
                mac_mode = tp->mac_mode &
                           ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
                mac_mode |= MAC_MODE_PORT_INT_LPBACK;
 -              if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 +              if (!tg3_flag(tp, 5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                tw32(MAC_MODE, mac_mode);
 -      } else if (loopback_mode == TG3_PHY_LOOPBACK) {
 -              u32 val;
 -
 +      } else {
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        tg3_phy_fet_toggle_apd(tp, false);
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
                                break;
                        mdelay(1);
                }
 -      } else {
 -              return -EINVAL;
        }
  
        err = -EIO;
  
 -      tx_len = 1514;
 +      tx_len = pktsz;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);
  
 -      tw32(MAC_RX_MTU_SIZE, tx_len + 4);
 +      tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
 +
 +      if (loopback_mode == TG3_TSO_LOOPBACK) {
 +              struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
 +
 +              u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
 +                            TG3_TSO_TCP_OPT_LEN;
 +
 +              memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
 +                     sizeof(tg3_tso_header));
 +              mss = TG3_TSO_MSS;
 +
 +              val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
 +              num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
 +
 +              /* Set the total length field in the IP header */
 +              iph->tot_len = htons((u16)(mss + hdr_len));
 +
 +              base_flags = (TXD_FLAG_CPU_PRE_DMA |
 +                            TXD_FLAG_CPU_POST_DMA);
 +
 +              if (tg3_flag(tp, HW_TSO_1) ||
 +                  tg3_flag(tp, HW_TSO_2) ||
 +                  tg3_flag(tp, HW_TSO_3)) {
 +                      struct tcphdr *th;
 +                      val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
 +                      th = (struct tcphdr *)&tx_data[val];
 +                      th->check = 0;
 +              } else
 +                      base_flags |= TXD_FLAG_TCPUDP_CSUM;
 +
 +              if (tg3_flag(tp, HW_TSO_3)) {
 +                      mss |= (hdr_len & 0xc) << 12;
 +                      if (hdr_len & 0x10)
 +                              base_flags |= 0x00000010;
 +                      base_flags |= (hdr_len & 0x3e0) << 5;
 +              } else if (tg3_flag(tp, HW_TSO_2))
 +                      mss |= hdr_len << 9;
 +              else if (tg3_flag(tp, HW_TSO_1) ||
 +                       GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 +                      mss |= (TG3_TSO_TCP_OPT_LEN << 9);
 +              } else {
 +                      base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
 +              }
 +
 +              data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
 +      } else {
 +              num_pkts = 1;
 +              data_off = ETH_HLEN;
 +      }
  
 -      for (i = 14; i < tx_len; i++)
 +      for (i = data_off; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);
  
        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
  
        rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
  
 -      num_pkts = 0;
 -
 -      tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
 +      tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
 +                  base_flags, (mss << 1) | 1);
  
        tnapi->tx_prod++;
 -      num_pkts++;
  
        tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
        tr32_mailbox(tnapi->prodmbox);
        if (rx_idx != rx_start_idx + num_pkts)
                goto out;
  
 -      desc = &rnapi->rx_rcb[rx_start_idx];
 -      desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 -      opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 -      if (opaque_key != RXD_OPAQUE_RING_STD)
 -              goto out;
 +      val = data_off;
 +      while (rx_idx != rx_start_idx) {
 +              desc = &rnapi->rx_rcb[rx_start_idx++];
 +              desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 +              opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
  
 -      if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 -          (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
 -              goto out;
 +              if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 +                  (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
 +                      goto out;
  
 -      rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
 -      if (rx_len != tx_len)
 -              goto out;
 +              rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
 +                       - ETH_FCS_LEN;
  
 -      rx_skb = tpr->rx_std_buffers[desc_idx].skb;
 +              if (loopback_mode != TG3_TSO_LOOPBACK) {
 +                      if (rx_len != tx_len)
 +                              goto out;
  
 -      map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
 -      pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
 +                      if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
 +                              if (opaque_key != RXD_OPAQUE_RING_STD)
 +                                      goto out;
 +                      } else {
 +                              if (opaque_key != RXD_OPAQUE_RING_JUMBO)
 +                                      goto out;
 +                      }
 +              } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
 +                         (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
 +                          >> RXD_TCPCSUM_SHIFT == 0xffff) {
 +                      goto out;
 +              }
  
 -      for (i = 14; i < tx_len; i++) {
 -              if (*(rx_skb->data + i) != (u8) (i & 0xff))
 +              if (opaque_key == RXD_OPAQUE_RING_STD) {
 +                      rx_skb = tpr->rx_std_buffers[desc_idx].skb;
 +                      map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
 +                                           mapping);
 +              } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 +                      rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
 +                      map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
 +                                           mapping);
 +              } else
                        goto out;
 +
 +              pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
 +                                          PCI_DMA_FROMDEVICE);
 +
 +              for (i = data_off; i < rx_len; i++, val++) {
 +                      if (*(rx_skb->data + i) != (u8) (val & 0xff))
 +                              goto out;
 +              }
        }
 +
        err = 0;
  
        /* tg3_free_rings will unmap and free the rx_skb */
        return err;
  }
  
 -#define TG3_MAC_LOOPBACK_FAILED               1
 -#define TG3_PHY_LOOPBACK_FAILED               2
 -#define TG3_LOOPBACK_FAILED           (TG3_MAC_LOOPBACK_FAILED |      \
 -                                       TG3_PHY_LOOPBACK_FAILED)
 +#define TG3_STD_LOOPBACK_FAILED               1
 +#define TG3_JMB_LOOPBACK_FAILED               2
 +#define TG3_TSO_LOOPBACK_FAILED               4
 +
 +#define TG3_MAC_LOOPBACK_SHIFT                0
 +#define TG3_PHY_LOOPBACK_SHIFT                4
 +#define TG3_LOOPBACK_FAILED           0x00000077
  
  static int tg3_test_loopback(struct tg3 *tp)
  {
                goto done;
        }
  
 +      if (tg3_flag(tp, ENABLE_RSS)) {
 +              int i;
 +
 +              /* Reroute all rx packets to the 1st queue */
 +              for (i = MAC_RSS_INDIR_TBL_0;
 +                   i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
 +                      tw32(i, 0x0);
 +      }
 +
        /* Turn off gphy autopowerdown. */
        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, false);
  
 -      if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
 +      if (tg3_flag(tp, CPMU_PRESENT)) {
                int i;
                u32 status;
  
                                  CPMU_CTRL_LINK_AWARE_MODE));
        }
  
 -      if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
 -              err |= TG3_MAC_LOOPBACK_FAILED;
 +      if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
 +              err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
  
 -      if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
 +      if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
 +          tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
 +              err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
 +
 +      if (tg3_flag(tp, CPMU_PRESENT)) {
                tw32(TG3_CPMU_CTRL, cpmuctrl);
  
                /* Release the mutex */
        }
  
        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 -          !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
 -              if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
 -                      err |= TG3_PHY_LOOPBACK_FAILED;
 +          !tg3_flag(tp, USE_PHYLIB)) {
 +              if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
 +                      err |= TG3_STD_LOOPBACK_FAILED <<
 +                             TG3_PHY_LOOPBACK_SHIFT;
 +              if (tg3_flag(tp, TSO_CAPABLE) &&
 +                  tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
 +                      err |= TG3_TSO_LOOPBACK_FAILED <<
 +                             TG3_PHY_LOOPBACK_SHIFT;
 +              if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
 +                  tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
 +                      err |= TG3_JMB_LOOPBACK_FAILED <<
 +                             TG3_PHY_LOOPBACK_SHIFT;
        }
  
        /* Re-enable gphy autopowerdown. */
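
The TSO loopback case added above builds a canned 54-byte Ethertype+IP+TCP template (tg3_tso_header), a 52-byte header length (TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + TG3_TSO_TCP_OPT_LEN) and a 500-byte MSS, then packs the header length into the send descriptor differently per TSO generation. A small userspace sketch of that arithmetic, using only the constants and shifts visible in the hunk above (the program itself is illustrative, not driver code; HW_TSO_1 and the pre-TSO-engine chips only encode TG3_TSO_TCP_OPT_LEN and are omitted):

#include <stdio.h>

#define TG3_TSO_MSS             500
#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

int main(void)
{
        unsigned int hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
                               TG3_TSO_TCP_OPT_LEN;     /* 52 bytes */
        unsigned int mss2 = TG3_TSO_MSS, mss3 = TG3_TSO_MSS;
        unsigned int base_flags3 = 0;

        /* HW_TSO_2: the whole header length lands in bits 9+ of mss */
        mss2 |= hdr_len << 9;

        /* HW_TSO_3: the header length is split between mss and base_flags */
        mss3 |= (hdr_len & 0xc) << 12;
        if (hdr_len & 0x10)
                base_flags3 |= 0x00000010;
        base_flags3 |= (hdr_len & 0x3e0) << 5;

        printf("HW_TSO_2: mss word 0x%x, tg3_set_txd() mss arg 0x%x\n",
               mss2, (mss2 << 1) | 1);
        printf("HW_TSO_3: mss word 0x%x, extra base_flags 0x%x\n",
               mss3, base_flags3);
        return 0;
}

For HW_TSO_2 this yields an mss word of 0x69f4 (500 | 52 << 9), which tg3_set_txd() receives as (mss << 1) | 1. With the standard ETH_FRAME_LEN test frame, the 1514 - 66 = 1448 payload bytes left after the 12 bytes of MAC addresses and the 54-byte template split into num_pkts = DIV_ROUND_UP(1448, 500) = 3 segments, which is what the receive loop later checks rx_idx against.
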
@@@ -11489,7 -11176,7 +11489,7 @@@ static void tg3_self_test(struct net_de
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
 -              if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 +              if (!tg3_flag(tp, 5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);
  
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
 -                      tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
 +                      tg3_flag_set(tp, INIT_COMPLETE);
                        err2 = tg3_restart_hw(tp, 1);
                        if (!err2)
                                tg3_netif_start(tp);
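
tg3_test_loopback() now reports its result as a bitmask rather than the old MAC/PHY pass-fail pair: the STD, JMB and TSO failure bits occupy one nibble per loopback mode, shifted by TG3_MAC_LOOPBACK_SHIFT (0) or TG3_PHY_LOOPBACK_SHIFT (4), and TG3_LOOPBACK_FAILED (0x77) masks all six defined bits. A minimal decoding sketch (tg3_decode_loopback_err() is illustrative only, not driver code):

#include <stdio.h>

#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4

#define TG3_MAC_LOOPBACK_SHIFT          0
#define TG3_PHY_LOOPBACK_SHIFT          4

static void tg3_decode_loopback_err(unsigned int err)
{
        static const char *mode[] = { "MAC", "PHY" };
        static const int shift[] = { TG3_MAC_LOOPBACK_SHIFT,
                                     TG3_PHY_LOOPBACK_SHIFT };
        int i;

        for (i = 0; i < 2; i++) {
                unsigned int bits = (err >> shift[i]) & 0x7;

                printf("%s loopback: std=%s jumbo=%s tso=%s\n", mode[i],
                       bits & TG3_STD_LOOPBACK_FAILED ? "FAIL" : "ok",
                       bits & TG3_JMB_LOOPBACK_FAILED ? "FAIL" : "ok",
                       bits & TG3_TSO_LOOPBACK_FAILED ? "FAIL" : "ok");
        }
}

int main(void)
{
        /* e.g. only the PHY-mode jumbo loopback failed */
        tg3_decode_loopback_err(TG3_JMB_LOOPBACK_FAILED <<
                                TG3_PHY_LOOPBACK_SHIFT);
        return 0;
}
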
@@@ -11541,7 -11228,7 +11541,7 @@@ static int tg3_ioctl(struct net_device 
        struct tg3 *tp = netdev_priv(dev);
        int err;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 +      if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */
  
 -              if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
 -                  ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
 -                   !netif_running(dev)))
 +              if (!netif_running(dev))
                        return -EAGAIN;
  
                spin_lock_bh(&tp->lock);
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */
  
 -              if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
 -                  ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
 -                   !netif_running(dev)))
 +              if (!netif_running(dev))
                        return -EAGAIN;
  
                spin_lock_bh(&tp->lock);
@@@ -11606,7 -11297,7 +11606,7 @@@ static int tg3_set_coalesce(struct net_
        u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
        u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 +      if (!tg3_flag(tp, 5705_PLUS)) {
                max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
                max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
                max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
@@@ -11673,9 -11364,14 +11673,9 @@@ static const struct ethtool_ops tg3_eth
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
 -      .get_rx_csum            = tg3_get_rx_csum,
 -      .set_rx_csum            = tg3_set_rx_csum,
 -      .set_tx_csum            = tg3_set_tx_csum,
 -      .set_sg                 = ethtool_op_set_sg,
 -      .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
 -      .phys_id                = tg3_phys_id,
 +      .set_phys_id            = tg3_set_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
@@@ -11720,7 -11416,8 +11720,7 @@@ static void __devinit tg3_get_nvram_siz
  {
        u32 val;
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
 -          tg3_nvram_read(tp, 0, &val) != 0)
 +      if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
                return;
  
        /* Selfboot format */
@@@ -11755,19 -11452,19 +11755,19 @@@ static void __devinit tg3_get_nvram_inf
  
        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, FLASH);
        } else {
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
  
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
 -          (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 +          tg3_flag(tp, 5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
 -                      tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 +                      tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                case FLASH_VENDOR_ATMEL_EEPROM:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
 -                      tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 +                      tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ST:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
 -                      tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 +                      tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_SAIFUN:
                        tp->nvram_jedecnum = JEDEC_SAIFUN;
        } else {
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
        }
  }
  
@@@ -11835,29 -11532,29 +11835,29 @@@ static void __devinit tg3_get_5752_nvra
  
        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
 -              tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
 +              tg3_flag_set(tp, PROTECTED_NVRAM);
  
        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                tp->nvram_jedecnum = JEDEC_ATMEL;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
                break;
        }
  
 -      if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
 +      if (tg3_flag(tp, FLASH)) {
                tg3_nvram_get_pagesize(tp, nvcfg1);
        } else {
                /* For eeprom, set pagesize to maximum eeprom size */
@@@ -11876,7 -11573,7 +11876,7 @@@ static void __devinit tg3_get_5755_nvra
  
        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
 -              tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
 +              tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }
  
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
        case FLASH_5755VENDOR_ATMEL_FLASH_5:
                tp->nvram_jedecnum = JEDEC_ATMEL;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                        tp->nvram_size = (protect ?
@@@ -11936,7 -11633,7 +11936,7 @@@ static void __devinit tg3_get_5787_nvra
        case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
                tp->nvram_jedecnum = JEDEC_ATMEL;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }
@@@ -11970,7 -11667,7 +11970,7 @@@ static void __devinit tg3_get_5761_nvra
  
        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
 -              tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
 +              tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }
  
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
                tp->nvram_jedecnum = JEDEC_ATMEL;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 -              tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +              tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE16:
                tp->nvram_jedecnum = JEDEC_ST;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }
  static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
  {
        tp->nvram_jedecnum = JEDEC_ATMEL;
 -      tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 +      tg3_flag_set(tp, NVRAM_BUFFERED);
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  }
  
@@@ -12054,7 -11751,7 +12054,7 @@@ static void __devinit tg3_get_57780_nvr
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
  
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
  
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                }
                break;
        default:
 -              tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
 +              tg3_flag_set(tp, NO_NVRAM);
                return;
        }
  
        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
 -              tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
 +              tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
  }
  
  
@@@ -12127,7 -11824,7 +12127,7 @@@ static void __devinit tg3_get_5717_nvra
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
  
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
  
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
 +                      /* Detect size with tg3_nvram_get_size() */
 +                      break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
 -              tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
 -              tp->tg3_flags2 |= TG3_FLG2_FLASH;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
  
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
 -              case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
 +                      /* Detect size with tg3_nvram_get_size() */
 +                      break;
 +              case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                }
                break;
        default:
 -              tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
 +              tg3_flag_set(tp, NO_NVRAM);
                return;
        }
  
        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
 -              tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
 +              tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
 +}
 +
 +static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
 +{
 +      u32 nvcfg1, nvmpinstrp;
 +
 +      nvcfg1 = tr32(NVRAM_CFG1);
 +      nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
 +
 +      switch (nvmpinstrp) {
 +      case FLASH_5720_EEPROM_HD:
 +      case FLASH_5720_EEPROM_LD:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +
 +              nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
 +              tw32(NVRAM_CFG1, nvcfg1);
 +              if (nvmpinstrp == FLASH_5720_EEPROM_HD)
 +                      tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
 +              else
 +                      tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
 +              return;
 +      case FLASH_5720VENDOR_M_ATMEL_DB011D:
 +      case FLASH_5720VENDOR_A_ATMEL_DB011B:
 +      case FLASH_5720VENDOR_A_ATMEL_DB011D:
 +      case FLASH_5720VENDOR_M_ATMEL_DB021D:
 +      case FLASH_5720VENDOR_A_ATMEL_DB021B:
 +      case FLASH_5720VENDOR_A_ATMEL_DB021D:
 +      case FLASH_5720VENDOR_M_ATMEL_DB041D:
 +      case FLASH_5720VENDOR_A_ATMEL_DB041B:
 +      case FLASH_5720VENDOR_A_ATMEL_DB041D:
 +      case FLASH_5720VENDOR_M_ATMEL_DB081D:
 +      case FLASH_5720VENDOR_A_ATMEL_DB081D:
 +      case FLASH_5720VENDOR_ATMEL_45USPT:
 +              tp->nvram_jedecnum = JEDEC_ATMEL;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +
 +              switch (nvmpinstrp) {
 +              case FLASH_5720VENDOR_M_ATMEL_DB021D:
 +              case FLASH_5720VENDOR_A_ATMEL_DB021B:
 +              case FLASH_5720VENDOR_A_ATMEL_DB021D:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_256KB;
 +                      break;
 +              case FLASH_5720VENDOR_M_ATMEL_DB041D:
 +              case FLASH_5720VENDOR_A_ATMEL_DB041B:
 +              case FLASH_5720VENDOR_A_ATMEL_DB041D:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_512KB;
 +                      break;
 +              case FLASH_5720VENDOR_M_ATMEL_DB081D:
 +              case FLASH_5720VENDOR_A_ATMEL_DB081D:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_1MB;
 +                      break;
 +              default:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_128KB;
 +                      break;
 +              }
 +              break;
 +      case FLASH_5720VENDOR_M_ST_M25PE10:
 +      case FLASH_5720VENDOR_M_ST_M45PE10:
 +      case FLASH_5720VENDOR_A_ST_M25PE10:
 +      case FLASH_5720VENDOR_A_ST_M45PE10:
 +      case FLASH_5720VENDOR_M_ST_M25PE20:
 +      case FLASH_5720VENDOR_M_ST_M45PE20:
 +      case FLASH_5720VENDOR_A_ST_M25PE20:
 +      case FLASH_5720VENDOR_A_ST_M45PE20:
 +      case FLASH_5720VENDOR_M_ST_M25PE40:
 +      case FLASH_5720VENDOR_M_ST_M45PE40:
 +      case FLASH_5720VENDOR_A_ST_M25PE40:
 +      case FLASH_5720VENDOR_A_ST_M45PE40:
 +      case FLASH_5720VENDOR_M_ST_M25PE80:
 +      case FLASH_5720VENDOR_M_ST_M45PE80:
 +      case FLASH_5720VENDOR_A_ST_M25PE80:
 +      case FLASH_5720VENDOR_A_ST_M45PE80:
 +      case FLASH_5720VENDOR_ST_25USPT:
 +      case FLASH_5720VENDOR_ST_45USPT:
 +              tp->nvram_jedecnum = JEDEC_ST;
 +              tg3_flag_set(tp, NVRAM_BUFFERED);
 +              tg3_flag_set(tp, FLASH);
 +
 +              switch (nvmpinstrp) {
 +              case FLASH_5720VENDOR_M_ST_M25PE20:
 +              case FLASH_5720VENDOR_M_ST_M45PE20:
 +              case FLASH_5720VENDOR_A_ST_M25PE20:
 +              case FLASH_5720VENDOR_A_ST_M45PE20:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_256KB;
 +                      break;
 +              case FLASH_5720VENDOR_M_ST_M25PE40:
 +              case FLASH_5720VENDOR_M_ST_M45PE40:
 +              case FLASH_5720VENDOR_A_ST_M25PE40:
 +              case FLASH_5720VENDOR_A_ST_M45PE40:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_512KB;
 +                      break;
 +              case FLASH_5720VENDOR_M_ST_M25PE80:
 +              case FLASH_5720VENDOR_M_ST_M45PE80:
 +              case FLASH_5720VENDOR_A_ST_M25PE80:
 +              case FLASH_5720VENDOR_A_ST_M45PE80:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_1MB;
 +                      break;
 +              default:
 +                      tp->nvram_size = TG3_NVRAM_SIZE_128KB;
 +                      break;
 +              }
 +              break;
 +      default:
 +              tg3_flag_set(tp, NO_NVRAM);
 +              return;
 +      }
 +
 +      tg3_nvram_get_pagesize(tp, nvcfg1);
 +      if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
 +              tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
  }
  
  /* Chips other than 5700/5701 use the NVRAM for fetching info. */
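
Every hunk in this file follows the same conversion: the three packed flag words tg3_flags, tg3_flags2 and tg3_flags3 give way to the tg3_flag(), tg3_flag_set() and tg3_flag_clear() accessors over a single bitmap. The real definitions live in drivers/net/tg3.h, which is not part of this diff; the sketch below only illustrates the assumed pattern, and its enumerator and field names are placeholders:

#include <linux/types.h>        /* DECLARE_BITMAP() */
#include <linux/bitops.h>       /* test_bit(), set_bit(), clear_bit() */

enum TG3_FLAGS {
        TG3_FLAG_TSO_BUG = 0,
        TG3_FLAG_5705_PLUS,
        TG3_FLAG_ENABLE_ASF,
        /* ... one enumerator per former TG3_FLAG, TG3_FLG2 and TG3_FLG3 bit ... */
        TG3_FLAG_NUMBER_OF_FLAGS,
};

/* In the real driver the bitmap sits inside struct tg3. */
struct tg3_flags_sketch {
        DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
};

#define tg3_flag(tp, flag)        test_bit(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)    set_bit(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)  clear_bit(TG3_FLAG_##flag, (tp)->tg3_flags)

With one flag namespace, callers no longer need to know which of the three legacy u32 words a bit used to live in, which is what allows the mechanical one-line substitutions throughout this diff.
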
@@@ -12324,7 -11905,7 +12324,7 @@@ static void __devinit tg3_nvram_init(st
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
 -              tp->tg3_flags |= TG3_FLAG_NVRAM;
 +              tg3_flag_set(tp, NVRAM);
  
                if (tg3_nvram_lock(tp)) {
                        netdev_warn(tp->dev,
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                        tg3_get_5717_nvram_info(tp);
 +              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +                      tg3_get_5720_nvram_info(tp);
                else
                        tg3_get_nvram_info(tp);
  
                tg3_nvram_unlock(tp);
  
        } else {
 -              tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
 +              tg3_flag_clear(tp, NVRAM);
 +              tg3_flag_clear(tp, NVRAM_BUFFERED);
  
                tg3_get_eeprom_size(tp);
        }
@@@ -12550,7 -12128,7 +12550,7 @@@ static int tg3_nvram_write_block_buffer
                        nvram_cmd |= NVRAM_CMD_LAST;
  
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
 -                  !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
 +                  !tg3_flag(tp, 5755_PLUS) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {
  
  
                                break;
                }
 -              if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
 +              if (!tg3_flag(tp, FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }
@@@ -12576,13 -12154,13 +12576,13 @@@ static int tg3_nvram_write_block(struc
  {
        int ret;
  
 -      if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
 +      if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
                       ~GRC_LCLCTRL_GPIO_OUTPUT1);
                udelay(40);
        }
  
 -      if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
 +      if (!tg3_flag(tp, NVRAM)) {
                ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
        } else {
                u32 grc_mode;
                        return ret;
  
                tg3_enable_nvram_access(tp);
 -              if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
 -                  !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
 +              if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
                        tw32(NVRAM_WRITE1, 0x406);
  
                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
  
 -              if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
 -                      !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
 -
 +              if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
                        ret = tg3_nvram_write_block_buffered(tp, offset, len,
                                buf);
                } else {
                tg3_nvram_unlock(tp);
        }
  
 -      if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
 +      if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                udelay(40);
        }
@@@ -12735,20 -12316,21 +12735,22 @@@ static void __devinit tg3_get_eeprom_hw
        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  
        /* Assume an onboard device and WOL capable by default.  */
 -      tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
 +      tg3_flag_set(tp, EEPROM_WRITE_PROT);
 +      tg3_flag_set(tp, WOL_CAP);
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
 -                      tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
 -                      tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
 +                      tg3_flag_clear(tp, EEPROM_WRITE_PROT);
 +                      tg3_flag_set(tp, IS_NIC);
                }
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
 -                      tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
 +                      tg3_flag_set(tp, ASPM_WORKAROUND);
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
-                   (val & VCPU_CFGSHDW_WOL_MAGPKT))
+                   (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
 -                      tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
 +                      tg3_flag_set(tp, WOL_ENABLE);
+                       device_set_wakeup_enable(&tp->pdev->dev, true);
+               }
                goto done;
        }
  
  
                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
 -                      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 +                      if (!tg3_flag(tp, 5705_PLUS))
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                        else
                                tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
                }
  
 -              if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
 +              if (tg3_flag(tp, 5750_PLUS))
                        led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
                else
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  
                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
 -                      tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
 +                      tg3_flag_set(tp, EEPROM_WRITE_PROT);
                        if ((tp->pdev->subsystem_vendor ==
                             PCI_VENDOR_ID_ARIMA) &&
                            (tp->pdev->subsystem_device == 0x205a ||
                             tp->pdev->subsystem_device == 0x2063))
 -                              tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
 +                              tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                } else {
 -                      tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
 -                      tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
 +                      tg3_flag_clear(tp, EEPROM_WRITE_PROT);
 +                      tg3_flag_set(tp, IS_NIC);
                }
  
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
 -                      tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
 -                      if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
 -                              tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
 +                      tg3_flag_set(tp, ENABLE_ASF);
 +                      if (tg3_flag(tp, 5750_PLUS))
 +                              tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }
  
                if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
 -                      (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
 -                      tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
 +                  tg3_flag(tp, 5750_PLUS))
 +                      tg3_flag_set(tp, ENABLE_APE);
  
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
 -                      tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
 +                      tg3_flag_clear(tp, WOL_CAP);
  
 -              if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
 +              if (tg3_flag(tp, WOL_CAP) &&
-                   (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
+                   (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
 -                      tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
 +                      tg3_flag_set(tp, WOL_ENABLE);
+                       device_set_wakeup_enable(&tp->pdev->dev, true);
+               }
  
                if (cfg2 & (1 << 17))
                        tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
                if (cfg2 & (1 << 18))
                        tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
  
 -              if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
 -                  ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 -                    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
 +              if ((tg3_flag(tp, 57765_PLUS) ||
 +                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 +                    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
  
 -              if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
 +              if (tg3_flag(tp, PCI_EXPRESS) &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 -                  !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
 +                  !tg3_flag(tp, 57765_PLUS)) {
                        u32 cfg3;
  
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
                        if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
 -                              tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
 +                              tg3_flag_set(tp, ASPM_WORKAROUND);
                }
  
                if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
 -                      tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
 +                      tg3_flag_set(tp, RGMII_INBAND_DISABLE);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
 -                      tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
 +                      tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
 -                      tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
 +                      tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
        }
  done:
 -      if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
 +      if (tg3_flag(tp, WOL_CAP))
                device_set_wakeup_enable(&tp->pdev->dev,
 -                               tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
 +                                       tg3_flag(tp, WOL_ENABLE));
        else
                device_set_wakeup_capable(&tp->pdev->dev, false);
  }
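
The WOL hunks above carry changes from both parents of the merge (the flag conversion on one side and the device_set_wakeup_enable() additions on the other), which makes the combined columns hard to read. Collapsed into the merged result, with no new logic, the 5906 VCPU path, the NIC_SRAM config path and the final done: label become:

                /* ASIC_REV_5906: WOL policy comes from the VCPU shadow register */
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tg3_flag_set(tp, ASPM_WORKAROUND);
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
                    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }
                goto done;

                /* Other chips: WOL policy comes from the NVRAM config block */
                if (tg3_flag(tp, WOL_CAP) &&
                    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }

done:
        if (tg3_flag(tp, WOL_CAP))
                device_set_wakeup_enable(&tp->pdev->dev,
                                         tg3_flag(tp, WOL_ENABLE));
        else
                device_set_wakeup_capable(&tp->pdev->dev, false);

The net effect is that the wakeup source is registered with the PM core as soon as WOL_ENABLE is set, in addition to the catch-all update at the done: label.
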
@@@ -13008,17 -12592,18 +13012,17 @@@ static int __devinit tg3_phy_probe(stru
        int err;
  
        /* flow control autonegotiation is default behavior */
 -      tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 +      tg3_flag_set(tp, PAUSE_AUTONEG);
        tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
 +      if (tg3_flag(tp, USE_PHYLIB))
                return tg3_phy_init(tp);
  
        /* Reading the PHY ID register can conflict with ASF
         * firmware access to the PHY hardware.
         */
        err = 0;
 -      if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
 -          (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
 +      if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
                hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
        } else {
                /* Now read the physical PHY_ID from the chip and verify
        tg3_phy_init_link_config(tp);
  
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
 -          !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
 -          !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
 +          !tg3_flag(tp, ENABLE_APE) &&
 +          !tg3_flag(tp, ENABLE_ASF)) {
                u32 bmsr, adv_reg, tg3_ctrl, mask;
  
                tg3_readphy(tp, MII_BMSR, &bmsr);
@@@ -13136,11 -12721,46 +13140,11 @@@ static void __devinit tg3_read_vpd(stru
        u8 *vpd_data;
        unsigned int block_end, rosize, len;
        int j, i = 0;
 -      u32 magic;
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
 -          tg3_nvram_read(tp, 0x0, &magic))
 -              goto out_no_vpd;
 -
 -      vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
 +      vpd_data = (u8 *)tg3_vpd_readblock(tp);
        if (!vpd_data)
                goto out_no_vpd;
  
 -      if (magic == TG3_EEPROM_MAGIC) {
 -              for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
 -                      u32 tmp;
 -
 -                      /* The data is in little-endian format in NVRAM.
 -                       * Use the big-endian read routines to preserve
 -                       * the byte order as it exists in NVRAM.
 -                       */
 -                      if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
 -                              goto out_not_found;
 -
 -                      memcpy(&vpd_data[i], &tmp, sizeof(tmp));
 -              }
 -      } else {
 -              ssize_t cnt;
 -              unsigned int pos = 0;
 -
 -              for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
 -                      cnt = pci_read_vpd(tp->pdev, pos,
 -                                         TG3_NVM_VPD_LEN - pos,
 -                                         &vpd_data[pos]);
 -                      if (cnt == -ETIMEDOUT || cnt == -EINTR)
 -                              cnt = 0;
 -                      else if (cnt < 0)
 -                              goto out_not_found;
 -              }
 -              if (pos != TG3_NVM_VPD_LEN)
 -                      goto out_not_found;
 -      }
 -
        i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
                             PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
@@@ -13394,7 -13014,7 +13398,7 @@@ static void __devinit tg3_read_mgmtfw_v
        if (offset == TG3_NVM_DIR_END)
                return;
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 +      if (!tg3_flag(tp, 5705_PLUS))
                start = 0x08000000;
        else if (tg3_nvram_read(tp, offset - 4, &start))
                return;
@@@ -13434,7 -13054,8 +13438,7 @@@ static void __devinit tg3_read_dash_ver
        u32 apedata;
        char *fwtype;
  
 -      if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
 -          !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
 +      if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
                return;
  
        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
  
        if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
 -              tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
 +              tg3_flag_set(tp, APE_HAS_NCSI);
                fwtype = "NCSI";
        } else {
                fwtype = "DASH";
@@@ -13472,7 -13093,7 +13476,7 @@@ static void __devinit tg3_read_fw_ver(s
        if (tp->fw_ver[0] != 0)
                vpd_vers = true;
  
 -      if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
 +      if (tg3_flag(tp, NO_NVRAM)) {
                strcat(tp->fw_ver, "sb");
                return;
        }
        else
                return;
  
 -      if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
 -           (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
 +      if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
                goto done;
  
        tg3_read_mgmtfw_ver(tp);
  
  static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
  
 -static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
 -{
 -      dev->vlan_features |= flags;
 -}
 -
  static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
  {
 -      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 -          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 -              return 4096;
 -      else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
 -               !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
 -              return 1024;
 +      if (tg3_flag(tp, LRG_PROD_RING_CAP))
 +              return TG3_RX_RET_MAX_SIZE_5717;
 +      else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
 +              return TG3_RX_RET_MAX_SIZE_5700;
        else
 -              return 512;
 +              return TG3_RX_RET_MAX_SIZE_5705;
  }
  
  static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
@@@ -13552,8 -13181,7 +13556,8 @@@ static int __devinit tg3_get_invariants
  
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
 -                  tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
 +                  tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
 +                  tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
                        pci_read_config_dword(tp->pdev,
                                              TG3PCI_GEN2_PRODID_ASICREV,
                                              &prod_id_asic_rev);
                        if (bridge->subordinate &&
                            (bridge->subordinate->number ==
                             tp->pdev->bus->number)) {
 -
 -                              tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
 +                              tg3_flag_set(tp, ICH_WORKAROUND);
                                pci_dev_put(bridge);
                                break;
                        }
                             tp->pdev->bus->number) &&
                            (bridge->subordinate->subordinate >=
                             tp->pdev->bus->number)) {
 -                              tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
 +                              tg3_flag_set(tp, 5701_DMA_BUG);
                                pci_dev_put(bridge);
                                break;
                        }
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
 -              tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
 -              tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
 +              tg3_flag_set(tp, 5780_CLASS);
 +              tg3_flag_set(tp, 40BIT_DMA_BUG);
                tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
        } else {
                struct pci_dev *bridge = NULL;
                             tp->pdev->bus->number) &&
                            (bridge->subordinate->subordinate >=
                             tp->pdev->bus->number)) {
 -                              tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
 +                              tg3_flag_set(tp, 40BIT_DMA_BUG);
                                pci_dev_put(bridge);
                                break;
                        }
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
 -          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                tp->pdev_peer = tg3_find_peer(tp);
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 -          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 -              tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
 +          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +              tg3_flag_set(tp, 5717_PLUS);
 +
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
 +          tg3_flag(tp, 5717_PLUS))
 +              tg3_flag_set(tp, 57765_PLUS);
  
        /* Intentionally exclude ASIC_REV_5906 */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 -          (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
 -              tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
 +          tg3_flag(tp, 57765_PLUS))
 +              tg3_flag_set(tp, 5755_PLUS);
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
 -          (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
 -          (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
 -              tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
 +          tg3_flag(tp, 5755_PLUS) ||
 +          tg3_flag(tp, 5780_CLASS))
 +              tg3_flag_set(tp, 5750_PLUS);
  
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
 -          (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
 -              tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
 +          tg3_flag(tp, 5750_PLUS))
 +              tg3_flag_set(tp, 5705_PLUS);
  
        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
 -      if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
 -              tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
 -      else {
 -              unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
 +      if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
 +              u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
  
 -              tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
 -              if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
 +              if (tg3_flag(tp, 5755_PLUS))
                        features |= NETIF_F_IPV6_CSUM;
                tp->dev->features |= features;
 -              vlan_features_add(tp->dev, features);
 +              tp->dev->hw_features |= features;
 +              tp->dev->vlan_features |= features;
        }
  
        /* Determine TSO capabilities */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                ; /* Do nothing. HW bug. */
 -      else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
 -              tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
 -      else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
 +      else if (tg3_flag(tp, 57765_PLUS))
 +              tg3_flag_set(tp, HW_TSO_3);
 +      else if (tg3_flag(tp, 5755_PLUS) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 -              tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
 -      else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
 -              tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
 +              tg3_flag_set(tp, HW_TSO_2);
 +      else if (tg3_flag(tp, 5750_PLUS)) {
 +              tg3_flag_set(tp, HW_TSO_1);
 +              tg3_flag_set(tp, TSO_BUG);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
                    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
 -                      tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
 +                      tg3_flag_clear(tp, TSO_BUG);
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
                   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
 -              tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
 +                      tg3_flag_set(tp, TSO_BUG);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
                        tp->fw_needed = FIRMWARE_TG3TSO5;
                else
  
        tp->irq_max = 1;
  
 -      if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
 -              tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
 +      if (tg3_flag(tp, 5750_PLUS)) {
 +              tg3_flag_set(tp, SUPPORT_MSI);
                if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
                    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
                     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
                     tp->pdev_peer == tp->pdev))
 -                      tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
 +                      tg3_flag_clear(tp, SUPPORT_MSI);
  
 -              if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
 +              if (tg3_flag(tp, 5755_PLUS) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 -                      tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
 +                      tg3_flag_set(tp, 1SHOT_MSI);
                }
  
 -              if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 -                      tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
 +              if (tg3_flag(tp, 57765_PLUS)) {
 +                      tg3_flag_set(tp, SUPPORT_MSIX);
                        tp->irq_max = TG3_IRQ_MAX_VECS;
                }
        }
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 -              tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
 -      else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
 -              tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
 -              tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
 +              tg3_flag_set(tp, SHORT_DMA_BUG);
 +      else if (!tg3_flag(tp, 5755_PLUS)) {
 +              tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
 +              tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
        }
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
 +      if (tg3_flag(tp, 5717_PLUS))
 +              tg3_flag_set(tp, LRG_PROD_RING_CAP);
 +
 +      if (tg3_flag(tp, 57765_PLUS) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
 -              tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
 +              tg3_flag_set(tp, USE_JUMBO_BDFLAG);
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
 -          (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
 -          (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
 -              tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
 +      if (!tg3_flag(tp, 5705_PLUS) ||
 +          tg3_flag(tp, 5780_CLASS) ||
 +          tg3_flag(tp, USE_JUMBO_BDFLAG))
 +              tg3_flag_set(tp, JUMBO_CAPABLE);
  
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
                              &pci_state_reg);
        if (tp->pcie_cap != 0) {
                u16 lnkctl;
  
 -              tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
 +              tg3_flag_set(tp, PCI_EXPRESS);
  
                tp->pcie_readrq = 4096;
 -              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 +              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                        tp->pcie_readrq = 2048;
  
                pcie_set_readrq(tp->pdev, tp->pcie_readrq);
                                     &lnkctl);
                if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 -                              tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
 +                              tg3_flag_clear(tp, HW_TSO_2);
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
 -                              tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
 +                              tg3_flag_set(tp, CLKREQ_BUG);
                } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
 -                      tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
 +                      tg3_flag_set(tp, L1PLLPD_EN);
                }
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
 -              tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
 -      } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
 -                 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 +              tg3_flag_set(tp, PCI_EXPRESS);
 +      } else if (!tg3_flag(tp, 5705_PLUS) ||
 +                 tg3_flag(tp, 5780_CLASS)) {
                tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
                if (!tp->pcix_cap) {
                        dev_err(&tp->pdev->dev,
                }
  
                if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
 -                      tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
 +                      tg3_flag_set(tp, PCIX_MODE);
        }
  
        /* If we have an AMD 762 or VIA K8T800 chipset, write
         * posted to the chip in order.
         */
        if (pci_dev_present(tg3_write_reorder_chipsets) &&
 -          !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
 -              tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
 +          !tg3_flag(tp, PCI_EXPRESS))
 +              tg3_flag_set(tp, MBOX_WRITE_REORDER);
  
        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                             &tp->pci_cacheline_sz);
                /* 5700 BX chips need to have their TX producer index
                 * mailboxes written twice to work around a bug.
                 */
 -              tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
 +              tg3_flag_set(tp, TXD_MBOX_HWBUG);
  
                /* If we are in PCI-X mode, enable register write workaround.
                 *
                 * The workaround is to use indirect register accesses
                 * for all chip writes not to mailbox registers.
                 */
 -              if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
 +              if (tg3_flag(tp, PCIX_MODE)) {
                        u32 pm_reg;
  
 -                      tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
 +                      tg3_flag_set(tp, PCIX_TARGET_HWBUG);
  
                      /* The chip can have its power management PCI config
                         * space registers clobbered due to this bug.
        }
  
        if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
 -              tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
 +              tg3_flag_set(tp, PCI_HIGH_SPEED);
        if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
 -              tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
 +              tg3_flag_set(tp, PCI_32BIT);
  
        /* Chip-specific fixup from Broadcom driver */
        if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
        tp->write32_rx_mbox = tg3_write32;
  
        /* Various workaround register access methods */
 -      if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
 +      if (tg3_flag(tp, PCIX_TARGET_HWBUG))
                tp->write32 = tg3_write_indirect_reg32;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
 -               ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
 +               (tg3_flag(tp, PCI_EXPRESS) &&
                  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
                /*
                 * Back to back register writes can cause problems on these
                tp->write32 = tg3_write_flush_reg32;
        }
  
 -      if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
 -          (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
 +      if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
 -              if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
 +              if (tg3_flag(tp, MBOX_WRITE_REORDER))
                        tp->write32_rx_mbox = tg3_write_flush_reg32;
        }
  
 -      if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
 +      if (tg3_flag(tp, ICH_WORKAROUND)) {
                tp->read32 = tg3_read_indirect_reg32;
                tp->write32 = tg3_write_indirect_reg32;
                tp->read32_mbox = tg3_read_indirect_mbox;
        }
  
        if (tp->write32 == tg3_write_indirect_reg32 ||
 -          ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
 +          (tg3_flag(tp, PCIX_MODE) &&
             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
 -              tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
 +              tg3_flag_set(tp, SRAM_USE_CONFIG);
  
        /* Get eeprom hw config before calling tg3_set_power_state().
 -       * In particular, the TG3_FLG2_IS_NIC flag must be
 +       * In particular, the TG3_FLAG_IS_NIC flag must be
         * determined before calling tg3_set_power_state() so that
         * we know whether or not to switch out of Vaux power.
         * When the flag is set, it means that GPIO1 is used for eeprom
         */
        tg3_get_eeprom_hw_cfg(tp);
  
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
 +      if (tg3_flag(tp, ENABLE_APE)) {
                /* Allow reads and writes to the
                 * APE register and memory space.
                 */
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 -          (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
 -              tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
 +          tg3_flag(tp, 57765_PLUS))
 +              tg3_flag_set(tp, CPMU_PRESENT);
  
 -      /* Set up tp->grc_local_ctrl before calling tg_power_up().
 +      /* Set up tp->grc_local_ctrl before calling tg3_power_up().
         * GPIO1 driven high will bring 5700's external PHY out of reset.
         * It is also used as eeprom write protect on LOMs.
         */
        tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
 -          (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
 +          tg3_flag(tp, EEPROM_WRITE_PROT))
                tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
                                       GRC_LCLCTRL_GPIO_OUTPUT1);
        /* Unused GPIO3 must be driven as output on 5752 because there
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                /* Turn off the debug UART. */
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
 -              if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
 +              if (tg3_flag(tp, IS_NIC))
                        /* Keep VMain power. */
                        tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                              GRC_LCLCTRL_GPIO_OUTPUT0;
        /* Derive initial jumbo mode from MTU assigned in
         * ether_setup() via the alloc_etherdev() call
         */
 -      if (tp->dev->mtu > ETH_DATA_LEN &&
 -          !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
 -              tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
 +      if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
 +              tg3_flag_set(tp, JUMBO_RING_ENABLE);
  
        /* Determine WakeOnLan speed to use. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
 -              tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
 +              tg3_flag_clear(tp, WOL_SPEED_100MB);
        } else {
 -              tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
 +              tg3_flag_set(tp, WOL_SPEED_100MB);
        }
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
                tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
  
 -      if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
 +      if (tg3_flag(tp, 5705_PLUS) &&
            !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
 -          !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
 +          !tg3_flag(tp, 57765_PLUS)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                        tp->phy_otp = TG3_OTP_DEFAULT;
        }
  
 -      if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
 +      if (tg3_flag(tp, CPMU_PRESENT))
                tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
        else
                tp->mi_mode = MAC_MI_MODE_BASE;
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
                tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
  
 +      /* Set these bits to enable statistics workaround. */
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
 +          tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
 +              tp->coalesce_mode |= HOSTCC_MODE_ATTN;
 +              tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
 +      }
 +
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 -              tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
 +              tg3_flag_set(tp, USE_PHYLIB);
  
        err = tg3_mdio_init(tp);
        if (err)
  
        /* Initialize data/descriptor byte/word swapping. */
        val = tr32(GRC_MODE);
 -      val &= GRC_MODE_HOST_STACKUP;
 +      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 +              val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
 +                      GRC_MODE_WORD_SWAP_B2HRX_DATA |
 +                      GRC_MODE_B2HRX_ENABLE |
 +                      GRC_MODE_HTX2B_ENABLE |
 +                      GRC_MODE_HOST_STACKUP);
 +      else
 +              val &= GRC_MODE_HOST_STACKUP;
 +
        tw32(GRC_MODE, val | tp->grc_mode);
  
        tg3_switch_clocks(tp);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
                              &pci_state_reg);
        if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
 -          (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
 +          !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
                u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
  
                if (chiprevid == CHIPREV_ID_5701_A0 ||
                        writel(0x00000000, sram_base + 4);
                        writel(0xffffffff, sram_base + 4);
                        if (readl(sram_base) != 0x00000000)
 -                              tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
 +                              tg3_flag_set(tp, PCIX_TARGET_HWBUG);
                }
        }
  
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
            (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
 -              tp->tg3_flags2 |= TG3_FLG2_IS_5788;
 +              tg3_flag_set(tp, IS_5788);
  
 -      if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
 +      if (!tg3_flag(tp, IS_5788) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
 -              tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
 -      if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
 +              tg3_flag_set(tp, TAGGED_STATUS);
 +      if (tg3_flag(tp, TAGGED_STATUS)) {
                tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
                                      HOSTCC_MODE_CLRTICK_TXBD);
  
        }
  
        /* Preserve the APE MAC_MODE bits */
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
 +      if (tg3_flag(tp, ENABLE_APE))
                tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
        else
                tp->mac_mode = TG3_DEF_MAC_MODE;
         * status register in those cases.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
 -              tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
 +              tg3_flag_set(tp, USE_LINKCHG_REG);
        else
 -              tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
 +              tg3_flag_clear(tp, USE_LINKCHG_REG);
  
        /* The led_ctrl is set during tg3_phy_probe, here we might
         * have to force the link status polling mechanism based
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
            !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
 -              tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
 +              tg3_flag_set(tp, USE_LINKCHG_REG);
        }
  
        /* For all SERDES we poll the MAC status register. */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 -              tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
 +              tg3_flag_set(tp, POLL_SERDES);
        else
 -              tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
 +              tg3_flag_clear(tp, POLL_SERDES);
  
        tp->rx_offset = NET_IP_ALIGN;
        tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
 -          (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
 +          tg3_flag(tp, PCIX_MODE)) {
                tp->rx_offset = 0;
  #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                tp->rx_copy_thresh = ~(u16)0;
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
                tp->rx_std_max_post = 8;
  
 -      if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
 +      if (tg3_flag(tp, ASPM_WORKAROUND))
                tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
                                     PCIE_PWR_MGMT_L1_THRESH_MSK;
  
@@@ -14351,14 -13958,15 +14355,14 @@@ static int __devinit tg3_get_device_add
  
        mac_offset = 0x7c;
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
 -          (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 +          tg3_flag(tp, 5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
 -      } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 -                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
 +      } else if (tg3_flag(tp, 5717_PLUS)) {
                if (PCI_FUNC(tp->pdev->devfn) & 1)
                        mac_offset = 0xcc;
                if (PCI_FUNC(tp->pdev->devfn) > 1)
        }
        if (!addr_ok) {
                /* Next, try NVRAM. */
 -              if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
 +              if (!tg3_flag(tp, NO_NVRAM) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
                        memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
@@@ -14434,7 -14042,7 +14438,7 @@@ static u32 __devinit tg3_calc_dma_bndry
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
 -          !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
 +          !tg3_flag(tp, PCI_EXPRESS))
                goto out;
  
  #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
  #endif
  #endif
  
 -      if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 +      if (tg3_flag(tp, 57765_PLUS)) {
                val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                goto out;
        }
         * other than 5700 and 5701 which do not implement the
         * boundary bits.
         */
 -      if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
 -          !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
 +      if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
                switch (cacheline_size) {
                case 16:
                case 32:
                                DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
                        break;
                }
 -      } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
 +      } else if (tg3_flag(tp, PCI_EXPRESS)) {
                switch (cacheline_size) {
                case 16:
                case 32:
@@@ -14663,13 -14272,13 +14667,13 @@@ static int __devinit tg3_test_dma(struc
  
        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
  
 -      if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
 +      if (tg3_flag(tp, 57765_PLUS))
                goto out;
  
 -      if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
 +      if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
 -      } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
 +      } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
 -                      if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
 +                      if (tg3_flag(tp, 40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
 -
                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
@@@ -14837,7 -14447,7 +14841,7 @@@ out_nofree
  
  static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
  {
 -      if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 +      if (tg3_flag(tp, 57765_PLUS)) {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO_57765;
 -      } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
 +      } else if (tg3_flag(tp, 5705_PLUS)) {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
@@@ -14915,7 -14525,6 +14919,7 @@@ static char * __devinit tg3_phy_string(
        case TG3_PHY_ID_BCM5718S:       return "5718S";
        case TG3_PHY_ID_BCM57765:       return "57765";
        case TG3_PHY_ID_BCM5719C:       return "5719C";
 +      case TG3_PHY_ID_BCM5720C:       return "5720C";
        case TG3_PHY_ID_BCM8002:        return "8002/serdes";
        case 0:                 return "serdes";
        default:                return "unknown";
  
  static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
  {
 -      if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
 +      if (tg3_flag(tp, PCI_EXPRESS)) {
                strcpy(str, "PCI Express");
                return str;
 -      } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
 +      } else if (tg3_flag(tp, PCIX_MODE)) {
                u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
  
                strcpy(str, "PCIX:");
                        strcat(str, "100MHz");
        } else {
                strcpy(str, "PCI:");
 -              if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
 +              if (tg3_flag(tp, PCI_HIGH_SPEED))
                        strcat(str, "66MHz");
                else
                        strcat(str, "33MHz");
        }
 -      if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
 +      if (tg3_flag(tp, PCI_32BIT))
                strcat(str, ":32-bit");
        else
                strcat(str, ":64-bit");
@@@ -15010,7 -14619,7 +15014,7 @@@ static void __devinit tg3_init_coal(str
                ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
        }
  
 -      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
 +      if (tg3_flag(tp, 5705_PLUS)) {
                ec->rx_coalesce_usecs_irq = 0;
                ec->tx_coalesce_usecs_irq = 0;
                ec->stats_block_coalesce_usecs = 0;
@@@ -15028,7 -14637,6 +15032,7 @@@ static const struct net_device_ops tg3_
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
 +      .ndo_fix_features       = tg3_fix_features,
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
  #endif
@@@ -15059,7 -14667,6 +15063,7 @@@ static int __devinit tg3_init_one(struc
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
 +      u32 hw_features = 0;
  
        printk_once(KERN_INFO "%s\n", version);
  
                goto err_out_iounmap;
        }
  
 -      if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
 -          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
 -          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
 +      if (tg3_flag(tp, 5755_PLUS) && !tg3_flag(tp, 5717_PLUS))
                dev->netdev_ops = &tg3_netdev_ops;
        else
                dev->netdev_ops = &tg3_netdev_ops_dma_bug;
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
 -      if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
 +      if (tg3_flag(tp, IS_5788))
                persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
 -      else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
 +      else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
  #ifdef CONFIG_HIGHMEM
                dma_mask = DMA_BIT_MASK(64);
        tg3_init_bufmgr_config(tp);
  
        /* Selectively allow TSO based on operating conditions */
 -      if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
 -          (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
 -              tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
 +      if ((tg3_flag(tp, HW_TSO_1) ||
 +           tg3_flag(tp, HW_TSO_2) ||
 +           tg3_flag(tp, HW_TSO_3)) ||
 +          (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
 +              tg3_flag_set(tp, TSO_CAPABLE);
        else {
 -              tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
 +              tg3_flag_clear(tp, TSO_CAPABLE);
 +              tg3_flag_clear(tp, TSO_BUG);
                tp->fw_needed = NULL;
        }
  
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
 -      if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
 -          (dev->features & NETIF_F_IP_CSUM)) {
 -              dev->features |= NETIF_F_TSO;
 -              vlan_features_add(dev, NETIF_F_TSO);
 -      }
 -      if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
 -          (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
 -              if (dev->features & NETIF_F_IPV6_CSUM) {
 -                      dev->features |= NETIF_F_TSO6;
 -                      vlan_features_add(dev, NETIF_F_TSO6);
 -              }
 -              if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
 +      if ((tg3_flag(tp, HW_TSO_1) ||
 +           tg3_flag(tp, HW_TSO_2) ||
 +           tg3_flag(tp, HW_TSO_3)) &&
 +          (dev->features & NETIF_F_IP_CSUM))
 +              hw_features |= NETIF_F_TSO;
 +      if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
 +              if (dev->features & NETIF_F_IPV6_CSUM)
 +                      hw_features |= NETIF_F_TSO6;
 +              if (tg3_flag(tp, HW_TSO_3) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
 -                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 -                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
 -                      dev->features |= NETIF_F_TSO_ECN;
 -                      vlan_features_add(dev, NETIF_F_TSO_ECN);
 -              }
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 +                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 +                      hw_features |= NETIF_F_TSO_ECN;
        }
  
 +      dev->hw_features |= hw_features;
 +      dev->features |= hw_features;
 +      dev->vlan_features |= hw_features;
 +
        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
 -          !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
 +          !tg3_flag(tp, TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
 -              tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
 +              tg3_flag_set(tp, MAX_RXPEND_64);
                tp->rx_pending = 63;
        }
  
                goto err_out_iounmap;
        }
  
 -      if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
 +      if (tg3_flag(tp, ENABLE_APE)) {
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
                        dev_err(&pdev->dev,
  
                tg3_ape_lock_init(tp);
  
 -              if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
 +              if (tg3_flag(tp, ENABLE_ASF))
                        tg3_read_dash_ver(tp);
        }
  
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;
  
 -              if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
 +              if (!tg3_flag(tp, SUPPORT_MSIX))
                        break;
  
                /*
                        ethtype = "10/100/1000Base-T";
  
                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
 -                          "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
 -                        (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
 +                          "(WireSpeed[%d], EEE[%d])\n",
 +                          tg3_phy_string(tp), ethtype,
 +                          (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
 +                          (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }
  
        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
 -                  (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
 -                  (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
 +                  (dev->features & NETIF_F_RXCSUM) != 0,
 +                  tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
 -                  (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
 -                  (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
 +                  tg3_flag(tp, ENABLE_ASF) != 0,
 +                  tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
  
 +      pci_save_state(pdev);
 +
        return 0;
  
  err_out_apeunmap:
@@@ -15422,7 -15025,7 +15426,7 @@@ static void __devexit tg3_remove_one(st
  
                cancel_work_sync(&tp->reset_task);
  
 -              if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 +              if (!tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }
@@@ -15468,7 -15071,7 +15472,7 @@@ static int tg3_suspend(struct device *d
  
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 -      tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
 +      tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);
  
        err = tg3_power_down_prepare(tp);
  
                tg3_full_lock(tp, 0);
  
 -              tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
 +              tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, 1);
                if (err2)
                        goto out;
@@@ -15512,7 -15115,7 +15516,7 @@@ static int tg3_resume(struct device *de
  
        tg3_full_lock(tp, 0);
  
 -      tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
 +      tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;
@@@ -15540,156 -15143,11 +15544,156 @@@ static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg
  
  #endif /* CONFIG_PM_SLEEP */
  
 +/**
 + * tg3_io_error_detected - called when PCI error is detected
 + * @pdev: Pointer to PCI device
 + * @state: The current pci connection state
 + *
 + * This function is called after a PCI bus error affecting
 + * this device has been detected.
 + */
 +static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 +                                            pci_channel_state_t state)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct tg3 *tp = netdev_priv(netdev);
 +      pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
 +
 +      netdev_info(netdev, "PCI I/O error detected\n");
 +
 +      rtnl_lock();
 +
 +      if (!netif_running(netdev))
 +              goto done;
 +
 +      tg3_phy_stop(tp);
 +
 +      tg3_netif_stop(tp);
 +
 +      del_timer_sync(&tp->timer);
 +      tg3_flag_clear(tp, RESTART_TIMER);
 +
 +      /* Want to make sure that the reset task doesn't run */
 +      cancel_work_sync(&tp->reset_task);
 +      tg3_flag_clear(tp, TX_RECOVERY_PENDING);
 +      tg3_flag_clear(tp, RESTART_TIMER);
 +
 +      netif_device_detach(netdev);
 +
 +      /* Clean up software state, even if MMIO is blocked */
 +      tg3_full_lock(tp, 0);
 +      tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 +      tg3_full_unlock(tp);
 +
 +done:
 +      if (state == pci_channel_io_perm_failure)
 +              err = PCI_ERS_RESULT_DISCONNECT;
 +      else
 +              pci_disable_device(pdev);
 +
 +      rtnl_unlock();
 +
 +      return err;
 +}
 +
 +/**
 + * tg3_io_slot_reset - called after the pci bus has been reset.
 + * @pdev: Pointer to PCI device
 + *
 + * Restart the card from scratch, as if from a cold-boot.
 + * At this point, the card has exprienced a hard reset,
 + * followed by fixups by BIOS, and has its config space
 + * set up identically to what it was at cold boot.
 + */
 +static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct tg3 *tp = netdev_priv(netdev);
 +      pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
 +      int err;
 +
 +      rtnl_lock();
 +
 +      if (pci_enable_device(pdev)) {
 +              netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
 +              goto done;
 +      }
 +
 +      pci_set_master(pdev);
 +      pci_restore_state(pdev);
 +      pci_save_state(pdev);
 +
 +      if (!netif_running(netdev)) {
 +              rc = PCI_ERS_RESULT_RECOVERED;
 +              goto done;
 +      }
 +
 +      err = tg3_power_up(tp);
 +      if (err) {
 +              netdev_err(netdev, "Failed to restore register access.\n");
 +              goto done;
 +      }
 +
 +      rc = PCI_ERS_RESULT_RECOVERED;
 +
 +done:
 +      rtnl_unlock();
 +
 +      return rc;
 +}
 +
 +/**
 + * tg3_io_resume - called when traffic can start flowing again.
 + * @pdev: Pointer to PCI device
 + *
 + * This callback is called when the error recovery driver tells
 + * us that it's OK to resume normal operation.
 + */
 +static void tg3_io_resume(struct pci_dev *pdev)
 +{
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct tg3 *tp = netdev_priv(netdev);
 +      int err;
 +
 +      rtnl_lock();
 +
 +      if (!netif_running(netdev))
 +              goto done;
 +
 +      tg3_full_lock(tp, 0);
 +      tg3_flag_set(tp, INIT_COMPLETE);
 +      err = tg3_restart_hw(tp, 1);
 +      tg3_full_unlock(tp);
 +      if (err) {
 +              netdev_err(netdev, "Cannot restart hardware after reset.\n");
 +              goto done;
 +      }
 +
 +      netif_device_attach(netdev);
 +
 +      tp->timer.expires = jiffies + tp->timer_offset;
 +      add_timer(&tp->timer);
 +
 +      tg3_netif_start(tp);
 +
 +      tg3_phy_start(tp);
 +
 +done:
 +      rtnl_unlock();
 +}
 +
 +static struct pci_error_handlers tg3_err_handler = {
 +      .error_detected = tg3_io_error_detected,
 +      .slot_reset     = tg3_io_slot_reset,
 +      .resume         = tg3_io_resume
 +};
 +
  static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
 +      .err_handler    = &tg3_err_handler,
        .driver.pm      = TG3_PM_OPS,
  };
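
The hunks above add PCI error-recovery callbacks to tg3 and wire them up through .err_handler. A minimal, hedged sketch of the same pci_error_handlers shape (example_* names are illustrative, not the driver's):

#include <linux/pci.h>

static pci_ers_result_t example_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	/* Quiesce the device; a permanent failure means give up. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t example_io_slot_reset(struct pci_dev *pdev)
{
	/* Re-enable and reconfigure the device after the slot reset. */
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void example_io_resume(struct pci_dev *pdev)
{
	/* Reprogram the hardware and restart traffic here. */
}

static struct pci_error_handlers example_err_handler = {
	.error_detected	= example_io_error_detected,
	.slot_reset	= example_io_slot_reset,
	.resume		= example_io_resume,
};

The handler table is then referenced from the driver's struct pci_driver via .err_handler, as the tg3_driver hunk above does.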
  
diff --combined drivers/net/usb/smsc95xx.c
index b374a9997908bd7f2667629a0d9784e25753140f,48d4efdb4959a61b6704d58178879593ca4edad2..f74f3ce7152630fc7adaf43cc6ce107fc8dd6214
@@@ -52,6 -52,8 +52,6 @@@ struct smsc95xx_priv 
        u32 hash_hi;
        u32 hash_lo;
        spinlock_t mac_cr_lock;
 -      bool use_tx_csum;
 -      bool use_rx_csum;
  };
  
  struct usb_context {
@@@ -457,7 -459,7 +457,7 @@@ static int smsc95xx_link_reset(struct u
  {
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        struct mii_if_info *mii = &dev->mii;
 -      struct ethtool_cmd ecmd;
 +      struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
        unsigned long flags;
        u16 lcladv, rmtadv;
        u32 intdata;
        lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
        rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
  
 -      netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x rmtadv: %04x\n",
 -                ecmd.speed, ecmd.duplex, lcladv, rmtadv);
 +      netif_dbg(dev, link, dev->net,
 +                "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n",
 +                ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv);
  
        spin_lock_irqsave(&pdata->mac_cr_lock, flags);
        if (ecmd.duplex != DUPLEX_FULL) {
@@@ -516,24 -517,22 +516,24 @@@ static void smsc95xx_status(struct usbn
  }
  
  /* Enable or disable Tx & Rx checksum offload engines */
 -static int smsc95xx_set_csums(struct usbnet *dev)
 +static int smsc95xx_set_features(struct net_device *netdev, u32 features)
  {
 -      struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 +      struct usbnet *dev = netdev_priv(netdev);
        u32 read_buf;
 -      int ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
 +      int ret;
 +
 +      ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
        if (ret < 0) {
                netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
                return ret;
        }
  
 -      if (pdata->use_tx_csum)
 +      if (features & NETIF_F_HW_CSUM)
                read_buf |= Tx_COE_EN_;
        else
                read_buf &= ~Tx_COE_EN_;
  
 -      if (pdata->use_rx_csum)
 +      if (features & NETIF_F_RXCSUM)
                read_buf |= Rx_COE_EN_;
        else
                read_buf &= ~Rx_COE_EN_;
@@@ -577,6 -576,43 +577,6 @@@ static int smsc95xx_ethtool_set_eeprom(
        return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data);
  }
  
 -static u32 smsc95xx_ethtool_get_rx_csum(struct net_device *netdev)
 -{
 -      struct usbnet *dev = netdev_priv(netdev);
 -      struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 -
 -      return pdata->use_rx_csum;
 -}
 -
 -static int smsc95xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val)
 -{
 -      struct usbnet *dev = netdev_priv(netdev);
 -      struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 -
 -      pdata->use_rx_csum = !!val;
 -
 -      return smsc95xx_set_csums(dev);
 -}
 -
 -static u32 smsc95xx_ethtool_get_tx_csum(struct net_device *netdev)
 -{
 -      struct usbnet *dev = netdev_priv(netdev);
 -      struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 -
 -      return pdata->use_tx_csum;
 -}
 -
 -static int smsc95xx_ethtool_set_tx_csum(struct net_device *netdev, u32 val)
 -{
 -      struct usbnet *dev = netdev_priv(netdev);
 -      struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 -
 -      pdata->use_tx_csum = !!val;
 -
 -      ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
 -      return smsc95xx_set_csums(dev);
 -}
 -
  static const struct ethtool_ops smsc95xx_ethtool_ops = {
        .get_link       = usbnet_get_link,
        .nway_reset     = usbnet_nway_reset,
        .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
        .get_eeprom     = smsc95xx_ethtool_get_eeprom,
        .set_eeprom     = smsc95xx_ethtool_set_eeprom,
 -      .get_tx_csum    = smsc95xx_ethtool_get_tx_csum,
 -      .set_tx_csum    = smsc95xx_ethtool_set_tx_csum,
 -      .get_rx_csum    = smsc95xx_ethtool_get_rx_csum,
 -      .set_rx_csum    = smsc95xx_ethtool_set_rx_csum,
  };
  
  static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@@ -690,7 -730,7 +690,7 @@@ static int smsc95xx_phy_initialize(stru
                msleep(10);
                bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
                timeout++;
-       } while ((bmcr & MII_BMCR) && (timeout < 100));
+       } while ((bmcr & BMCR_RESET) && (timeout < 100));
  
        if (timeout >= 100) {
                netdev_warn(dev->net, "timeout on PHY Reset");
  static int smsc95xx_reset(struct usbnet *dev)
  {
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 -      struct net_device *netdev = dev->net;
        u32 read_buf, write_buf, burst_cap;
        int ret = 0, timeout;
  
        }
  
        /* Enable or disable checksum offload engines */
 -      ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
 -      ret = smsc95xx_set_csums(dev);
 -      if (ret < 0) {
 -              netdev_warn(dev->net, "Failed to set csum offload: %d\n", ret);
 -              return ret;
 -      }
 +      smsc95xx_set_features(dev->net, dev->net->features);
  
        smsc95xx_set_multicast(dev->net);
  
@@@ -973,7 -1019,6 +973,7 @@@ static const struct net_device_ops smsc
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = smsc95xx_ioctl,
        .ndo_set_multicast_list = smsc95xx_set_multicast,
 +      .ndo_set_features       = smsc95xx_set_features,
  };
  
  static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
  
        spin_lock_init(&pdata->mac_cr_lock);
  
 -      pdata->use_tx_csum = DEFAULT_TX_CSUM_ENABLE;
 -      pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE;
 +      if (DEFAULT_TX_CSUM_ENABLE)
 +              dev->net->features |= NETIF_F_HW_CSUM;
 +      if (DEFAULT_RX_CSUM_ENABLE)
 +              dev->net->features |= NETIF_F_RXCSUM;
 +
 +      dev->net->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
  
        smsc95xx_init_mac_address(dev);
  
        dev->net->netdev_ops = &smsc95xx_netdev_ops;
        dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
        dev->net->flags |= IFF_MULTICAST;
 -      dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD;
 +      dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
        return 0;
  }
  
@@@ -1039,6 -1080,8 +1039,6 @@@ static void smsc95xx_rx_csum_offload(st
  
  static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
  {
 -      struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 -
        while (skb->len > 0) {
                u32 header, align_count;
                struct sk_buff *ax_skb;
  
                        /* last frame in this batch */
                        if (skb->len == size) {
 -                              if (pdata->use_rx_csum)
 +                              if (dev->net->features & NETIF_F_RXCSUM)
                                        smsc95xx_rx_csum_offload(skb);
                                skb_trim(skb, skb->len - 4); /* remove fcs */
                                skb->truesize = size + sizeof(struct sk_buff);
                        ax_skb->data = packet;
                        skb_set_tail_pointer(ax_skb, size);
  
 -                      if (pdata->use_rx_csum)
 +                      if (dev->net->features & NETIF_F_RXCSUM)
                                smsc95xx_rx_csum_offload(ax_skb);
                        skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
                        ax_skb->truesize = size + sizeof(struct sk_buff);
@@@ -1131,7 -1174,8 +1131,7 @@@ static u32 smsc95xx_calc_csum_preamble(
  static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
                                         struct sk_buff *skb, gfp_t flags)
  {
 -      struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 -      bool csum = pdata->use_tx_csum && (skb->ip_summed == CHECKSUM_PARTIAL);
 +      bool csum = skb->ip_summed == CHECKSUM_PARTIAL;
        int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD;
        u32 tx_cmd_a, tx_cmd_b;
  
diff --combined drivers/net/usb/usbnet.c
index 7bc9852bd57c136bbd8a9d1c36e665519bc6a4c4,9ab439d144ed0207758c8c5535d3fa1dcb3359b8..e6dd244669656e0cd2cbd9576ac6298ba1ef3537
@@@ -645,6 -645,7 +645,7 @@@ int usbnet_stop (struct net_device *net
        struct driver_info      *info = dev->driver_info;
        int                     retval;
  
+       clear_bit(EVENT_DEV_OPEN, &dev->flags);
        netif_stop_queue (net);
  
        netif_info(dev, ifdown, dev->net,
@@@ -736,6 -737,7 +737,7 @@@ int usbnet_open (struct net_device *net
                }
        }
  
+       set_bit(EVENT_DEV_OPEN, &dev->flags);
        netif_start_queue (net);
        netif_info(dev, ifup, dev->net,
                   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
@@@ -1259,6 -1261,9 +1261,9 @@@ void usbnet_disconnect (struct usb_inte
        if (dev->driver_info->unbind)
                dev->driver_info->unbind (dev, intf);
  
+       usb_kill_urb(dev->interrupt);
+       usb_free_urb(dev->interrupt);
        free_netdev(net);
        usb_put_dev (xdev);
  }
@@@ -1498,6 -1503,10 +1503,10 @@@ int usbnet_resume (struct usb_interfac
        int                     retval;
  
        if (!--dev->suspend_count) {
+               /* resume interrupt URBs */
+               if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
+                       usb_submit_urb(dev->interrupt, GFP_NOIO);
                spin_lock_irq(&dev->txq.lock);
                while ((res = usb_get_from_anchor(&dev->deferred))) {
  
                smp_mb();
                clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
                spin_unlock_irq(&dev->txq.lock);
-               if (!(dev->txq.qlen >= TX_QLEN(dev)))
-                       netif_start_queue(dev->net);
-               tasklet_schedule (&dev->bh);
+               if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+                       if (!(dev->txq.qlen >= TX_QLEN(dev)))
+                               netif_start_queue(dev->net);
+                       tasklet_schedule (&dev->bh);
+               }
        }
        return 0;
  }
@@@ -1529,9 -1541,9 +1541,9 @@@ EXPORT_SYMBOL_GPL(usbnet_resume)
  
  static int __init usbnet_init(void)
  {
 -      /* compiler should optimize this out */
 -      BUILD_BUG_ON (sizeof (((struct sk_buff *)0)->cb)
 -                      < sizeof (struct skb_data));
 +      /* Compiler should optimize this out. */
 +      BUILD_BUG_ON(
 +              FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
  
        random_ether_addr(node_id);
        return 0;
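
The usbnet hunks above introduce an EVENT_DEV_OPEN bit so that interrupt-URB handling follows the interface state across stop/open and suspend/resume. A short sketch of the open-gated resubmission on resume, assuming the dev->interrupt URB and flags word shown above:

#include <linux/usb.h>
#include <linux/usb/usbnet.h>

/* Resubmit the status/interrupt URB on resume only if the netdev is open. */
static int example_resume_interrupt(struct usbnet *dev)
{
	if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
		return usb_submit_urb(dev->interrupt, GFP_NOIO);
	return 0;
}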
diff --combined drivers/net/veth.c
index cbe953a5bf5a6a26b9323866e1d0544ba60a1c10,3b99f64104fd528f7e1582931cacb58ff3bf7a92..3b0151a2a31bec76c7e401d481a9b67e9122c20e
@@@ -36,6 -36,7 +36,6 @@@ struct veth_net_stats 
  struct veth_priv {
        struct net_device *peer;
        struct veth_net_stats __percpu *stats;
 -      unsigned ip_summed;
  };
  
  /*
@@@ -52,7 -53,7 +52,7 @@@ static int veth_get_settings(struct net
  {
        cmd->supported          = 0;
        cmd->advertising        = 0;
 -      cmd->speed              = SPEED_10000;
 +      ethtool_cmd_speed_set(cmd, SPEED_10000);
        cmd->duplex             = DUPLEX_FULL;
        cmd->port               = PORT_TP;
        cmd->phy_address        = 0;
@@@ -98,10 -99,47 +98,10 @@@ static void veth_get_ethtool_stats(stru
        data[0] = priv->peer->ifindex;
  }
  
 -static u32 veth_get_rx_csum(struct net_device *dev)
 -{
 -      struct veth_priv *priv;
 -
 -      priv = netdev_priv(dev);
 -      return priv->ip_summed == CHECKSUM_UNNECESSARY;
 -}
 -
 -static int veth_set_rx_csum(struct net_device *dev, u32 data)
 -{
 -      struct veth_priv *priv;
 -
 -      priv = netdev_priv(dev);
 -      priv->ip_summed = data ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
 -      return 0;
 -}
 -
 -static u32 veth_get_tx_csum(struct net_device *dev)
 -{
 -      return (dev->features & NETIF_F_NO_CSUM) != 0;
 -}
 -
 -static int veth_set_tx_csum(struct net_device *dev, u32 data)
 -{
 -      if (data)
 -              dev->features |= NETIF_F_NO_CSUM;
 -      else
 -              dev->features &= ~NETIF_F_NO_CSUM;
 -      return 0;
 -}
 -
  static const struct ethtool_ops veth_ethtool_ops = {
        .get_settings           = veth_get_settings,
        .get_drvinfo            = veth_get_drvinfo,
        .get_link               = ethtool_op_get_link,
 -      .get_rx_csum            = veth_get_rx_csum,
 -      .set_rx_csum            = veth_set_rx_csum,
 -      .get_tx_csum            = veth_get_tx_csum,
 -      .set_tx_csum            = veth_set_tx_csum,
 -      .get_sg                 = ethtool_op_get_sg,
 -      .set_sg                 = ethtool_op_set_sg,
        .get_strings            = veth_get_strings,
        .get_sset_count         = veth_get_sset_count,
        .get_ethtool_stats      = veth_get_ethtool_stats,
@@@ -130,9 -168,8 +130,9 @@@ static netdev_tx_t veth_xmit(struct sk_
  
        /* don't change ip_summed == CHECKSUM_PARTIAL, as that
           will cause bad checksum on forwarded packets */
 -      if (skb->ip_summed == CHECKSUM_NONE)
 -              skb->ip_summed = rcv_priv->ip_summed;
 +      if (skb->ip_summed == CHECKSUM_NONE &&
 +          rcv->features & NETIF_F_RXCSUM)
 +              skb->ip_summed = CHECKSUM_UNNECESSARY;
  
        length = skb->len;
        if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
@@@ -267,8 -304,6 +267,8 @@@ static void veth_setup(struct net_devic
        dev->ethtool_ops = &veth_ethtool_ops;
        dev->features |= NETIF_F_LLTX;
        dev->destructor = veth_dev_free;
 +
 +      dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
  }
  
  /*
@@@ -368,6 -403,17 +368,17 @@@ static int veth_newlink(struct net *src
        if (tb[IFLA_ADDRESS] == NULL)
                random_ether_addr(dev->dev_addr);
  
+       if (tb[IFLA_IFNAME])
+               nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
+       else
+               snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
+       if (strchr(dev->name, '%')) {
+               err = dev_alloc_name(dev, dev->name);
+               if (err < 0)
+                       goto err_alloc_name;
+       }
        err = register_netdevice(dev);
        if (err < 0)
                goto err_register_dev;
  
  err_register_dev:
        /* nothing to do */
+ err_alloc_name:
  err_configure_peer:
        unregister_netdevice(peer);
        return err;
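
The veth_newlink hunk above honours IFLA_IFNAME when userspace supplies a name and otherwise falls back to a "%d" template expanded by dev_alloc_name(). A hedged sketch of that naming pattern (the "example%d" template is illustrative, not veth's real one):

#include <linux/netdevice.h>
#include <net/rtnetlink.h>

static int example_pick_name(struct net_device *dev, struct nlattr *tb[])
{
	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, "example%%d");

	/* Let the core pick a free unit number for templated names. */
	if (strchr(dev->name, '%')) {
		int err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			return err;
	}
	return 0;
}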
diff --combined drivers/net/wireless/iwlegacy/iwl4965-base.c
index f781b7e225b466679d5a8738ae69ee74f01c833b,a62fe24ee594cbbb5b462701ca3ec369b422a437..af2ae22fcfd32c22ab0b28f1bddcc64b7f873c46
@@@ -1069,12 -1069,9 +1069,12 @@@ static void iwl4965_irq_tasklet(struct 
        }
  
        /* Re-enable all interrupts */
 -      /* only Re-enable if diabled by irq */
 +      /* only Re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &priv->status))
                iwl_legacy_enable_interrupts(priv);
 +      /* Re-enable RF_KILL if it occurred */
 +      else if (handled & CSR_INT_BIT_RF_KILL)
 +              iwl_legacy_enable_rfkill_int(priv);
  
  #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
        if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
@@@ -2142,7 -2139,7 +2142,7 @@@ static void iwl4965_cancel_deferred_wor
  static void __iwl4965_down(struct iwl_priv *priv)
  {
        unsigned long flags;
 -      int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
 +      int exit_pending;
  
        IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
  
@@@ -2404,12 -2401,11 +2404,12 @@@ static void iwl4965_bg_init_alive_start
        struct iwl_priv *priv =
            container_of(data, struct iwl_priv, init_alive_start.work);
  
 +      mutex_lock(&priv->mutex);
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 -              return;
 +              goto out;
  
 -      mutex_lock(&priv->mutex);
        priv->cfg->ops->lib->init_alive_start(priv);
 +out:
        mutex_unlock(&priv->mutex);
  }
  
@@@ -2418,12 -2414,11 +2418,12 @@@ static void iwl4965_bg_alive_start(stru
        struct iwl_priv *priv =
            container_of(data, struct iwl_priv, alive_start.work);
  
 +      mutex_lock(&priv->mutex);
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 -              return;
 +              goto out;
  
 -      mutex_lock(&priv->mutex);
        iwl4965_alive_start(priv);
 +out:
        mutex_unlock(&priv->mutex);
  }
  
@@@ -2473,12 -2468,10 +2473,12 @@@ static void iwl4965_bg_restart(struct w
        } else {
                iwl4965_down(priv);
  
 -              if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 +              mutex_lock(&priv->mutex);
 +              if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
 +                      mutex_unlock(&priv->mutex);
                        return;
 +              }
  
 -              mutex_lock(&priv->mutex);
                __iwl4965_up(priv);
                mutex_unlock(&priv->mutex);
        }
@@@ -2631,10 -2624,9 +2631,10 @@@ void iwl4965_mac_stop(struct ieee80211_
  
        flush_workqueue(priv->workqueue);
  
 -      /* enable interrupts again in order to receive rfkill changes */
 +      /* User space software may expect to get rfkill changes
 +       * even if the interface is down */
        iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
 -      iwl_legacy_enable_interrupts(priv);
 +      iwl_legacy_enable_rfkill_int(priv);
  
        IWL_DEBUG_MAC80211(priv, "leave\n");
  }
@@@ -2855,22 -2847,21 +2855,22 @@@ void iwl4965_mac_channel_switch(struct 
  
        IWL_DEBUG_MAC80211(priv, "enter\n");
  
 +      mutex_lock(&priv->mutex);
 +
        if (iwl_legacy_is_rfkill(priv))
 -              goto out_exit;
 +              goto out;
  
        if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
            test_bit(STATUS_SCANNING, &priv->status))
 -              goto out_exit;
 +              goto out;
  
        if (!iwl_legacy_is_associated_ctx(ctx))
 -              goto out_exit;
 +              goto out;
  
        /* channel switch in progress */
        if (priv->switch_rxon.switch_in_progress == true)
 -              goto out_exit;
 +              goto out;
  
 -      mutex_lock(&priv->mutex);
        if (priv->cfg->ops->lib->set_channel_switch) {
  
                ch = channel->hw_value;
        }
  out:
        mutex_unlock(&priv->mutex);
 -out_exit:
        if (!priv->switch_rxon.switch_in_progress)
                ieee80211_chswitch_done(ctx->vif, false);
        IWL_DEBUG_MAC80211(priv, "leave\n");
@@@ -2992,15 -2984,15 +2992,15 @@@ static void iwl4965_bg_txpower_work(str
        struct iwl_priv *priv = container_of(work, struct iwl_priv,
                        txpower_work);
  
+       mutex_lock(&priv->mutex);
        /* If a scan happened to start before we got here
         * then just return; the statistics notification will
         * kick off another scheduled work to compensate for
         * any temperature delta we missed here. */
        if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
            test_bit(STATUS_SCANNING, &priv->status))
-               return;
-       mutex_lock(&priv->mutex);
+               goto out;
  
        /* Regardless of if we are associated, we must reconfigure the
         * TX power since frames can be sent on non-radar channels while
        /* Update last_temperature to keep is_calib_needed from running
         * when it isn't needed... */
        priv->last_temperature = priv->temperature;
+ out:
        mutex_unlock(&priv->mutex);
  }
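
Several iwl4965 hunks here move the STATUS_EXIT_PENDING test under priv->mutex and exit through a shared unlock label instead of returning early with the locking skewed. A minimal sketch of that lock-then-check shape (example_* names are illustrative):

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_priv {
	struct mutex mutex;
	unsigned long status;
	struct work_struct bg_work;
};

#define EXAMPLE_STATUS_EXIT_PENDING 0

static void example_bg_work(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, bg_work);

	mutex_lock(&priv->mutex);
	if (test_bit(EXAMPLE_STATUS_EXIT_PENDING, &priv->status))
		goto out;

	/* ... deferred work runs here with priv->mutex held ... */
out:
	mutex_unlock(&priv->mutex);
}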
  
@@@ -3124,6 -3116,7 +3124,6 @@@ static int iwl4965_init_drv(struct iwl_
        INIT_LIST_HEAD(&priv->free_frames);
  
        mutex_init(&priv->mutex);
 -      mutex_init(&priv->sync_cmd_mutex);
  
        priv->ieee_channels = NULL;
        priv->ieee_rates = NULL;
@@@ -3180,7 -3173,7 +3180,7 @@@ static void iwl4965_hw_detect(struct iw
  {
        priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
        priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
 -      pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
 +      priv->rev_id = priv->pci_dev->revision;
        IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
  }
  
@@@ -3413,14 -3406,14 +3413,14 @@@ iwl4965_pci_probe(struct pci_dev *pdev
         * 8. Enable interrupts and read RFKILL state
         *********************************************/
  
 -      /* enable interrupts if needed: hw bug w/a */
 +      /* enable rfkill interrupt: hw bug w/a */
        pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
        if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
                pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
                pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
        }
  
 -      iwl_legacy_enable_interrupts(priv);
 +      iwl_legacy_enable_rfkill_int(priv);
  
        /* If platform's RF_KILL switch is NOT set to KILL */
        if (iwl_read32(priv, CSR_GP_CNTRL) &
diff --combined drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 494de0e59cb4e6cd49d1aec06c418a65b8c7d4d2,0712b67283a4f7e1b4a4ae702623ba32fd61a933..4afae144658296c31625dba650c541bd527908dc
@@@ -2,7 -2,7 +2,7 @@@
   *
   * GPL LICENSE SUMMARY
   *
 - * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of version 2 of the GNU General Public License as
@@@ -222,8 -222,13 +222,8 @@@ void iwlagn_tx_queue_set_status(struct 
                       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
  }
  
 -int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
 -                        int tx_fifo, int sta_id, int tid, u16 ssn_idx)
 +static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id, int tid)
  {
 -      unsigned long flags;
 -      u16 ra_tid;
 -      int ret;
 -
        if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
            (IWLAGN_FIRST_AMPDU_QUEUE +
                priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
                return -EINVAL;
        }
  
 -      ra_tid = BUILD_RAxTID(sta_id, tid);
 -
        /* Modify device's station table to Tx this TID */
 -      ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
 -      if (ret)
 -              return ret;
 +      return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
 +}
 +
 +void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv,
 +                              struct ieee80211_sta *sta,
 +                              int tid, int frame_limit)
 +{
 +      int sta_id, tx_fifo, txq_id, ssn_idx;
 +      u16 ra_tid;
 +      unsigned long flags;
 +      struct iwl_tid_data *tid_data;
 +
 +      sta_id = iwl_sta_id(sta);
 +      if (WARN_ON(sta_id == IWL_INVALID_STATION))
 +              return;
 +      if (WARN_ON(tid >= MAX_TID_COUNT))
 +              return;
 +
 +      spin_lock_irqsave(&priv->sta_lock, flags);
 +      tid_data = &priv->stations[sta_id].tid[tid];
 +      ssn_idx = SEQ_TO_SN(tid_data->seq_number);
 +      txq_id = tid_data->agg.txq_id;
 +      tx_fifo = tid_data->agg.tx_fifo;
 +      spin_unlock_irqrestore(&priv->sta_lock, flags);
 +
 +      ra_tid = BUILD_RAxTID(sta_id, tid);
  
        spin_lock_irqsave(&priv->lock, flags);
  
        iwl_write_targ_mem(priv, priv->scd_base_addr +
                        IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
                        sizeof(u32),
 -                      ((SCD_WIN_SIZE <<
 +                      ((frame_limit <<
                        IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                        IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
 -                      ((SCD_FRAME_LIMIT <<
 +                      ((frame_limit <<
                        IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                        IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
  
        iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
  
        spin_unlock_irqrestore(&priv->lock, flags);
 -
 -      return 0;
  }
  
 -int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
 -                         u16 ssn_idx, u8 tx_fifo)
 +static int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
 +                                u16 ssn_idx, u8 tx_fifo)
  {
        if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
            (IWLAGN_FIRST_AMPDU_QUEUE +
@@@ -582,12 -568,17 +582,17 @@@ int iwlagn_tx_skb(struct iwl_priv *priv
  
        hdr_len = ieee80211_hdrlen(fc);
  
-       /* Find index into station table for destination station */
-       sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
-                              hdr->addr1);
-               goto drop_unlock;
+       /* For management frames use broadcast id so as not to break aggregation */
+       if (!ieee80211_is_data(fc))
+               sta_id = ctx->bcast_sta_id;
+       else {
+               /* Find index into station table for destination station */
+               sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
+               if (sta_id == IWL_INVALID_STATION) {
+                       IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+                                      hdr->addr1);
+                       goto drop_unlock;
+               }
        }
  
        IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
@@@ -1048,11 -1039,11 +1053,11 @@@ int iwlagn_tx_agg_start(struct iwl_pri
        tid_data = &priv->stations[sta_id].tid[tid];
        *ssn = SEQ_TO_SN(tid_data->seq_number);
        tid_data->agg.txq_id = txq_id;
 +      tid_data->agg.tx_fifo = tx_fifo;
        iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
        spin_unlock_irqrestore(&priv->sta_lock, flags);
  
 -      ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
 -                                                sta_id, tid, *ssn);
 +      ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
        if (ret)
                return ret;
  
@@@ -1139,7 -1130,8 +1144,7 @@@ int iwlagn_tx_agg_stop(struct iwl_priv 
         * to deactivate the uCode queue, just return "success" to allow
         *  mac80211 to clean up its own data.
         */
 -      priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
 -                                                 tx_fifo_id);
 +      iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
        spin_unlock_irqrestore(&priv->lock, flags);
  
        ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@@ -1168,7 -1160,8 +1173,7 @@@ int iwlagn_txq_check_empty(struct iwl_p
                        u16 ssn = SEQ_TO_SN(tid_data->seq_number);
                        int tx_fifo = get_fifo_from_tid(ctx, tid);
                        IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
 -                      priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
 -                                                           ssn, tx_fifo);
 +                      iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
                        tid_data->agg.state = IWL_AGG_OFF;
                        ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
                }
@@@ -1267,11 -1260,11 +1272,11 @@@ static int iwlagn_tx_status_reply_compr
                                 struct iwl_compressed_ba_resp *ba_resp)
  
  {
 -      int i, sh, ack;
 +      int sh;
        u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
 -      int successes = 0;
        struct ieee80211_tx_info *info;
 +      u64 bitmap, sent_bitmap;
  
        if (unlikely(!agg->wait_for_ba))  {
                if (unlikely(ba_resp->bitmap))
  
        /* Calculate shift to align block-ack bits with our Tx window bits */
        sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
 -      if (sh < 0) /* tbw something is wrong with indices */
 +      if (sh < 0)
                sh += 0x100;
  
 -      if (agg->frame_count > (64 - sh)) {
 -              IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
 -              return -1;
 -      }
 -      if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) {
 +      /*
 +       * Check for success or failure according to the
 +       * transmitted bitmap and block-ack bitmap
 +       */
 +      bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
 +      sent_bitmap = bitmap & agg->bitmap;
 +
 +      /* Sanity check values reported by uCode */
 +      if (ba_resp->txed_2_done > ba_resp->txed) {
 +              IWL_DEBUG_TX_REPLY(priv,
 +                      "bogus sent(%d) and ack(%d) count\n",
 +                      ba_resp->txed, ba_resp->txed_2_done);
                /*
 -               * sent and ack information provided by uCode
 -               * use it instead of figure out ourself
 +               * set txed_2_done = txed,
 +               * so it won't impact rate scale
                 */
 -              if (ba_resp->txed_2_done > ba_resp->txed) {
 -                      IWL_DEBUG_TX_REPLY(priv,
 -                              "bogus sent(%d) and ack(%d) count\n",
 -                              ba_resp->txed, ba_resp->txed_2_done);
 -                      /*
 -                       * set txed_2_done = txed,
 -                       * so it won't impact rate scale
 -                       */
 -                      ba_resp->txed = ba_resp->txed_2_done;
 -              }
 -              IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
 -                              ba_resp->txed, ba_resp->txed_2_done);
 -      } else {
 -              u64 bitmap, sent_bitmap;
 -
 -              /* don't use 64-bit values for now */
 -              bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
 -
 -              /* check for success or failure according to the
 -               * transmitted bitmap and block-ack bitmap */
 -              sent_bitmap = bitmap & agg->bitmap;
 -
 -              /* For each frame attempted in aggregation,
 -               * update driver's record of tx frame's status. */
 -              i = 0;
 -              while (sent_bitmap) {
 -                      ack = sent_bitmap & 1ULL;
 -                      successes += ack;
 -                      IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
 -                              ack ? "ACK" : "NACK", i,
 -                              (agg->start_idx + i) & 0xff,
 -                              agg->start_idx + i);
 -                      sent_bitmap >>= 1;
 -                      ++i;
 -              }
 +              ba_resp->txed = ba_resp->txed_2_done;
 +      }
 +      IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
 +                      ba_resp->txed, ba_resp->txed_2_done);
  
 -              IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
 -                                 (unsigned long long)bitmap);
 +      /* Find the first ACKed frame to store the TX status */
 +      while (sent_bitmap && !(sent_bitmap & 1)) {
 +              agg->start_idx = (agg->start_idx + 1) & 0xff;
 +              sent_bitmap >>= 1;
        }
  
        info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
        memset(&info->status, 0, sizeof(info->status));
        info->flags |= IEEE80211_TX_STAT_ACK;
        info->flags |= IEEE80211_TX_STAT_AMPDU;
 -      if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) {
 -              info->status.ampdu_ack_len = ba_resp->txed_2_done;
 -              info->status.ampdu_len = ba_resp->txed;
 -
 -      } else {
 -              info->status.ampdu_ack_len = successes;
 -              info->status.ampdu_len = agg->frame_count;
 -      }
 +      info->status.ampdu_ack_len = ba_resp->txed_2_done;
 +      info->status.ampdu_len = ba_resp->txed;
        iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
  
        return 0;
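(Editor's aside, not part of the commit: to make the reworked block-ack accounting above easier to follow, here is a small userspace sketch of the same bitmap arithmetic: shift the BA bitmap into the Tx window, mask it with the frames actually sent, then advance start_idx to the first acknowledged frame. All values below are made up for illustration.)

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ba_bitmap  = 0x3cULL;	/* bits the BA acknowledged (hypothetical) */
	uint64_t agg_bitmap = 0x3fULL;	/* frames the driver transmitted (hypothetical) */
	int sh = 0;			/* shift aligning BA bits with the Tx window */
	unsigned int start_idx = 10;	/* Tx window start index (hypothetical) */

	uint64_t sent_bitmap = (ba_bitmap >> sh) & agg_bitmap;

	/* Skip unacknowledged leading frames so the TX status is stored
	 * against the first frame that really was ACKed. */
	while (sent_bitmap && !(sent_bitmap & 1)) {
		start_idx = (start_idx + 1) & 0xff;
		sent_bitmap >>= 1;
	}

	printf("first ACKed frame at index %u, bitmap now 0x%llx\n",
	       start_idx, (unsigned long long)sent_bitmap);
	return 0;
}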
diff --combined net/core/dev.c
index 3b79bad3d02d3292330b8e92efa7bfe6b783fd40,856b6ee9a1d5d5c1d4347d54048ac4f5d6bc73a1..44ef8f8998ca35203d75ba7a95c6d17a5ad6966f
@@@ -948,7 -948,7 +948,7 @@@ int dev_alloc_name(struct net_device *d
  }
  EXPORT_SYMBOL(dev_alloc_name);
  
 -static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
 +static int dev_get_valid_name(struct net_device *dev, const char *name)
  {
        struct net *net;
  
        if (!dev_valid_name(name))
                return -EINVAL;
  
 -      if (fmt && strchr(name, '%'))
 +      if (strchr(name, '%'))
                return dev_alloc_name(dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
@@@ -995,7 -995,7 +995,7 @@@ int dev_change_name(struct net_device *
  
        memcpy(oldname, dev->name, IFNAMSIZ);
  
 -      err = dev_get_valid_name(dev, newname, 1);
 +      err = dev_get_valid_name(dev, newname);
        if (err < 0)
                return err;
  
@@@ -1315,8 -1315,7 +1315,8 @@@ void dev_disable_lro(struct net_device 
                return;
  
        __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
 -      WARN_ON(dev->features & NETIF_F_LRO);
 +      if (unlikely(dev->features & NETIF_F_LRO))
 +              netdev_WARN(dev, "failed to disable LRO!\n");
  }
  EXPORT_SYMBOL(dev_disable_lro);
  
@@@ -2503,8 -2502,8 +2503,8 @@@ static inline void ____napi_schedule(st
  __u32 __skb_get_rxhash(struct sk_buff *skb)
  {
        int nhoff, hash = 0, poff;
 -      struct ipv6hdr *ip6;
 -      struct iphdr *ip;
 +      const struct ipv6hdr *ip6;
 +      const struct iphdr *ip;
        u8 ip_proto;
        u32 addr1, addr2, ihl;
        union {
                if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
                        goto done;
  
 -              ip = (struct iphdr *) (skb->data + nhoff);
 +              ip = (const struct iphdr *) (skb->data + nhoff);
                if (ip->frag_off & htons(IP_MF | IP_OFFSET))
                        ip_proto = 0;
                else
                if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
                        goto done;
  
 -              ip6 = (struct ipv6hdr *) (skb->data + nhoff);
 +              ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
                ip_proto = ip6->nexthdr;
                addr1 = (__force u32) ip6->saddr.s6_addr32[3];
                addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@@ -3077,6 -3076,25 +3077,6 @@@ void netdev_rx_handler_unregister(struc
  }
  EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
  
 -static void vlan_on_bond_hook(struct sk_buff *skb)
 -{
 -      /*
 -       * Make sure ARP frames received on VLAN interfaces stacked on
 -       * bonding interfaces still make their way to any base bonding
 -       * device that may have registered for a specific ptype.
 -       */
 -      if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
 -          vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
 -          skb->protocol == htons(ETH_P_ARP)) {
 -              struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 -
 -              if (!skb2)
 -                      return;
 -              skb2->dev = vlan_dev_real_dev(skb->dev);
 -              netif_rx(skb2);
 -      }
 -}
 -
  static int __netif_receive_skb(struct sk_buff *skb)
  {
        struct packet_type *ptype, *pt_prev;
@@@ -3112,12 -3130,6 +3112,12 @@@ another_round
  
        __this_cpu_inc(softnet_data.processed);
  
 +      if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
 +              skb = vlan_untag(skb);
 +              if (unlikely(!skb))
 +                      goto out;
 +      }
 +
  #ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_verd & TC_NCLS) {
                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@@ -3165,13 -3177,15 +3165,13 @@@ ncls
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = NULL;
                }
 -              if (vlan_hwaccel_do_receive(&skb)) {
 +              if (vlan_do_receive(&skb)) {
                        ret = __netif_receive_skb(skb);
                        goto out;
                } else if (unlikely(!skb))
                        goto out;
        }
  
 -      vlan_on_bond_hook(skb);
 -
        /* deliver only exact match when indicated */
        null_or_dev = deliver_exact ? skb->dev : NULL;
  
@@@ -4495,30 -4509,6 +4495,30 @@@ void dev_set_rx_mode(struct net_device 
        netif_addr_unlock_bh(dev);
  }
  
 +/**
 + *    dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
 + *    @dev: device
 + *    @cmd: memory area for ethtool_ops::get_settings() result
 + *
 + *    The cmd arg is initialized by this helper (cleared and its
 + *    ethtool_cmd::cmd field set to ETHTOOL_GSET).
 + *
 + *    Returns the device's ethtool_ops::get_settings() result value,
 + *    or -EOPNOTSUPP when the device doesn't expose the
 + *    ethtool_ops::get_settings() operation.
 + */
 +int dev_ethtool_get_settings(struct net_device *dev,
 +                           struct ethtool_cmd *cmd)
 +{
 +      if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
 +              return -EOPNOTSUPP;
 +
 +      memset(cmd, 0, sizeof(struct ethtool_cmd));
 +      cmd->cmd = ETHTOOL_GSET;
 +      return dev->ethtool_ops->get_settings(dev, cmd);
 +}
 +EXPORT_SYMBOL(dev_ethtool_get_settings);
 +
  /**
   *    dev_get_flags - get flags reported to userspace
   *    @dev: device
@@@ -4783,7 -4773,7 +4783,7 @@@ static int dev_ifsioc_locked(struct ne
                 * is never reached
                 */
                WARN_ON(1);
-               err = -EINVAL;
+               err = -ENOTTY;
                break;
  
        }
@@@ -5051,7 -5041,7 +5051,7 @@@ int dev_ioctl(struct net *net, unsigne
                /* Set the per device memory buffer space.
                 * Not applicable in our case */
        case SIOCSIFLINK:
-               return -EINVAL;
+               return -ENOTTY;
  
        /*
         *      Unknown or private ioctl.
                /* Take care of Wireless Extensions */
                if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
                        return wext_handle_ioctl(net, &ifr, cmd, arg);
-               return -EINVAL;
+               return -ENOTTY;
        }
  }
  
@@@ -5250,13 -5240,11 +5250,13 @@@ u32 netdev_fix_features(struct net_devi
  }
  EXPORT_SYMBOL(netdev_fix_features);
  
 -void netdev_update_features(struct net_device *dev)
 +int __netdev_update_features(struct net_device *dev)
  {
        u32 features;
        int err = 0;
  
 +      ASSERT_RTNL();
 +
        features = netdev_get_wanted_features(dev);
  
        if (dev->netdev_ops->ndo_fix_features)
        features = netdev_fix_features(dev, features);
  
        if (dev->features == features)
 -              return;
 +              return 0;
  
        netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
                dev->features, features);
        if (dev->netdev_ops->ndo_set_features)
                err = dev->netdev_ops->ndo_set_features(dev, features);
  
 -      if (!err)
 -              dev->features = features;
 -      else if (err < 0)
 +      if (unlikely(err < 0)) {
                netdev_err(dev,
                        "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
                        err, features, dev->features);
 +              return -1;
 +      }
 +
 +      if (!err)
 +              dev->features = features;
 +
 +      return 1;
 +}
 +
 +void netdev_update_features(struct net_device *dev)
 +{
 +      if (__netdev_update_features(dev))
 +              netdev_features_change(dev);
  }
  EXPORT_SYMBOL(netdev_update_features);
  
@@@ -5420,8 -5397,8 +5420,8 @@@ int register_netdevice(struct net_devic
                }
        }
  
 -      ret = dev_get_valid_name(dev, dev->name, 0);
 -      if (ret)
 +      ret = dev_get_valid_name(dev, dev->name);
 +      if (ret < 0)
                goto err_uninit;
  
        dev->ifindex = dev_new_index(net);
                dev->features &= ~NETIF_F_GSO;
        }
  
 +      /* Turn on no cache copy if HW is doing checksum */
 +      dev->hw_features |= NETIF_F_NOCACHE_COPY;
 +      if ((dev->features & NETIF_F_ALL_CSUM) &&
 +          !(dev->features & NETIF_F_NO_CSUM)) {
 +              dev->wanted_features |= NETIF_F_NOCACHE_COPY;
 +              dev->features |= NETIF_F_NOCACHE_COPY;
 +      }
 +
        /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
         * vlan_dev_init() will do the dev->features check, so these features
         * are enabled only if supported by underlying device.
                goto err_uninit;
        dev->reg_state = NETREG_REGISTERED;
  
 -      netdev_update_features(dev);
 +      __netdev_update_features(dev);
  
        /*
         *      Default initial state at registry is that the
@@@ -5562,7 -5531,19 +5562,7 @@@ int register_netdev(struct net_device *
        int err;
  
        rtnl_lock();
 -
 -      /*
 -       * If the name is a format string the caller wants us to do a
 -       * name allocation.
 -       */
 -      if (strchr(dev->name, '%')) {
 -              err = dev_alloc_name(dev, dev->name);
 -              if (err < 0)
 -                      goto out;
 -      }
 -
        err = register_netdevice(dev);
 -out:
        rtnl_unlock();
        return err;
  }
@@@ -6044,7 -6025,7 +6044,7 @@@ int dev_change_net_namespace(struct net
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
 -              if (dev_get_valid_name(dev, pat, 1))
 +              if (dev_get_valid_name(dev, pat) < 0)
                        goto out;
        }
  
@@@ -6176,20 -6157,29 +6176,20 @@@ static int dev_cpu_callback(struct noti
   */
  u32 netdev_increment_features(u32 all, u32 one, u32 mask)
  {
 -      /* If device needs checksumming, downgrade to it. */
 -      if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
 -              all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
 -      else if (mask & NETIF_F_ALL_CSUM) {
 -              /* If one device supports v4/v6 checksumming, set for all. */
 -              if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
 -                  !(all & NETIF_F_GEN_CSUM)) {
 -                      all &= ~NETIF_F_ALL_CSUM;
 -                      all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
 -              }
 +      if (mask & NETIF_F_GEN_CSUM)
 +              mask |= NETIF_F_ALL_CSUM;
 +      mask |= NETIF_F_VLAN_CHALLENGED;
  
 -              /* If one device supports hw checksumming, set for all. */
 -              if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
 -                      all &= ~NETIF_F_ALL_CSUM;
 -                      all |= NETIF_F_HW_CSUM;
 -              }
 -      }
 +      all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
 +      all &= one | ~NETIF_F_ALL_FOR_ALL;
  
 -      one |= NETIF_F_ALL_CSUM;
 +      /* If device needs checksumming, downgrade to it. */
 +      if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
 +              all &= ~NETIF_F_NO_CSUM;
  
 -      one |= all & NETIF_F_ONE_FOR_ALL;
 -      all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
 -      all |= one & mask & NETIF_F_ONE_FOR_ALL;
 +      /* If one device supports hw checksumming, set for all. */
 +      if (all & NETIF_F_GEN_CSUM)
 +              all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
  
        return all;
  }
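(Editor's aside, not part of the commit: a usage note for the dev_ethtool_get_settings() helper introduced earlier in this file. Callers pass an uninitialized struct ethtool_cmd; the helper clears it and sets ETHTOOL_GSET itself. The sketch below is illustrative only; report_link_speed() and the use of ethtool_cmd_speed() are assumptions, not part of this merge.)

#include <linux/netdevice.h>
#include <linux/ethtool.h>

static int report_link_speed(struct net_device *dev)
{
	struct ethtool_cmd cmd;
	int err;

	/* Returns -EOPNOTSUPP if the driver has no get_settings op. */
	err = dev_ethtool_get_settings(dev, &cmd);
	if (err < 0)
		return err;

	netdev_info(dev, "speed %u Mb/s, duplex %u\n",
		    ethtool_cmd_speed(&cmd), cmd.duplex);
	return 0;
}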
diff --combined net/ipv4/fib_trie.c
index 9ac481a10d37dbff04910af538ab0a624fd2808c,5fe9b8b41df34fe8cdf7af72ce10138a3f92191a..6375c1c5f6428038b27c0b050f1e4d6b7e850a74
@@@ -126,7 -126,7 +126,7 @@@ struct tnode 
                struct work_struct work;
                struct tnode *tnode_free;
        };
 -      struct rt_trie_node *child[0];
 +      struct rt_trie_node __rcu *child[0];
  };
  
  #ifdef CONFIG_IP_FIB_TRIE_STATS
@@@ -151,7 -151,7 +151,7 @@@ struct trie_stat 
  };
  
  struct trie {
 -      struct rt_trie_node *trie;
 +      struct rt_trie_node __rcu *trie;
  #ifdef CONFIG_IP_FIB_TRIE_STATS
        struct trie_use_stats stats;
  #endif
@@@ -177,29 -177,16 +177,29 @@@ static const int sync_pages = 128
  static struct kmem_cache *fn_alias_kmem __read_mostly;
  static struct kmem_cache *trie_leaf_kmem __read_mostly;
  
 -static inline struct tnode *node_parent(struct rt_trie_node *node)
 +/*
 + * caller must hold RTNL
 + */
 +static inline struct tnode *node_parent(const struct rt_trie_node *node)
  {
 -      return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
 +      unsigned long parent;
 +
 +      parent = rcu_dereference_index_check(node->parent, lockdep_rtnl_is_held());
 +
 +      return (struct tnode *)(parent & ~NODE_TYPE_MASK);
  }
  
 -static inline struct tnode *node_parent_rcu(struct rt_trie_node *node)
 +/*
 + * caller must hold RCU read lock or RTNL
 + */
 +static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
  {
 -      struct tnode *ret = node_parent(node);
 +      unsigned long parent;
 +
 +      parent = rcu_dereference_index_check(node->parent, rcu_read_lock_held() ||
 +                                                         lockdep_rtnl_is_held());
  
 -      return rcu_dereference_rtnl(ret);
 +      return (struct tnode *)(parent & ~NODE_TYPE_MASK);
  }
  
  /* Same as rcu_assign_pointer
@@@ -211,24 -198,18 +211,24 @@@ static inline void node_set_parent(stru
        node->parent = (unsigned long)ptr | NODE_TYPE(node);
  }
  
 -static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i)
 +/*
 + * caller must hold RTNL
 + */
 +static inline struct rt_trie_node *tnode_get_child(const struct tnode *tn, unsigned int i)
  {
        BUG_ON(i >= 1U << tn->bits);
  
 -      return tn->child[i];
 +      return rtnl_dereference(tn->child[i]);
  }
  
 -static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
 +/*
 + * caller must hold RCU read lock or RTNL
 + */
 +static inline struct rt_trie_node *tnode_get_child_rcu(const struct tnode *tn, unsigned int i)
  {
 -      struct rt_trie_node *ret = tnode_get_child(tn, i);
 +      BUG_ON(i >= 1U << tn->bits);
  
 -      return rcu_dereference_rtnl(ret);
 +      return rcu_dereference_rtnl(tn->child[i]);
  }
  
  static inline int tnode_child_length(const struct tnode *tn)
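(Editor's aside, not part of the commit: the fib_trie hunks above add sparse __rcu annotations and route child/parent loads through explicit RCU accessors. A minimal sketch of the general pattern, with made-up structure and function names; only the accessor helpers are real kernel API.)

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct demo_node {
	struct demo_node __rcu *child;	/* written under RTNL, read under RCU */
};

/* Writer side: caller holds RTNL, so rtnl_dereference() suffices. */
static struct demo_node *demo_child(struct demo_node *n)
{
	return rtnl_dereference(n->child);
}

/* Reader side: caller holds either the RCU read lock or RTNL. */
static struct demo_node *demo_child_rcu(struct demo_node *n)
{
	return rcu_dereference_rtnl(n->child);
}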
@@@ -506,7 -487,7 +506,7 @@@ static inline void put_child(struct tri
  static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
                                  int wasfull)
  {
 -      struct rt_trie_node *chi = tn->child[i];
 +      struct rt_trie_node *chi = rtnl_dereference(tn->child[i]);
        int isfull;
  
        BUG_ON(i >= 1<<tn->bits);
@@@ -684,7 -665,7 +684,7 @@@ one_child
                for (i = 0; i < tnode_child_length(tn); i++) {
                        struct rt_trie_node *n;
  
 -                      n = tn->child[i];
 +                      n = rtnl_dereference(tn->child[i]);
                        if (!n)
                                continue;
  
        return (struct rt_trie_node *) tn;
  }
  
 +
 +static void tnode_clean_free(struct tnode *tn)
 +{
 +      int i;
 +      struct tnode *tofree;
 +
 +      for (i = 0; i < tnode_child_length(tn); i++) {
 +              tofree = (struct tnode *)rtnl_dereference(tn->child[i]);
 +              if (tofree)
 +                      tnode_free(tofree);
 +      }
 +      tnode_free(tn);
 +}
 +
  static struct tnode *inflate(struct trie *t, struct tnode *tn)
  {
        struct tnode *oldtnode = tn;
                inode = (struct tnode *) node;
  
                if (inode->bits == 1) {
 -                      put_child(t, tn, 2*i, inode->child[0]);
 -                      put_child(t, tn, 2*i+1, inode->child[1]);
 +                      put_child(t, tn, 2*i, rtnl_dereference(inode->child[0]));
 +                      put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1]));
  
                        tnode_free_safe(inode);
                        continue;
  
                size = tnode_child_length(left);
                for (j = 0; j < size; j++) {
 -                      put_child(t, left, j, inode->child[j]);
 -                      put_child(t, right, j, inode->child[j + size]);
 +                      put_child(t, left, j, rtnl_dereference(inode->child[j]));
 +                      put_child(t, right, j, rtnl_dereference(inode->child[j + size]));
                }
                put_child(t, tn, 2*i, resize(t, left));
                put_child(t, tn, 2*i+1, resize(t, right));
        tnode_free_safe(oldtnode);
        return tn;
  nomem:
 -      {
 -              int size = tnode_child_length(tn);
 -              int j;
 -
 -              for (j = 0; j < size; j++)
 -                      if (tn->child[j])
 -                              tnode_free((struct tnode *)tn->child[j]);
 -
 -              tnode_free(tn);
 -
 -              return ERR_PTR(-ENOMEM);
 -      }
 +      tnode_clean_free(tn);
 +      return ERR_PTR(-ENOMEM);
  }
  
  static struct tnode *halve(struct trie *t, struct tnode *tn)
        tnode_free_safe(oldtnode);
        return tn;
  nomem:
 -      {
 -              int size = tnode_child_length(tn);
 -              int j;
 -
 -              for (j = 0; j < size; j++)
 -                      if (tn->child[j])
 -                              tnode_free((struct tnode *)tn->child[j]);
 -
 -              tnode_free(tn);
 -
 -              return ERR_PTR(-ENOMEM);
 -      }
 +      tnode_clean_free(tn);
 +      return ERR_PTR(-ENOMEM);
  }
  
  /* readside must use rcu_read_lock currently dump routines
@@@ -1046,7 -1033,7 +1046,7 @@@ static struct list_head *fib_insert_nod
        t_key cindex;
  
        pos = 0;
 -      n = t->trie;
 +      n = rtnl_dereference(t->trie);
  
        /* If we point to NULL, stop. Either the tree is empty and we should
          * just put a new leaf in it, or we have reached an empty child slot,
@@@ -1332,9 -1319,6 +1332,9 @@@ int fib_table_insert(struct fib_table *
                }
        }
  
 +      if (!plen)
 +              tb->tb_num_default++;
 +
        list_add_tail_rcu(&new_fa->fa_list,
                          (fa ? &fa->fa_list : fa_head));
  
@@@ -1700,9 -1684,6 +1700,9 @@@ int fib_table_delete(struct fib_table *
  
        list_del_rcu(&fa->fa_list);
  
 +      if (!plen)
 +              tb->tb_num_default--;
 +
        if (list_empty(fa_head)) {
                hlist_del_rcu(&li->hlist);
                free_leaf_info(li);
@@@ -1775,7 -1756,7 +1775,7 @@@ static struct leaf *leaf_walk_rcu(struc
                                continue;
  
                        if (IS_LEAF(c)) {
 -                              prefetch(p->child[idx]);
 +                              prefetch(rcu_dereference_rtnl(p->child[idx]));
                                return (struct leaf *) c;
                        }
  
@@@ -1993,14 -1974,10 +1993,11 @@@ struct fib_table *fib_trie_table(u32 id
  
        tb->tb_id = id;
        tb->tb_default = -1;
 +      tb->tb_num_default = 0;
  
        t = (struct trie *) tb->tb_data;
        memset(t, 0, sizeof(*t));
  
-       if (id == RT_TABLE_LOCAL)
-               pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION);
        return tb;
  }
  
@@@ -2292,7 -2269,7 +2289,7 @@@ static void *fib_trie_seq_next(struct s
  
        /* walk rest of this hash chain */
        h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
 -      while ( (tb_node = rcu_dereference(tb->tb_hlist.next)) ) {
 +      while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
                tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
                n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
                if (n)
diff --combined net/ipv6/addrconf.c
index c663a3b709245b3a0c433d74687812486148c4e5,a7bda0757053278e58218766df54cb49fac100d6..f2f9b2e3cfe9f5a242ea5fdc2e3cf7e7a3ab79e9
@@@ -825,8 -825,6 +825,8 @@@ static void ipv6_del_addr(struct inet6_
                dst_release(&rt->dst);
        }
  
 +      /* clean up prefsrc entries */
 +      rt6_remove_prefsrc(ifp);
  out:
        in6_ifa_put(ifp);
  }
@@@ -1283,7 -1281,7 +1283,7 @@@ static int ipv6_count_addresses(struct 
        return cnt;
  }
  
 -int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
 +int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
                  struct net_device *dev, int strict)
  {
        struct inet6_ifaddr *ifp;
@@@ -1326,7 -1324,7 +1326,7 @@@ static bool ipv6_chk_same_addr(struct n
        return false;
  }
  
 -int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
 +int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
  {
        struct inet6_dev *idev;
        struct inet6_ifaddr *ifa;
@@@ -1457,7 -1455,7 +1457,7 @@@ void addrconf_dad_failure(struct inet6_
  
  /* Join to solicited addr multicast group. */
  
 -void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr)
 +void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
  {
        struct in6_addr maddr;
  
        ipv6_dev_mc_inc(dev, &maddr);
  }
  
 -void addrconf_leave_solict(struct inet6_dev *idev, struct in6_addr *addr)
 +void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
  {
        struct in6_addr maddr;
  
@@@ -2113,7 -2111,7 +2113,7 @@@ err_exit
  /*
   *    Manual configuration of address on an interface
   */
 -static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 +static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
                          unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
                          __u32 valid_lft)
  {
        return PTR_ERR(ifp);
  }
  
 -static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
 +static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx,
                          unsigned int plen)
  {
        struct inet6_ifaddr *ifp;
@@@ -2350,7 -2348,7 +2350,7 @@@ static void init_loopback(struct net_de
        add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
  }
  
 -static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr)
 +static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
  {
        struct inet6_ifaddr * ifp;
        u32 addr_flags = IFA_F_PERMANENT;
@@@ -3121,7 -3119,7 +3121,7 @@@ void if6_proc_exit(void
  
  #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
  /* Check if address is a home address configured on any interface. */
 -int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
 +int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
  {
        int ret = 0;
        struct inet6_ifaddr *ifp = NULL;
@@@ -4539,7 -4537,7 +4539,7 @@@ static void __addrconf_sysctl_unregiste
  
        t = p->sysctl;
        p->sysctl = NULL;
-       unregister_sysctl_table(t->sysctl_header);
+       unregister_net_sysctl_table(t->sysctl_header);
        kfree(t->dev_name);
        kfree(t);
  }
diff --combined net/ipv6/esp6.c
index e97b4b7ca2f2f9396f60066cba01fce141bcf921,59dccfbb5b11332f7f3957d2fb80a0f94a2784da..1ac7938dd9ec38a300266be6eb64b69d5b98df1c
@@@ -371,7 -371,7 +371,7 @@@ static int esp6_input(struct xfrm_stat
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        asg = esp_req_sg(aead, req);
-       sg = asg + 1;
+       sg = asg + sglists;
  
        skb->ip_summed = CHECKSUM_NONE;
  
@@@ -430,7 -430,7 +430,7 @@@ static void esp6_err(struct sk_buff *sk
                     u8 type, u8 code, int offset, __be32 info)
  {
        struct net *net = dev_net(skb->dev);
 -      struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
 +      const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;
  
            type != ICMPV6_PKT_TOOBIG)
                return;
  
 -      x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
 +      x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
 +                            esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
                return;
        printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",