git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
author David S. Miller <davem@davemloft.net>
Thu, 29 Oct 2009 08:05:38 +0000 (01:05 -0700)
committer David S. Miller <davem@davemloft.net>
Thu, 29 Oct 2009 08:05:38 +0000 (01:05 -0700)
14 files changed:
drivers/net/bnx2.h
drivers/net/bonding/bond_main.c
drivers/net/cnic.c
drivers/net/e1000e/e1000.h
drivers/net/e1000e/hw.h
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/phy.c
drivers/net/igb/igb_ethtool.c
drivers/net/igbvf/ethtool.c
drivers/net/ixgbe/ixgbe_ethtool.c
drivers/net/pppoe.c
drivers/net/sfc/rx.c
drivers/net/sh_eth.c
drivers/net/virtio_net.c

index 6c7f795d12de5c35230ce8ed76a4888b2c431794..a4d83409f20555eb60c73e9d10ada9edd2a777b3 100644 (file)
@@ -361,9 +361,12 @@ struct l2_fhdr {
 #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE       (1<<28)
 
 #define BNX2_L2CTX_HOST_BDIDX                          0x00000004
-#define BNX2_L2CTX_STATUSB_NUM_SHIFT                    16
-#define BNX2_L2CTX_STATUSB_NUM(sb_id)                   \
-       (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT                         16
+#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT                         24
+#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id)               \
+       (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id)               \
+       (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0)
 #define BNX2_L2CTX_HOST_BSEQ                           0x00000008
 #define BNX2_L2CTX_NX_BSEQ                             0x0000000c
 #define BNX2_L2CTX_NX_BDHADDR_HI                       0x00000010
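
The bnx2.h hunk above splits the old single STATUSB_NUM macro into an L5 variant (bits 16..23) and an L2 variant (bits 24..31), so the L2 ring and the cnic/L5 context can carry different status-block numbers in the same context word. A minimal user-space sketch of how the two macros resolve for a given sb_id; the sample value is assumed purely for illustration:

#include <stdio.h>

#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT 16
#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT 24
#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id) \
	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0)
#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id) \
	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0)

int main(void)
{
	unsigned int sb_id = 1;

	/* sb_id 1 maps to status block 8, placed in bits 16..23 (L5)
	 * or bits 24..31 (L2) of the context word. */
	printf("L5: 0x%08x  L2: 0x%08x\n",
	       BNX2_L2CTX_L5_STATUSB_NUM(sb_id),
	       BNX2_L2CTX_L2_STATUSB_NUM(sb_id));
	return 0;
}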
index 69c5b15e22daca1239e5d10bc6f00b767ae3ce4c..40fb5eefc72e63b884d6f69a2897ef9b0038f1cd 100644 (file)
@@ -691,7 +691,7 @@ static int bond_check_dev_link(struct bonding *bond,
                               struct net_device *slave_dev, int reporting)
 {
        const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-       static int (*ioctl)(struct net_device *, struct ifreq *, int);
+       int (*ioctl)(struct net_device *, struct ifreq *, int);
        struct ifreq ifr;
        struct mii_ioctl_data *mii;
 
@@ -3665,10 +3665,10 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
 
        if (skb->protocol == htons(ETH_P_IP)) {
                return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
-                       (data->h_dest[5] ^ bond_dev->dev_addr[5])) % count;
+                       (data->h_dest[5] ^ data->h_source[5])) % count;
        }
 
-       return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+       return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*
@@ -3695,7 +3695,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
 
        }
 
-       return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+       return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*
@@ -3706,7 +3706,7 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb,
 {
        struct ethhdr *data = (struct ethhdr *)skb->data;
 
-       return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+       return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*-------------------------- Device entry points ----------------------------*/
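
The bonding hunks above replace bond_dev->dev_addr[5] with data->h_source[5] in all three xmit hash policies, so slave selection now depends on the frame's own source MAC rather than the bond device's address. A standalone sketch of the resulting layer-2 policy, using a simplified stand-in for struct ethhdr:

#include <stdio.h>

struct ethhdr_min {		/* simplified stand-in for struct ethhdr */
	unsigned char h_dest[6];
	unsigned char h_source[6];
};

/* Layer-2 hash as in bond_xmit_hash_policy_l2() after this change. */
static int hash_policy_l2(const struct ethhdr_min *data, int count)
{
	return (data->h_dest[5] ^ data->h_source[5]) % count;
}

int main(void)
{
	struct ethhdr_min eth = {
		.h_dest   = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		.h_source = { 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee },
	};

	/* 0x55 ^ 0xee = 0xbb = 187; 187 % 3 == 1 -> slave index 1 */
	printf("slave index: %d\n", hash_policy_l2(&eth, 3));
	return 0;
}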
index 46c87ec7960c83c03d10772a4793629c3cd8904d..3bf1b04f2cab77068c6cb08c206aa0174fbae6d9 100644 (file)
@@ -2264,9 +2264,9 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
 
        if (sb_id == 0)
-               val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
+               val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
        else
-               val = BNX2_L2CTX_STATUSB_NUM(sb_id);
+               val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
 
        rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
@@ -2423,7 +2423,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
        cp->int_num = 0;
        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                u32 sb_id = cp->status_blk_num;
-               u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
+               u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
 
                cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
                cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
index 981936c1fb46759846cad4b379471fe61ee1a99e..405a144ebcad6796917ed235e2ca94305451f2ab 100644 (file)
@@ -519,9 +519,13 @@ extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
 extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
 extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+                                          u16 *data);
 extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
 extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
 extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+                                           u16 data);
 extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
 extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
 extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
@@ -538,7 +542,11 @@ extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
 extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+                                        u16 data);
 extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+                                       u16 *data);
 extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
                               u32 usec_interval, bool *success);
 extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
@@ -546,7 +554,11 @@ extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_check_downshift(struct e1000_hw *hw);
 extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+                                        u16 *data);
 extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+                                         u16 data);
 extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
 extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
 extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
index fd44d9f907696aff5ee2fdb6558d79feaf0b27e7..7b05cf47f7f570c81d630199528909e3d95f2387 100644 (file)
@@ -764,11 +764,13 @@ struct e1000_phy_operations {
        s32  (*get_cable_length)(struct e1000_hw *);
        s32  (*get_phy_info)(struct e1000_hw *);
        s32  (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
+       s32  (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
        void (*release_phy)(struct e1000_hw *);
        s32  (*reset_phy)(struct e1000_hw *);
        s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
        s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
        s32  (*write_phy_reg)(struct e1000_hw *, u32, u16);
+       s32  (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
        s32  (*cfg_on_link_up)(struct e1000_hw *);
 };
 
index aa0ab0eb8c7db1d79006dbc34534ce7f355e6b93..b6388b9535fdb59a41f2d2978bcad00c16122a32 100644 (file)
 
 #define HV_LED_CONFIG          PHY_REG(768, 30) /* LED Configuration */
 
+#define SW_FLAG_TIMEOUT    1000 /* SW Semaphore flag timeout in milliseconds */
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS            PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU       0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
 /* Offset 04h HSFSTS */
 union ich8_hws_flash_status {
@@ -200,6 +207,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -242,7 +250,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 
        phy->ops.check_polarity       = e1000_check_polarity_ife_ich8lan;
        phy->ops.read_phy_reg         = e1000_read_phy_reg_hv;
+       phy->ops.read_phy_reg_locked  = e1000_read_phy_reg_hv_locked;
+       phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
+       phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
        phy->ops.write_phy_reg        = e1000_write_phy_reg_hv;
+       phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
        phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
        phy->id = e1000_phy_unknown;
@@ -303,6 +315,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
        case IGP03E1000_E_PHY_ID:
                phy->type = e1000_phy_igp_3;
                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+               phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
+               phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
                break;
        case IFE_E_PHY_ID:
        case IFE_PLUS_E_PHY_ID:
@@ -567,13 +581,40 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
 
 static DEFINE_MUTEX(nvm_mutex);
 
+/**
+ *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+       mutex_lock(&nvm_mutex);
+
+       return 0;
+}
+
+/**
+ *  e1000_release_nvm_ich8lan - Release NVM mutex
+ *  @hw: pointer to the HW structure
+ *
+ *  Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+       mutex_unlock(&nvm_mutex);
+
+       return;
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
 /**
  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
  *  @hw: pointer to the HW structure
  *
- *  Acquires the software control flag for performing NVM and PHY
- *  operations.  This is a function pointer entry point only called by
- *  read/write routines for the PHY and NVM parts.
+ *  Acquires the software control flag for performing PHY and select
+ *  MAC CSR accesses.
  **/
 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 {
@@ -582,7 +623,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 
        might_sleep();
 
-       mutex_lock(&nvm_mutex);
+       mutex_lock(&swflag_mutex);
 
        while (timeout) {
                extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -599,7 +640,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
                goto out;
        }
 
-       timeout = PHY_CFG_TIMEOUT * 2;
+       timeout = SW_FLAG_TIMEOUT;
 
        extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
        ew32(EXTCNF_CTRL, extcnf_ctrl);
@@ -623,7 +664,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 
 out:
        if (ret_val)
-               mutex_unlock(&nvm_mutex);
+               mutex_unlock(&swflag_mutex);
 
        return ret_val;
 }
@@ -632,9 +673,8 @@ out:
  *  e1000_release_swflag_ich8lan - Release software control flag
  *  @hw: pointer to the HW structure
  *
- *  Releases the software control flag for performing NVM and PHY operations.
- *  This is a function pointer entry point only called by read/write
- *  routines for the PHY and NVM parts.
+ *  Releases the software control flag for performing PHY and select
+ *  MAC CSR accesses.
  **/
 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
 {
@@ -644,7 +684,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
        extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
        ew32(EXTCNF_CTRL, extcnf_ctrl);
 
-       mutex_unlock(&nvm_mutex);
+       mutex_unlock(&swflag_mutex);
+
+       return;
 }
 
 /**
@@ -844,7 +886,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
        u32 i;
        u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
        s32 ret_val;
-       u16 word_addr, reg_data, reg_addr, phy_page = 0;
+       u16 reg, word_addr, reg_data, reg_addr, phy_page = 0;
 
        ret_val = e1000e_phy_hw_reset_generic(hw);
        if (ret_val)
@@ -859,6 +901,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
                        return ret_val;
        }
 
+       /* Dummy read to clear the phy wakeup bit after lcd reset */
+       if (hw->mac.type == e1000_pchlan)
+               e1e_rphy(hw, BM_WUC, &reg);
+
        /*
         * Initialize the PHY from the NVM on ICH platforms.  This
         * is needed due to an issue where the NVM configuration is
@@ -1053,6 +1099,38 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
        return ret_val;
 }
 
+/**
+ *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
+ *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ *  the phy speed. This function will manually set the LPLU bit and restart
+ *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ *  since it configures the same bit.
+ **/
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+       s32 ret_val = 0;
+       u16 oem_reg;
+
+       ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
+       if (ret_val)
+               goto out;
+
+       if (active)
+               oem_reg |= HV_OEM_BITS_LPLU;
+       else
+               oem_reg &= ~HV_OEM_BITS_LPLU;
+
+       oem_reg |= HV_OEM_BITS_RESTART_AN;
+       ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+
+out:
+       return ret_val;
+}
+
 /**
  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
  *  @hw: pointer to the HW structure
@@ -1314,12 +1392,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
            (words == 0)) {
                hw_dbg(hw, "nvm parameter(s) out of bounds\n");
-               return -E1000_ERR_NVM;
+               ret_val = -E1000_ERR_NVM;
+               goto out;
        }
 
-       ret_val = e1000_acquire_swflag_ich8lan(hw);
-       if (ret_val)
-               goto out;
+       nvm->ops.acquire_nvm(hw);
 
        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
        if (ret_val) {
@@ -1345,7 +1422,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
                }
        }
 
-       e1000_release_swflag_ich8lan(hw);
+       nvm->ops.release_nvm(hw);
 
 out:
        if (ret_val)
@@ -1603,11 +1680,15 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
                return -E1000_ERR_NVM;
        }
 
+       nvm->ops.acquire_nvm(hw);
+
        for (i = 0; i < words; i++) {
                dev_spec->shadow_ram[offset+i].modified = 1;
                dev_spec->shadow_ram[offset+i].value = data[i];
        }
 
+       nvm->ops.release_nvm(hw);
+
        return 0;
 }
 
@@ -1637,9 +1718,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
        if (nvm->type != e1000_nvm_flash_sw)
                goto out;
 
-       ret_val = e1000_acquire_swflag_ich8lan(hw);
-       if (ret_val)
-               goto out;
+       nvm->ops.acquire_nvm(hw);
 
        /*
         * We're writing to the opposite bank so if we're on bank 1,
@@ -1657,7 +1736,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
                old_bank_offset = 0;
                ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
                if (ret_val) {
-                       e1000_release_swflag_ich8lan(hw);
+                       nvm->ops.release_nvm(hw);
                        goto out;
                }
        } else {
@@ -1665,7 +1744,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
                new_bank_offset = 0;
                ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
                if (ret_val) {
-                       e1000_release_swflag_ich8lan(hw);
+                       nvm->ops.release_nvm(hw);
                        goto out;
                }
        }
@@ -1723,7 +1802,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
        if (ret_val) {
                /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
                hw_dbg(hw, "Flash commit failed.\n");
-               e1000_release_swflag_ich8lan(hw);
+               nvm->ops.release_nvm(hw);
                goto out;
        }
 
@@ -1736,7 +1815,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
        act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
        ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
        if (ret_val) {
-               e1000_release_swflag_ich8lan(hw);
+               nvm->ops.release_nvm(hw);
                goto out;
        }
        data &= 0xBFFF;
@@ -1744,7 +1823,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
                                                       act_offset * 2 + 1,
                                                       (u8)(data >> 8));
        if (ret_val) {
-               e1000_release_swflag_ich8lan(hw);
+               nvm->ops.release_nvm(hw);
                goto out;
        }
 
@@ -1757,7 +1836,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
        act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
        ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
        if (ret_val) {
-               e1000_release_swflag_ich8lan(hw);
+               nvm->ops.release_nvm(hw);
                goto out;
        }
 
@@ -1767,7 +1846,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
                dev_spec->shadow_ram[i].value = 0xFFFF;
        }
 
-       e1000_release_swflag_ich8lan(hw);
+       nvm->ops.release_nvm(hw);
 
        /*
         * Reload the EEPROM, or else modifications will not appear
@@ -1831,14 +1910,12 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
  **/
 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
 {
+       struct e1000_nvm_info *nvm = &hw->nvm;
        union ich8_flash_protected_range pr0;
        union ich8_hws_flash_status hsfsts;
        u32 gfpreg;
-       s32 ret_val;
 
-       ret_val = e1000_acquire_swflag_ich8lan(hw);
-       if (ret_val)
-               return;
+       nvm->ops.acquire_nvm(hw);
 
        gfpreg = er32flash(ICH_FLASH_GFPREG);
 
@@ -1859,7 +1936,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
        hsfsts.hsf_status.flockdn = true;
        ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 
-       e1000_release_swflag_ich8lan(hw);
+       nvm->ops.release_nvm(hw);
 }
 
 /**
@@ -2229,6 +2306,7 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
  **/
 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 {
+       u16 reg;
        u32 ctrl, icr, kab;
        s32 ret_val;
 
@@ -2304,6 +2382,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
                        hw_dbg(hw, "Auto Read Done did not complete\n");
                }
        }
+       /* Dummy read to clear the phy wakeup bit after lcd reset */
+       if (hw->mac.type == e1000_pchlan)
+               e1e_rphy(hw, BM_WUC, &reg);
 
        /*
         * For PCH, this write will make sure that any noise
@@ -3112,9 +3193,9 @@ static struct e1000_phy_operations ich8_phy_ops = {
 };
 
 static struct e1000_nvm_operations ich8_nvm_ops = {
-       .acquire_nvm            = e1000_acquire_swflag_ich8lan,
+       .acquire_nvm            = e1000_acquire_nvm_ich8lan,
        .read_nvm               = e1000_read_nvm_ich8lan,
-       .release_nvm            = e1000_release_swflag_ich8lan,
+       .release_nvm            = e1000_release_nvm_ich8lan,
        .update_nvm             = e1000_update_nvm_checksum_ich8lan,
        .valid_led_default      = e1000_valid_led_default_ich8lan,
        .validate_nvm           = e1000_validate_nvm_checksum_ich8lan,
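
The ich8lan.c changes above split serialization into two independent locks: NVM shadow-RAM and flash work is now guarded only by nvm_mutex through the new e1000_acquire_nvm_ich8lan/e1000_release_nvm_ich8lan helpers, while PHY and select MAC CSR accesses take swflag_mutex plus the EXTCNF_CTRL software flag. A user-space analogue of that two-lock layout, with pthread mutexes standing in for the kernel mutexes and the hardware-flag handling reduced to comments:

#include <pthread.h>
#include <stdio.h>

/* Two independent locks, as after this patch: nvm_mutex for NVM work,
 * swflag_mutex for PHY/CSR work that also needs the hardware flag. */
static pthread_mutex_t nvm_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t swflag_mutex = PTHREAD_MUTEX_INITIALIZER;

static void acquire_nvm(void)    { pthread_mutex_lock(&nvm_mutex); }
static void release_nvm(void)    { pthread_mutex_unlock(&nvm_mutex); }

static void acquire_swflag(void)
{
	pthread_mutex_lock(&swflag_mutex);
	/* the driver additionally sets E1000_EXTCNF_CTRL_SWFLAG and polls it */
}

static void release_swflag(void)
{
	/* the driver clears the hardware flag before dropping the mutex */
	pthread_mutex_unlock(&swflag_mutex);
}

int main(void)
{
	acquire_nvm();			/* NVM path no longer takes the sw flag */
	printf("shadow-RAM update under nvm_mutex only\n");
	release_nvm();

	acquire_swflag();		/* PHY register path */
	printf("PHY access under swflag_mutex\n");
	release_swflag();
	return 0;
}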
index 994401fd0664fbc188980ea188e33bdf95fdb0ec..f9d33ab05e97814de596a538a73aa3d975e44763 100644 (file)
@@ -164,16 +164,25 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
                 * MDIC mode. No harm in trying again in this case since
                 * the PHY ID is unknown at this point anyway
                 */
+               ret_val = phy->ops.acquire_phy(hw);
+               if (ret_val)
+                       goto out;
                ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
                if (ret_val)
                        goto out;
+               phy->ops.release_phy(hw);
 
                retry_count++;
        }
 out:
        /* Revert to MDIO fast mode, if applicable */
-       if (retry_count)
+       if (retry_count) {
+               ret_val = phy->ops.acquire_phy(hw);
+               if (ret_val)
+                       return ret_val;
                ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+               phy->ops.release_phy(hw);
+       }
 
        return ret_val;
 }
@@ -354,94 +363,173 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
 }
 
 /**
- *  e1000e_read_phy_reg_igp - Read igp PHY register
+ *  __e1000e_read_phy_reg_igp - Read igp PHY register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read
  *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
  *
  *  Acquires semaphore, if necessary, then reads the PHY register at offset
- *  and storing the retrieved information in data.  Release any acquired
+ *  and stores the retrieved information in data.  Release any acquired
  *  semaphores before exiting.
  **/
-s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+                                    bool locked)
 {
-       s32 ret_val;
+       s32 ret_val = 0;
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               return ret_val;
+       if (!locked) {
+               if (!(hw->phy.ops.acquire_phy))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire_phy(hw);
+               if (ret_val)
+                       goto out;
+       }
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
                ret_val = e1000e_write_phy_reg_mdic(hw,
                                                    IGP01E1000_PHY_PAGE_SELECT,
                                                    (u16)offset);
-               if (ret_val) {
-                       hw->phy.ops.release_phy(hw);
-                       return ret_val;
-               }
+               if (ret_val)
+                       goto release;
        }
 
        ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
-                                          data);
-
-       hw->phy.ops.release_phy(hw);
+                                         data);
 
+release:
+       if (!locked)
+               hw->phy.ops.release_phy(hw);
+out:
        return ret_val;
 }
 
+/**
+ *  e1000e_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset and stores the
+ *  retrieved information in data.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return __e1000e_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_read_phy_reg_igp_locked - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return __e1000e_read_phy_reg_igp(hw, offset, data, true);
+}
+
 /**
  *  e1000e_write_phy_reg_igp - Write igp PHY register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to write to
  *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
  *
  *  Acquires semaphore, if necessary, then writes the data to PHY register
  *  at the offset.  Release any acquired semaphores before exiting.
  **/
-s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+                                     bool locked)
 {
-       s32 ret_val;
+       s32 ret_val = 0;
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               return ret_val;
+       if (!locked) {
+               if (!(hw->phy.ops.acquire_phy))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire_phy(hw);
+               if (ret_val)
+                       goto out;
+       }
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
                ret_val = e1000e_write_phy_reg_mdic(hw,
                                                    IGP01E1000_PHY_PAGE_SELECT,
                                                    (u16)offset);
-               if (ret_val) {
-                       hw->phy.ops.release_phy(hw);
-                       return ret_val;
-               }
+               if (ret_val)
+                       goto release;
        }
 
        ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
                                            data);
 
-       hw->phy.ops.release_phy(hw);
+release:
+       if (!locked)
+               hw->phy.ops.release_phy(hw);
 
+out:
        return ret_val;
 }
 
 /**
- *  e1000e_read_kmrn_reg - Read kumeran register
+ *  e1000e_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return __e1000e_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_write_phy_reg_igp_locked - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return __e1000e_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ *  __e1000_read_kmrn_reg - Read kumeran register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read
  *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
  *
  *  Acquires semaphore, if necessary.  Then reads the PHY register at offset
  *  using the kumeran interface.  The information retrieved is stored in data.
  *  Release any acquired semaphores before exiting.
  **/
-s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+                                 bool locked)
 {
        u32 kmrnctrlsta;
-       s32 ret_val;
+       s32 ret_val = 0;
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               return ret_val;
+       if (!locked) {
+               if (!(hw->phy.ops.acquire_phy))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire_phy(hw);
+               if (ret_val)
+                       goto out;
+       }
 
        kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
                       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
@@ -452,40 +540,110 @@ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
        kmrnctrlsta = er32(KMRNCTRLSTA);
        *data = (u16)kmrnctrlsta;
 
-       hw->phy.ops.release_phy(hw);
+       if (!locked)
+               hw->phy.ops.release_phy(hw);
 
+out:
        return ret_val;
 }
 
 /**
- *  e1000e_write_kmrn_reg - Write kumeran register
+ *  e1000e_read_kmrn_reg -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset using the
+ *  kumeran interface.  The information retrieved is stored in data.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ *  e1000_read_kmrn_reg_locked -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the kumeran interface.  The
+ *  information retrieved is stored in data.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ *  __e1000_write_kmrn_reg - Write kumeran register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to write to
  *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
  *
  *  Acquires semaphore, if necessary.  Then write the data to PHY register
  *  at the offset using the kumeran interface.  Release any acquired semaphores
  *  before exiting.
  **/
-s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+                                  bool locked)
 {
        u32 kmrnctrlsta;
-       s32 ret_val;
+       s32 ret_val = 0;
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               return ret_val;
+       if (!locked) {
+               if (!(hw->phy.ops.acquire_phy))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire_phy(hw);
+               if (ret_val)
+                       goto out;
+       }
 
        kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
                       E1000_KMRNCTRLSTA_OFFSET) | data;
        ew32(KMRNCTRLSTA, kmrnctrlsta);
 
        udelay(2);
-       hw->phy.ops.release_phy(hw);
 
+       if (!locked)
+               hw->phy.ops.release_phy(hw);
+
+out:
        return ret_val;
 }
 
+/**
+ *  e1000e_write_kmrn_reg -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to the PHY register at the offset
+ *  using the kumeran interface.  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ *  e1000_write_kmrn_reg_locked -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Write the data to PHY register at the offset using the kumeran interface.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
 /**
  *  e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
  *  @hw: pointer to the HW structure
@@ -2105,6 +2263,10 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
        u32 page = offset >> IGP_PAGE_SHIFT;
        u32 page_shift = 0;
 
+       ret_val = hw->phy.ops.acquire_phy(hw);
+       if (ret_val)
+               return ret_val;
+
        /* Page 800 works differently than the rest so it has its own func */
        if (page == BM_WUC_PAGE) {
                ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
@@ -2112,10 +2274,6 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
                goto out;
        }
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               goto out;
-
        hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2135,18 +2293,15 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
                /* Page is shifted left, PHY expects (page x 32) */
                ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
                                                    (page << page_shift));
-               if (ret_val) {
-                       hw->phy.ops.release_phy(hw);
+               if (ret_val)
                        goto out;
-               }
        }
 
        ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
                                            data);
 
-       hw->phy.ops.release_phy(hw);
-
 out:
+       hw->phy.ops.release_phy(hw);
        return ret_val;
 }
 
@@ -2167,6 +2322,10 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
        u32 page = offset >> IGP_PAGE_SHIFT;
        u32 page_shift = 0;
 
+       ret_val = hw->phy.ops.acquire_phy(hw);
+       if (ret_val)
+               return ret_val;
+
        /* Page 800 works differently than the rest so it has its own func */
        if (page == BM_WUC_PAGE) {
                ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -2174,10 +2333,6 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
                goto out;
        }
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               goto out;
-
        hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2197,17 +2352,14 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
                /* Page is shifted left, PHY expects (page x 32) */
                ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
                                                    (page << page_shift));
-               if (ret_val) {
-                       hw->phy.ops.release_phy(hw);
+               if (ret_val)
                        goto out;
-               }
        }
 
        ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
                                           data);
-       hw->phy.ops.release_phy(hw);
-
 out:
+       hw->phy.ops.release_phy(hw);
        return ret_val;
 }
 
@@ -2226,17 +2378,17 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
        s32 ret_val;
        u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
 
+       ret_val = hw->phy.ops.acquire_phy(hw);
+       if (ret_val)
+               return ret_val;
+
        /* Page 800 works differently than the rest so it has its own func */
        if (page == BM_WUC_PAGE) {
                ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
                                                         true);
-               return ret_val;
+               goto out;
        }
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               return ret_val;
-
        hw->phy.addr = 1;
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2245,16 +2397,14 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
                ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
                                                    page);
 
-               if (ret_val) {
-                       hw->phy.ops.release_phy(hw);
-                       return ret_val;
-               }
+               if (ret_val)
+                       goto out;
        }
 
        ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
                                           data);
+out:
        hw->phy.ops.release_phy(hw);
-
        return ret_val;
 }
 
@@ -2272,17 +2422,17 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
        s32 ret_val;
        u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
 
+       ret_val = hw->phy.ops.acquire_phy(hw);
+       if (ret_val)
+               return ret_val;
+
        /* Page 800 works differently than the rest so it has its own func */
        if (page == BM_WUC_PAGE) {
                ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
                                                         false);
-               return ret_val;
+               goto out;
        }
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               return ret_val;
-
        hw->phy.addr = 1;
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2290,17 +2440,15 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
                ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
                                                    page);
 
-               if (ret_val) {
-                       hw->phy.ops.release_phy(hw);
-                       return ret_val;
-               }
+               if (ret_val)
+                       goto out;
        }
 
        ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
                                            data);
 
+out:
        hw->phy.ops.release_phy(hw);
-
        return ret_val;
 }
 
@@ -2320,6 +2468,8 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
  *  3) Write the address using the address opcode (0x11)
  *  4) Read or write the data using the data opcode (0x12)
  *  5) Restore 769_17.2 to its original value
+ *
+ *  Assumes semaphore already acquired.
  **/
 static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
                                          u16 *data, bool read)
@@ -2327,20 +2477,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
        s32 ret_val;
        u16 reg = BM_PHY_REG_NUM(offset);
        u16 phy_reg = 0;
-       u8  phy_acquired = 1;
-
 
        /* Gig must be disabled for MDIO accesses to page 800 */
        if ((hw->mac.type == e1000_pchlan) &&
           (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
                hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val) {
-               phy_acquired = 0;
-               goto out;
-       }
-
        /* All operations in this function are phy address 1 */
        hw->phy.addr = 1;
 
@@ -2397,8 +2539,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
        ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
 
 out:
-       if (phy_acquired == 1)
-               hw->phy.ops.release_phy(hw);
        return ret_val;
 }
 
@@ -2439,52 +2579,63 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
        return 0;
 }
 
+/**
+ *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ *  @hw:   pointer to the HW structure
+ *  @slow: true for slow mode, false for normal mode
+ *
+ *  Assumes semaphore already acquired.
+ **/
 s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
 {
        s32 ret_val = 0;
        u16 data = 0;
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               return ret_val;
-
        /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
        hw->phy.addr = 1;
        ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
                                         (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
-       if (ret_val) {
-               hw->phy.ops.release_phy(hw);
-               return ret_val;
-       }
+       if (ret_val)
+               goto out;
+
        ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
                                           (0x2180 | (slow << 10)));
+       if (ret_val)
+               goto out;
 
        /* dummy read when reverting to fast mode - throw away result */
        if (!slow)
-               e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
-       hw->phy.ops.release_phy(hw);
+               ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
 
+out:
        return ret_val;
 }
 
 /**
- *  e1000_read_phy_reg_hv -  Read HV PHY register
+ *  __e1000_read_phy_reg_hv -  Read HV PHY register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read
  *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
  *
  *  Acquires semaphore, if necessary, then reads the PHY register at offset
- *  and storing the retrieved information in data.  Release any acquired
+ *  and stores the retrieved information in data.  Release any acquired
  *  semaphore before exiting.
  **/
-s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+                                   bool locked)
 {
        s32 ret_val;
        u16 page = BM_PHY_REG_PAGE(offset);
        u16 reg = BM_PHY_REG_NUM(offset);
        bool in_slow_mode = false;
 
+       if (!locked) {
+               ret_val = hw->phy.ops.acquire_phy(hw);
+               if (ret_val)
+                       return ret_val;
+       }
+
        /* Workaround failure in MDIO access while cable is disconnected */
        if ((hw->phy.type == e1000_phy_82577) &&
            !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2508,10 +2659,6 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
                goto out;
        }
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               goto out;
-
        hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
 
        if (page == HV_INTC_FC_PAGE_START)
@@ -2529,42 +2676,76 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
                        ret_val = e1000e_write_phy_reg_mdic(hw,
                                                     IGP01E1000_PHY_PAGE_SELECT,
                                                     (page << IGP_PAGE_SHIFT));
-                       if (ret_val) {
-                               hw->phy.ops.release_phy(hw);
-                               goto out;
-                       }
                        hw->phy.addr = phy_addr;
                }
        }
 
        ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
                                          data);
-       hw->phy.ops.release_phy(hw);
-
 out:
        /* Revert to MDIO fast mode, if applicable */
        if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
                ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
 
+       if (!locked)
+               hw->phy.ops.release_phy(hw);
+
        return ret_val;
 }
 
 /**
- *  e1000_write_phy_reg_hv - Write HV PHY register
+ *  e1000_read_phy_reg_hv -  Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset and stores
+ *  the retrieved information in data.  Release the acquired semaphore
+ *  before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return __e1000_read_phy_reg_hv(hw, offset, data, false);
+}
+
+/**
+ *  e1000_read_phy_reg_hv_locked -  Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return __e1000_read_phy_reg_hv(hw, offset, data, true);
+}
+
+/**
+ *  __e1000_write_phy_reg_hv - Write HV PHY register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to write to
  *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
  *
  *  Acquires semaphore, if necessary, then writes the data to PHY register
  *  at the offset.  Release any acquired semaphores before exiting.
  **/
-s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+                                    bool locked)
 {
        s32 ret_val;
        u16 page = BM_PHY_REG_PAGE(offset);
        u16 reg = BM_PHY_REG_NUM(offset);
        bool in_slow_mode = false;
 
+       if (!locked) {
+               ret_val = hw->phy.ops.acquire_phy(hw);
+               if (ret_val)
+                       return ret_val;
+       }
+
        /* Workaround failure in MDIO access while cable is disconnected */
        if ((hw->phy.type == e1000_phy_82577) &&
            !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2588,10 +2769,6 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
                goto out;
        }
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val)
-               goto out;
-
        hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
 
        if (page == HV_INTC_FC_PAGE_START)
@@ -2607,15 +2784,10 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
            ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
            (data & (1 << 11))) {
                u16 data2 = 0x7EFF;
-               hw->phy.ops.release_phy(hw);
                ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
                                                         &data2, false);
                if (ret_val)
                        goto out;
-
-               ret_val = hw->phy.ops.acquire_phy(hw);
-               if (ret_val)
-                       goto out;
        }
 
        if (reg > MAX_PHY_MULTI_PAGE_REG) {
@@ -2630,26 +2802,52 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
                        ret_val = e1000e_write_phy_reg_mdic(hw,
                                                     IGP01E1000_PHY_PAGE_SELECT,
                                                     (page << IGP_PAGE_SHIFT));
-                       if (ret_val) {
-                               hw->phy.ops.release_phy(hw);
-                               goto out;
-                       }
                        hw->phy.addr = phy_addr;
                }
        }
 
        ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
                                          data);
-       hw->phy.ops.release_phy(hw);
 
 out:
        /* Revert to MDIO fast mode, if applicable */
        if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
                ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
 
+       if (!locked)
+               hw->phy.ops.release_phy(hw);
+
        return ret_val;
 }
 
+/**
+ *  e1000_write_phy_reg_hv - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to PHY register at the offset.
+ *  Release the acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return __e1000_write_phy_reg_hv(hw, offset, data, false);
+}
+
+/**
+ *  e1000_write_phy_reg_hv_locked - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.  Assumes semaphore
+ *  already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return __e1000_write_phy_reg_hv(hw, offset, data, true);
+}
+
 /**
  *  e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
  *  @page: page to be accessed
@@ -2671,10 +2869,9 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
  *  @data: pointer to the data to be read or written
  *  @read: determines if operation is read or written
  *
- *  Acquires semaphore, if necessary, then reads the PHY register at offset
- *  and storing the retreived information in data.  Release any acquired
- *  semaphores before exiting.  Note that the procedure to read these regs
- *  uses the address port and data port to read/write.
+ *  Reads the PHY register at offset and stores the retreived information
+ *  in data.  Assumes semaphore already acquired.  Note that the procedure
+ *  to read these regs uses the address port and data port to read/write.
  **/
 static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
                                           u16 *data, bool read)
@@ -2682,20 +2879,12 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
        s32 ret_val;
        u32 addr_reg = 0;
        u32 data_reg = 0;
-       u8  phy_acquired = 1;
 
        /* This takes care of the difference with desktop vs mobile phy */
        addr_reg = (hw->phy.type == e1000_phy_82578) ?
                   I82578_ADDR_REG : I82577_ADDR_REG;
        data_reg = addr_reg + 1;
 
-       ret_val = hw->phy.ops.acquire_phy(hw);
-       if (ret_val) {
-               hw_dbg(hw, "Could not acquire PHY\n");
-               phy_acquired = 0;
-               goto out;
-       }
-
        /* All operations in this function are phy address 2 */
        hw->phy.addr = 2;
 
@@ -2718,8 +2907,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
        }
 
 out:
-       if (phy_acquired == 1)
-               hw->phy.ops.release_phy(hw);
        return ret_val;
 }
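
The phy.c rework above applies one pattern throughout: a static __helper(hw, ..., bool locked) does the real register access and only acquires/releases the PHY semaphore when locked is false, while thin public entry points cover both the unlocked and the _locked case. A reduced, self-contained sketch of that shape, with hypothetical names and a pthread mutex standing in for the PHY semaphore:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t phy_sem = PTHREAD_MUTEX_INITIALIZER;

/* Core worker: touches the semaphore only when the caller does not hold it. */
static int __read_reg(unsigned int offset, unsigned short *data, int locked)
{
	if (!locked)
		pthread_mutex_lock(&phy_sem);

	*data = (unsigned short)(offset * 3);	/* stand-in for the MDIC access */

	if (!locked)
		pthread_mutex_unlock(&phy_sem);
	return 0;
}

/* Public entry point: acquires and releases the semaphore itself. */
static int read_reg(unsigned int offset, unsigned short *data)
{
	return __read_reg(offset, data, 0);
}

/* _locked entry point: assumes the caller already holds the semaphore. */
static int read_reg_locked(unsigned int offset, unsigned short *data)
{
	return __read_reg(offset, data, 1);
}

int main(void)
{
	unsigned short v;

	read_reg(0x10, &v);			/* caller without the lock */

	pthread_mutex_lock(&phy_sem);		/* caller that already owns it */
	read_reg_locked(0x11, &v);
	pthread_mutex_unlock(&phy_sem);

	printf("last value read: %u\n", v);
	return 0;
}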
 
index d004c359244c9b03e2e5e753f5300abc15be8bc1..aab3d971af517faf52a4a2cab51692512293ae95 100644 (file)
@@ -731,7 +731,7 @@ static int igb_set_ringparam(struct net_device *netdev,
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct igb_ring *temp_ring;
-       int i, err;
+       int i, err = 0;
        u32 new_rx_count, new_tx_count;
 
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -751,18 +751,30 @@ static int igb_set_ringparam(struct net_device *netdev,
                return 0;
        }
 
+       while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+               msleep(1);
+
+       if (!netif_running(adapter->netdev)) {
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       adapter->tx_ring[i].count = new_tx_count;
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       adapter->rx_ring[i].count = new_rx_count;
+               adapter->tx_ring_count = new_tx_count;
+               adapter->rx_ring_count = new_rx_count;
+               goto clear_reset;
+       }
+
        if (adapter->num_tx_queues > adapter->num_rx_queues)
                temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
        else
                temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
-       if (!temp_ring)
-               return -ENOMEM;
 
-       while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+       if (!temp_ring) {
+               err = -ENOMEM;
+               goto clear_reset;
+       }
 
-       if (netif_running(adapter->netdev))
-               igb_down(adapter);
+       igb_down(adapter);
 
        /*
         * We can't just free everything and then setup again,
@@ -819,14 +831,11 @@ static int igb_set_ringparam(struct net_device *netdev,
 
                adapter->rx_ring_count = new_rx_count;
        }
-
-       err = 0;
 err_setup:
-       if (netif_running(adapter->netdev))
-               igb_up(adapter);
-
-       clear_bit(__IGB_RESETTING, &adapter->state);
+       igb_up(adapter);
        vfree(temp_ring);
+clear_reset:
+       clear_bit(__IGB_RESETTING, &adapter->state);
        return err;
 }
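
The set_ringparam rework above (repeated for igbvf and ixgbe in the hunks that follow) takes the __IGB_RESETTING bit before doing anything else, short-circuits when the interface is down by only recording the new ring counts, and routes every exit through a clear_reset label that drops the bit. A compressed, user-space sketch of that control flow; the adapter struct, helper names, and stand-in calls are hypothetical:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, much-simplified adapter state for illustration only. */
struct adapter {
	int resetting;
	int running;
	int tx_count, rx_count;
};

static int set_ringparam(struct adapter *ad, int new_tx, int new_rx)
{
	void *temp_ring;
	int err = 0;

	/* grab the "resetting" bit first, as the patch now does */
	while (__sync_lock_test_and_set(&ad->resetting, 1))
		;				/* stand-in for the msleep() loop */

	if (!ad->running) {
		/* interface down: just record the new sizes and bail out */
		ad->tx_count = new_tx;
		ad->rx_count = new_rx;
		goto clear_reset;
	}

	temp_ring = malloc(64);			/* stand-in for vmalloc() */
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	/* down the device, allocate and swap rings, bring it back up ... */
	ad->tx_count = new_tx;
	ad->rx_count = new_rx;
	free(temp_ring);

clear_reset:
	__sync_lock_release(&ad->resetting);	/* stand-in for clear_bit() */
	return err;
}

int main(void)
{
	struct adapter ad = { .running = 0 };

	printf("err=%d tx=%d rx=%d\n",
	       set_ringparam(&ad, 512, 256), ad.tx_count, ad.rx_count);
	return 0;
}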
 
index ee17a097d1ca91929f4ab405d6a9b81a02a14498..c68265bd0d1a41d1f39dbbc68473b04e1d4076a2 100644 (file)
@@ -279,7 +279,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct igbvf_ring *temp_ring;
-       int err;
+       int err = 0;
        u32 new_rx_count, new_tx_count;
 
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -299,15 +299,22 @@ static int igbvf_set_ringparam(struct net_device *netdev,
                return 0;
        }
 
-       temp_ring = vmalloc(sizeof(struct igbvf_ring));
-       if (!temp_ring)
-               return -ENOMEM;
-
        while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
                msleep(1);
 
-       if (netif_running(adapter->netdev))
-               igbvf_down(adapter);
+       if (!netif_running(adapter->netdev)) {
+               adapter->tx_ring->count = new_tx_count;
+               adapter->rx_ring->count = new_rx_count;
+               goto clear_reset;
+       }
+
+       temp_ring = vmalloc(sizeof(struct igbvf_ring));
+       if (!temp_ring) {
+               err = -ENOMEM;
+               goto clear_reset;
+       }
+
+       igbvf_down(adapter);
 
        /*
         * We can't just free everything and then setup again,
@@ -339,14 +346,11 @@ static int igbvf_set_ringparam(struct net_device *netdev,
 
                memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
        }
-
-       err = 0;
 err_setup:
-       if (netif_running(adapter->netdev))
-               igbvf_up(adapter);
-
-       clear_bit(__IGBVF_RESETTING, &adapter->state);
+       igbvf_up(adapter);
        vfree(temp_ring);
+clear_reset:
+       clear_bit(__IGBVF_RESETTING, &adapter->state);
        return err;
 }
 
index fa314cb005a4dcf15ce8884c0da5e9987f1e2604..856c18c207f33c9a7a4db9fad6dacc0cd6d47d07 100644 (file)
@@ -798,7 +798,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
-       int i, err;
+       int i, err = 0;
        u32 new_rx_count, new_tx_count;
        bool need_update = false;
 
@@ -822,6 +822,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
        while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
                msleep(1);
 
+       if (!netif_running(adapter->netdev)) {
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       adapter->tx_ring[i].count = new_tx_count;
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       adapter->rx_ring[i].count = new_rx_count;
+               adapter->tx_ring_count = new_tx_count;
+               adapter->rx_ring_count = new_rx_count;
+               goto err_setup;
+       }
+
        temp_tx_ring = kcalloc(adapter->num_tx_queues,
                               sizeof(struct ixgbe_ring), GFP_KERNEL);
        if (!temp_tx_ring) {
@@ -879,8 +889,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 
        /* if rings need to be updated, here's the place to do it in one shot */
        if (need_update) {
-               if (netif_running(netdev))
-                       ixgbe_down(adapter);
+               ixgbe_down(adapter);
 
                /* tx */
                if (new_tx_count != adapter->tx_ring_count) {
@@ -897,13 +906,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                        temp_rx_ring = NULL;
                        adapter->rx_ring_count = new_rx_count;
                }
-       }
-
-       /* success! */
-       err = 0;
-       if (netif_running(netdev))
                ixgbe_up(adapter);
-
+       }
 err_setup:
        clear_bit(__IXGBE_RESETTING, &adapter->state);
        return err;
index 7cbf6f9b51deddb790372a652e169ca0ccf0ef77..2559991eea6a59e23a88837a897cf8e8dad7cc0a 100644 (file)
@@ -111,9 +111,6 @@ struct pppoe_net {
        rwlock_t hash_lock;
 };
 
-/* to eliminate a race btw pppoe_flush_dev and pppoe_release */
-static DEFINE_SPINLOCK(flush_lock);
-
 /*
  * PPPoE could be in the following stages:
  * 1) Discovery stage (to obtain remote MAC and Session ID)
@@ -303,45 +300,48 @@ static void pppoe_flush_dev(struct net_device *dev)
        write_lock_bh(&pn->hash_lock);
        for (i = 0; i < PPPOE_HASH_SIZE; i++) {
                struct pppox_sock *po = pn->hash_table[i];
+               struct sock *sk;
 
-               while (po != NULL) {
-                       struct sock *sk;
-                       if (po->pppoe_dev != dev) {
+               while (po) {
+                       while (po && po->pppoe_dev != dev) {
                                po = po->next;
-                               continue;
                        }
+
+                       if (!po)
+                               break;
+
                        sk = sk_pppox(po);
-                       spin_lock(&flush_lock);
-                       po->pppoe_dev = NULL;
-                       spin_unlock(&flush_lock);
-                       dev_put(dev);
 
                        /* We always grab the socket lock, followed by the
-                        * hash_lock, in that order.  Since we should
-                        * hold the sock lock while doing any unbinding,
-                        * we need to release the lock we're holding.
-                        * Hold a reference to the sock so it doesn't disappear
-                        * as we're jumping between locks.
+                        * hash_lock, in that order.  Since we should hold the
+                        * sock lock while doing any unbinding, we need to
+                        * release the lock we're holding.  Hold a reference to
+                        * the sock so it doesn't disappear as we're jumping
+                        * between locks.
                         */
 
                        sock_hold(sk);
-
                        write_unlock_bh(&pn->hash_lock);
                        lock_sock(sk);
 
-                       if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+                       if (po->pppoe_dev == dev &&
+                           sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
                                pppox_unbind_sock(sk);
                                sk->sk_state = PPPOX_ZOMBIE;
                                sk->sk_state_change(sk);
+                               po->pppoe_dev = NULL;
+                               dev_put(dev);
                        }
 
                        release_sock(sk);
                        sock_put(sk);
 
-                       /* Restart scan at the beginning of this hash chain.
-                        * While the lock was dropped the chain contents may
-                        * have changed.
+                       /* Restart the scan at the head of the current hash
+                        * chain.  We dropped the locks, so the chain may have
+                        * changed underneath us.
                         */
+
+                       BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
                        write_lock_bh(&pn->hash_lock);
                        po = pn->hash_table[i];
                }
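
The comment in the hunk above describes a classic lock-ordering escape: the sock lock must be taken before the hash_lock, so the scan pins the socket with a reference, drops the hash_lock, takes the sock lock, re-validates that the binding still points at the device, unbinds, and only then retakes the hash_lock and restarts the chain, because the chain may have changed in the gap. A small self-contained model of that pattern in user-space C (the item/table types and names are invented; sock_hold()/sock_put() become plain refcount fields, and the single-threaded main() only exercises the flow):

	#include <pthread.h>
	#include <stdio.h>

	struct item {
		struct item *next;
		pthread_mutex_t lock;   /* plays the role of the sock lock */
		int refcnt;             /* sock_hold()/sock_put() in the driver */
		int bound_to;           /* plays the role of po->pppoe_dev */
	};

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;  /* hash_lock */
	static struct item *chain;

	static void flush_bindings(int dev)
	{
		struct item *it;

		pthread_mutex_lock(&table_lock);
		it = chain;
		while (it) {
			while (it && it->bound_to != dev)
				it = it->next;
			if (!it)
				break;

			/* ordering is item lock first, table lock second, so
			 * drop the table lock before taking the item lock;
			 * pin the item so it cannot vanish in between */
			it->refcnt++;
			pthread_mutex_unlock(&table_lock);

			pthread_mutex_lock(&it->lock);
			if (it->bound_to == dev)        /* re-validate after the gap */
				it->bound_to = -1;      /* the actual unbind */
			pthread_mutex_unlock(&it->lock);
			it->refcnt--;

			/* the chain may have changed while unlocked: restart it */
			pthread_mutex_lock(&table_lock);
			it = chain;
		}
		pthread_mutex_unlock(&table_lock);
	}

	int main(void)
	{
		struct item a = { .next = NULL, .refcnt = 1, .bound_to = 3 };

		pthread_mutex_init(&a.lock, NULL);
		chain = &a;
		flush_bindings(3);
		printf("bound_to after flush: %d\n", a.bound_to);
		return 0;
	}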
@@ -388,11 +388,16 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
        struct pppox_sock *po = pppox_sk(sk);
        struct pppox_sock *relay_po;
 
+       /* Backlog receive.  The semantics of backlog rcv preclude any other
+        * code from running inside the lock_sock()/release_sock() bounds,
+        * meaning sk->sk_state can't change.
+        */
+
        if (sk->sk_state & PPPOX_BOUND) {
                ppp_input(&po->chan, skb);
        } else if (sk->sk_state & PPPOX_RELAY) {
-               relay_po = get_item_by_addr(dev_net(po->pppoe_dev),
-                                               &po->pppoe_relay);
+               relay_po = get_item_by_addr(sock_net(sk),
+                                           &po->pppoe_relay);
                if (relay_po == NULL)
                        goto abort_kfree;
 
@@ -447,6 +452,10 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
                goto drop;
 
        pn = pppoe_pernet(dev_net(dev));
+
+       /* Note that get_item does a sock_hold(), so sk_pppox(po)
+        * is known to be safe.
+        */
        po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
        if (!po)
                goto drop;
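
The new comment relies on get_item() returning the socket with an extra reference already held (it does a sock_hold() under the hash lock), which is what makes sk_pppox(po) safe to use after the lock is dropped. A self-contained user-space model of that convention; get_item() is the driver's name, but the entry type and put_item() are invented for the sketch (the driver's callers drop the reference with sock_put() directly):

	#include <pthread.h>
	#include <stdio.h>

	struct entry {
		struct entry *next;
		int sid;                        /* like po->pppoe_pa.sid */
		int refcnt;
	};

	static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct entry *bucket;

	/* look up an entry and return it with an extra reference held, or NULL */
	static struct entry *get_item(int sid)
	{
		struct entry *e;

		pthread_mutex_lock(&hash_lock);
		for (e = bucket; e; e = e->next)
			if (e->sid == sid) {
				e->refcnt++;    /* sock_hold() in the driver */
				break;
			}
		pthread_mutex_unlock(&hash_lock);
		return e;
	}

	/* drop the reference taken by get_item(); the driver uses sock_put(),
	 * an atomic drop, so it needs no lock for this */
	static void put_item(struct entry *e)
	{
		pthread_mutex_lock(&hash_lock);
		e->refcnt--;
		pthread_mutex_unlock(&hash_lock);
	}

	int main(void)
	{
		struct entry a = { .next = NULL, .sid = 7, .refcnt = 1 };
		struct entry *e;

		bucket = &a;
		e = get_item(7);
		if (e) {
			printf("found sid %d, refcnt now %d\n", e->sid, e->refcnt);
			put_item(e);
		}
		return 0;
	}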
@@ -561,6 +570,7 @@ static int pppoe_release(struct socket *sock)
        struct sock *sk = sock->sk;
        struct pppox_sock *po;
        struct pppoe_net *pn;
+       struct net *net = NULL;
 
        if (!sk)
                return 0;
@@ -571,44 +581,28 @@ static int pppoe_release(struct socket *sock)
                return -EBADF;
        }
 
+       po = pppox_sk(sk);
+
+       if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+               dev_put(po->pppoe_dev);
+               po->pppoe_dev = NULL;
+       }
+
        pppox_unbind_sock(sk);
 
        /* Signal the death of the socket. */
        sk->sk_state = PPPOX_DEAD;
 
-       /*
-        * pppoe_flush_dev could lead to a race with
-        * this routine so we use flush_lock to eliminate
-        * such a case (we only need per-net specific data)
-        */
-       spin_lock(&flush_lock);
-       po = pppox_sk(sk);
-       if (!po->pppoe_dev) {
-               spin_unlock(&flush_lock);
-               goto out;
-       }
-       pn = pppoe_pernet(dev_net(po->pppoe_dev));
-       spin_unlock(&flush_lock);
+       net = sock_net(sk);
+       pn = pppoe_pernet(net);
 
        /*
         * protect "po" from concurrent updates
         * on pppoe_flush_dev
         */
-       write_lock_bh(&pn->hash_lock);
+       delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
+                   po->pppoe_ifindex);
 
-       po = pppox_sk(sk);
-       if (stage_session(po->pppoe_pa.sid))
-               __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
-                               po->pppoe_ifindex);
-
-       if (po->pppoe_dev) {
-               dev_put(po->pppoe_dev);
-               po->pppoe_dev = NULL;
-       }
-
-       write_unlock_bh(&pn->hash_lock);
-
-out:
        sock_orphan(sk);
        sock->sk = NULL;
 
@@ -625,8 +619,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
        struct sock *sk = sock->sk;
        struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
        struct pppox_sock *po = pppox_sk(sk);
-       struct net_device *dev;
+       struct net_device *dev = NULL;
        struct pppoe_net *pn;
+       struct net *net = NULL;
        int error;
 
        lock_sock(sk);
@@ -652,12 +647,14 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
        /* Delete the old binding */
        if (stage_session(po->pppoe_pa.sid)) {
                pppox_unbind_sock(sk);
+               pn = pppoe_pernet(sock_net(sk));
+               delete_item(pn, po->pppoe_pa.sid,
+                           po->pppoe_pa.remote, po->pppoe_ifindex);
                if (po->pppoe_dev) {
-                       pn = pppoe_pernet(dev_net(po->pppoe_dev));
-                       delete_item(pn, po->pppoe_pa.sid,
-                               po->pppoe_pa.remote, po->pppoe_ifindex);
                        dev_put(po->pppoe_dev);
+                       po->pppoe_dev = NULL;
                }
+
                memset(sk_pppox(po) + 1, 0,
                       sizeof(struct pppox_sock) - sizeof(struct sock));
                sk->sk_state = PPPOX_NONE;
@@ -666,16 +663,15 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
        /* Re-bind in session stage only */
        if (stage_session(sp->sa_addr.pppoe.sid)) {
                error = -ENODEV;
-               dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev);
+               net = sock_net(sk);
+               dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
                if (!dev)
-                       goto end;
+                       goto err_put;
 
                po->pppoe_dev = dev;
                po->pppoe_ifindex = dev->ifindex;
-               pn = pppoe_pernet(dev_net(dev));
-               write_lock_bh(&pn->hash_lock);
+               pn = pppoe_pernet(net);
                if (!(dev->flags & IFF_UP)) {
-                       write_unlock_bh(&pn->hash_lock);
                        goto err_put;
                }
 
@@ -683,6 +679,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
                       &sp->sa_addr.pppoe,
                       sizeof(struct pppoe_addr));
 
+               write_lock_bh(&pn->hash_lock);
                error = __set_item(pn, po);
                write_unlock_bh(&pn->hash_lock);
                if (error < 0)
@@ -696,8 +693,11 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
                po->chan.ops = &pppoe_chan_ops;
 
                error = ppp_register_net_channel(dev_net(dev), &po->chan);
-               if (error)
+               if (error) {
+                       delete_item(pn, po->pppoe_pa.sid,
+                                   po->pppoe_pa.remote, po->pppoe_ifindex);
                        goto err_put;
+               }
 
                sk->sk_state = PPPOX_CONNECTED;
        }
@@ -915,6 +915,14 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
        struct pppoe_hdr *ph;
        int data_len = skb->len;
 
+       /* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP
+        * xmit operations conclude prior to an unregistration call.  Thus
+        * sk->sk_state cannot change, so we don't need to do lock_sock().
+        * We also must not take lock_sock() here: that would reverse the lock
+        * ordering used when calling ppp_unregister_channel() and so could
+        * deadlock.
+        */
+
        if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
                goto abort;
 
@@ -944,7 +952,6 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
                        po->pppoe_pa.remote, NULL, data_len);
 
        dev_queue_xmit(skb);
-
        return 1;
 
 abort:
index 01f9432c31ef9f2c02ec3df3dce4159c2fda1bb4..98bff5ada09ada5d5023ff2e741e72d0b5f56c2a 100644 (file)
@@ -444,7 +444,8 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
  * the appropriate LRO method
  */
 static void efx_rx_packet_lro(struct efx_channel *channel,
-                             struct efx_rx_buffer *rx_buf)
+                             struct efx_rx_buffer *rx_buf,
+                             bool checksummed)
 {
        struct napi_struct *napi = &channel->napi_str;
 
@@ -466,7 +467,8 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
                skb->len = rx_buf->len;
                skb->data_len = rx_buf->len;
                skb->truesize += rx_buf->len;
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               skb->ip_summed =
+                       checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
 
                napi_gro_frags(napi);
 
@@ -475,6 +477,7 @@ out:
                rx_buf->page = NULL;
        } else {
                EFX_BUG_ON_PARANOID(!rx_buf->skb);
+               EFX_BUG_ON_PARANOID(!checksummed);
 
                napi_gro_receive(napi, rx_buf->skb);
                rx_buf->skb = NULL;
@@ -570,7 +573,7 @@ void __efx_rx_packet(struct efx_channel *channel,
        }
 
        if (likely(checksummed || rx_buf->page)) {
-               efx_rx_packet_lro(channel, rx_buf);
+               efx_rx_packet_lro(channel, rx_buf, checksummed);
                goto done;
        }
 
index f49d0800c1d103c70bdb37f59cba9f00047b5285..528b912a4b0dbac26ecd88a77498a4be91743376 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/phy.h>
 #include <linux/cache.h>
 #include <linux/io.h>
+#include <asm/cacheflush.h>
 
 #include "sh_eth.h"
 
index 54bf0912b7373ec7bb6cfad5ffde55e1ba82203f..d9ebac8a2d99d8bc30a566ce1217a91bd1eee7db 100644 (file)
@@ -517,8 +517,7 @@ again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);
 
-       /* Put new one in send queue and do transmit */
-       __skb_queue_head(&vi->send, skb);
+       /* Try to transmit */
        capacity = xmit_skb(vi, skb);
 
        /* This can happen with OOM and indirect buffers. */
@@ -532,8 +531,17 @@ again:
                }
                return NETDEV_TX_BUSY;
        }
-
        vi->svq->vq_ops->kick(vi->svq);
+
+       /*
+        * Put the new skb on the send queue.  You'd expect to need this
+        * before xmit_skb() calls add_buf(), since the callback can be
+        * triggered immediately after that.  But because the callback only
+        * triggers another call back into this function, the normal network
+        * xmit locking prevents the race.
+        */
+       __skb_queue_head(&vi->send, skb);
+
        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
        nf_reset(skb);
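
Condensed, the transmit path after this change orders its steps as in the sketch below. The identifiers are the driver's own, but the body is abbreviated (the retry label and the capacity bookkeeping that stops and restarts the queue are elided), so read it as an outline of the hunk above rather than the actual function:

	static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct virtnet_info *vi = netdev_priv(dev);
		int capacity;

		free_old_xmit_skbs(vi);                 /* reclaim completed buffers */

		capacity = xmit_skb(vi, skb);           /* add_buf() on the send vq */
		if (unlikely(capacity < 0)) {
			/* OOM / indirect-buffer failure handling from the hunk above */
			return NETDEV_TX_BUSY;
		}
		vi->svq->vq_ops->kick(vi->svq);

		/* queue the skb only after the kick: the completion callback merely
		 * re-enters this function, and the netdev xmit lock serializes those
		 * entries, so the late queueing cannot race with the free path */
		__skb_queue_head(&vi->send, skb);

		skb_orphan(skb);                        /* don't wait for tx completion */
		nf_reset(skb);

		/* ... capacity check that may stop the queue is elided ... */
		return NETDEV_TX_OK;
	}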