bnx2x: Recovery flow bug fixes
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2f6361e949f0c836562ff896928cb6709abf883a..c945df06161836815585d5c22fb3639b1ea72506 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -468,7 +468,9 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
        while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
 
-               if (!cnt) {
+               if (!cnt ||
+                   (bp->recovery_state != BNX2X_RECOVERY_DONE &&
+                    bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
@@ -498,9 +500,13 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
 
-               DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
-                  "  using indirect\n", dst_addr, len32);
-               bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+               DP(BNX2X_MSG_OFF,
+                  "DMAE is not ready (dst_addr %08x len32 %d) using indirect\n",
+                  dst_addr, len32);
+               if (CHIP_IS_E1(bp))
+                       bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+               else
+                       bnx2x_init_str_wr(bp, dst_addr, data, len32);
                return;
        }
 
@@ -528,10 +534,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;
 
-               DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
-                  "  using indirect\n", src_addr, len32);
-               for (i = 0; i < len32; i++)
-                       data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+               if (CHIP_IS_E1(bp)) {
+                       DP(BNX2X_MSG_OFF,
+                          "DMAE is not ready (src_addr %08x len32 %d) using indirect\n",
+                          src_addr, len32);
+                       for (i = 0; i < len32; i++)
+                               data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+               } else
+                       for (i = 0; i < len32; i++)
+                               data[i] = REG_RD(bp, src_addr + i*4);
+
                return;
        }
 
@@ -941,7 +953,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
 
                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
-                                 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
+                                 i, j, rx_bd[1], rx_bd[0], sw_bd->data);
                }
 
                start = RX_SGE(fp->rx_sge_prod);
@@ -1007,8 +1019,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
  * initialization.
  */
 #define FLR_WAIT_USEC          10000   /* 10 miliseconds */
-#define FLR_WAIT_INTERAVAL     50      /* usec */
-#define        FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
+#define FLR_WAIT_INTERVAL      50      /* usec */
+#define        FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
 
 struct pbf_pN_buf_regs {
        int pN;
@@ -1041,7 +1053,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
        while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
               (init_crd - crd_start))) {
                if (cur_cnt--) {
-                       udelay(FLR_WAIT_INTERAVAL);
+                       udelay(FLR_WAIT_INTERVAL);
                        crd = REG_RD(bp, regs->crd);
                        crd_freed = REG_RD(bp, regs->crd_freed);
                } else {
@@ -1055,7 +1067,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
-          poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
 }
 
 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
@@ -1073,7 +1085,7 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
 
        while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
                if (cur_cnt--) {
-                       udelay(FLR_WAIT_INTERAVAL);
+                       udelay(FLR_WAIT_INTERVAL);
                        occup = REG_RD(bp, regs->lines_occup);
                        freed = REG_RD(bp, regs->lines_freed);
                } else {
@@ -1087,7 +1099,7 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
-          poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
 }
 
 static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
@@ -1097,7 +1109,7 @@ static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
        u32 val;
 
        while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
-               udelay(FLR_WAIT_INTERAVAL);
+               udelay(FLR_WAIT_INTERVAL);
 
        return val;
 }
@@ -1210,7 +1222,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
        int ret = 0;
 
        if (REG_RD(bp, comp_addr)) {
-               BNX2X_ERR("Cleanup complete is not 0\n");
+               BNX2X_ERR("Cleanup complete was not 0 before sending\n");
                return 1;
        }
 
@@ -1219,7 +1231,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
        op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
        op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
 
-       DP(BNX2X_MSG_SP, "FW Final cleanup\n");
+       DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
        REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
 
        if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
@@ -1334,6 +1346,7 @@ static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
        REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 
        /* Poll HW usage counters */
+       DP(BNX2X_MSG_SP, "Polling usage counters\n");
        if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
                return -EBUSY;
 
@@ -2318,12 +2331,6 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
-       return 2 * vn + BP_PORT(bp);
-}
-
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 {
        struct rate_shaping_vars_per_vn m_rs_vn;
@@ -2475,22 +2482,6 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
           "rate shaping and fairness are disabled\n");
 }
 
-static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
-{
-       int func;
-       int vn;
-
-       /* Set the attention towards other drivers on the same port */
-       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
-               if (vn == BP_VN(bp))
-                       continue;
-
-               func = func_by_vn(bp, vn);
-               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
-                      (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
-       }
-}
-
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2549,6 +2540,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
        if (bp->state != BNX2X_STATE_OPEN)
                return;
 
+       /* read updated dcb configuration */
+       bnx2x_dcbx_pmf_update(bp);
+
        bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
 
        if (bp->link_vars.link_up)
@@ -2643,15 +2637,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
        return rc;
 }
 
-static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
-{
-#ifdef BCM_CNIC
-       /* Statistics are not supported for CNIC Clients at the moment */
-       if (IS_FCOE_FP(fp))
-               return false;
-#endif
-       return true;
-}
 
 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
@@ -2695,11 +2680,11 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
         *  parent connection). The statistics are zeroed when the parent
         *  connection is initialized.
         */
-       if (stat_counter_valid(bp, fp)) {
-               __set_bit(BNX2X_Q_FLG_STATS, &flags);
-               if (zero_stats)
-                       __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
-       }
+
+       __set_bit(BNX2X_Q_FLG_STATS, &flags);
+       if (zero_stats)
+               __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
 
        return flags;
 }
@@ -2808,8 +2793,8 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
        /* This should be a maximum number of data bytes that may be
         * placed on the BD (not including paddings).
         */
-       rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
-               IP_HEADER_ALIGNMENT_PADDING;
+       rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+               BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
 
        rxq_init->cl_qzone_id = fp->cl_qzone_id;
        rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -2940,6 +2925,143 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
         */
 }
 
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+       struct eth_stats_info *ether_stat =
+               &bp->slowpath->drv_info_to_mcp.ether_stat;
+
+       /* leave last char as NULL */
+       memcpy(ether_stat->version, DRV_MODULE_VERSION,
+              ETH_STAT_INFO_VERSION_LEN - 1);
+
+       bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
+                                        DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+                                        ether_stat->mac_local);
+
+       ether_stat->mtu_size = bp->dev->mtu;
+
+       if (bp->dev->features & NETIF_F_RXCSUM)
+               ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+       if (bp->dev->features & NETIF_F_TSO)
+               ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+       ether_stat->feature_flags |= bp->common.boot_mode;
+
+       ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+       ether_stat->txq_size = bp->tx_ring_size;
+       ether_stat->rxq_size = bp->rx_ring_size;
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+       struct fcoe_stats_info *fcoe_stat =
+               &bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+       memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+
+       fcoe_stat->qos_priority =
+               app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+       /* insert FCoE stats from ramrod response */
+       if (!NO_FCOE(bp)) {
+               struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+                       &bp->fw_stats_data->queue_stats[FCOE_IDX].
+                       tstorm_queue_statistics;
+
+               struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+                       &bp->fw_stats_data->queue_stats[FCOE_IDX].
+                       xstorm_queue_statistics;
+
+               struct fcoe_statistics_params *fw_fcoe_stat =
+                       &bp->fw_stats_data->fcoe;
+
+               ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
+                      fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+               ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
+                      fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->mcast_pkts_sent);
+       }
+
+       /* ask L5 driver to add data to the struct */
+       bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+#endif
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+       struct iscsi_stats_info *iscsi_stat =
+               &bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+       memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+
+       iscsi_stat->qos_priority =
+               app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+       /* ask L5 driver to add data to the struct */
+       bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+#endif
+}
+
 /* called due to MCP event (on pmf):
  *     reread new bandwidth configuration
  *     configure FW
@@ -2960,6 +3082,50 @@ static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
        bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
 }
 
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+       enum drv_info_opcode op_code;
+       u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+
+       /* if drv_info version supported by MFW doesn't match - send NACK */
+       if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+               return;
+       }
+
+       op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+                 DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+       memset(&bp->slowpath->drv_info_to_mcp, 0,
+              sizeof(union drv_info_to_mcp));
+
+       switch (op_code) {
+       case ETH_STATS_OPCODE:
+               bnx2x_drv_info_ether_stat(bp);
+               break;
+       case FCOE_STATS_OPCODE:
+               bnx2x_drv_info_fcoe_stat(bp);
+               break;
+       case ISCSI_STATS_OPCODE:
+               bnx2x_drv_info_iscsi_stat(bp);
+               break;
+       default:
+               /* if op code isn't supported - send NACK */
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+               return;
+       }
+
+       /* if we got drv_info attn from MFW then these fields are defined in
+        * shmem2 for sure
+        */
+       SHMEM2_WR(bp, drv_info_host_addr_lo,
+               U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+       SHMEM2_WR(bp, drv_info_host_addr_hi,
+               U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+       bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
        DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -3318,6 +3484,17 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
        netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
               " the driver to shutdown the card to prevent permanent"
               " damage.  Please contact OEM Support for assistance\n");
+
+       /*
+        * Schedule device reset (unload)
+        * This is due to some boards consuming sufficient power when driver is
+        * up to overheat if fan fails.
+        */
+       smp_mb__before_clear_bit();
+       set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
+       smp_mb__after_clear_bit();
+       schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
 }
 
 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3456,6 +3633,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                        if (val & DRV_STATUS_SET_MF_BW)
                                bnx2x_set_mf_bw(bp);
 
+                       if (val & DRV_STATUS_DRV_INFO_REQ)
+                               bnx2x_handle_drv_info_req(bp);
                        if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
                                bnx2x_pmf_update(bp);
 
@@ -3547,11 +3726,11 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
  */
 void bnx2x_set_reset_global(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-
+       u32 val;
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
-       barrier();
-       mmiowb();
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
 /*
@@ -3561,11 +3740,11 @@ void bnx2x_set_reset_global(struct bnx2x *bp)
  */
 static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-
+       u32 val;
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
-       barrier();
-       mmiowb();
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
 /*
@@ -3588,15 +3767,17 @@ static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
  */
 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val;
        u32 bit = BP_PATH(bp) ?
                BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 
        /* Clear the bit */
        val &= ~bit;
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
-       barrier();
-       mmiowb();
+
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
 /*
@@ -3606,15 +3787,16 @@ static inline void bnx2x_set_reset_done(struct bnx2x *bp)
  */
 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val;
        u32 bit = BP_PATH(bp) ?
                BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 
        /* Set the bit */
        val |= bit;
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
-       barrier();
-       mmiowb();
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
 /*
@@ -3632,25 +3814,28 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
 }
 
 /*
- * Increment the load counter for the current engine.
+ * set pf load for the current pf.
  *
  * should be run under rtnl lock
  */
-void bnx2x_inc_load_cnt(struct bnx2x *bp)
+void bnx2x_set_pf_load(struct bnx2x *bp)
 {
-       u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val1, val;
        u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
                             BNX2X_PATH0_LOAD_CNT_MASK;
        u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
                             BNX2X_PATH0_LOAD_CNT_SHIFT;
 
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
        DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
 
        /* get the current counter value */
        val1 = (val & mask) >> shift;
 
-       /* increment... */
-       val1++;
+       /* set bit of that PF */
+       val1 |= (1 << bp->pf_num);
 
        /* clear the old value */
        val &= ~mask;
@@ -3659,34 +3844,35 @@ void bnx2x_inc_load_cnt(struct bnx2x *bp)
        val |= ((val1 << shift) & mask);
 
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
-       barrier();
-       mmiowb();
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
 /**
- * bnx2x_dec_load_cnt - decrement the load counter
+ * bnx2x_clear_pf_load - clear pf load mark
  *
  * @bp:                driver handle
  *
  * Should be run under rtnl lock.
  * Decrements the load counter for the current engine. Returns
- * the new counter value.
+ * whether other functions are still loaded
  */
-u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+bool bnx2x_clear_pf_load(struct bnx2x *bp)
 {
-       u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val1, val;
        u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
                             BNX2X_PATH0_LOAD_CNT_MASK;
        u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
                             BNX2X_PATH0_LOAD_CNT_SHIFT;
 
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
        DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
 
        /* get the current counter value */
        val1 = (val & mask) >> shift;
 
-       /* decrement... */
-       val1--;
+       /* clear bit of that PF */
+       val1 &= ~(1 << bp->pf_num);
 
        /* clear the old value */
        val &= ~mask;
@@ -3695,18 +3881,16 @@ u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
        val |= ((val1 << shift) & mask);
 
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
-       barrier();
-       mmiowb();
-
-       return val1;
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       return val1 != 0;
 }
 
 /*
- * Read the load counter for the current engine.
+ * Read the load status for the current engine.
  *
  * should be run under rtnl lock
  */
-static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
+static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
 {
        u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
                             BNX2X_PATH0_LOAD_CNT_MASK);
@@ -3718,23 +3902,23 @@ static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
 
        val = (val & mask) >> shift;
 
-       DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
+       DP(NETIF_MSG_HW, "load mask for engine %d = 0x%x\n", engine, val);
 
-       return val;
+       return val != 0;
 }
 
 /*
- * Reset the load counter for the current engine.
- *
- * should be run under rtnl lock
+ * Reset the load status for the current engine.
  */
-static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+static inline void bnx2x_clear_load_status(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val;
        u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
-                            BNX2X_PATH0_LOAD_CNT_MASK);
-
+                   BNX2X_PATH0_LOAD_CNT_MASK);
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+       val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
 static inline void _print_next_block(int idx, const char *blk)
@@ -5247,7 +5431,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
        u8 cos;
        unsigned long q_type = 0;
        u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
-
+       fp->rx_queue = fp_idx;
        fp->cid = fp_idx;
        fp->cl_id = bnx2x_fp_cl_id(fp);
        fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
@@ -6521,13 +6705,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
        u16 cdu_ilt_start;
        u32 addr, val;
        u32 main_mem_base, main_mem_size, main_mem_prty_clr;
-       int i, main_mem_width;
+       int i, main_mem_width, rc;
 
        DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
 
        /* FLR cleanup - hmmm */
-       if (!CHIP_IS_E1x(bp))
-               bnx2x_pf_flr_clnup(bp);
+       if (!CHIP_IS_E1x(bp)) {
+               rc = bnx2x_pf_flr_clnup(bp);
+               if (rc)
+                       return rc;
+       }
 
        /* set MSI reconfigure capability */
        if (bp->common.int_block == INT_BLOCK_HC) {
@@ -6856,13 +7043,16 @@ void bnx2x_free_mem(struct bnx2x *bp)
 static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 {
        int num_groups;
+       int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
 
-       /* number of eth_queues */
-       u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+       /* number of queues for statistics is number of eth queues + FCoE */
+       u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
 
        /* Total number of FW statistics requests =
-        * 1 for port stats + 1 for PF stats + num_eth_queues */
-       bp->fw_stats_num = 2 + num_queue_stats;
+        * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
+        * num of queues
+        */
+       bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
 
 
        /* Request is built from stats_query_header and an array of
@@ -6870,8 +7060,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
         * STATS_QUERY_CMD_COUNT rules. The real number or requests is
         * configured in the stats_query_header.
         */
-       num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
-               (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+       num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
+                    (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
 
        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
                        num_groups * sizeof(struct stats_query_cmd_group);
@@ -6880,9 +7070,13 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
         *
         * stats_counter holds per-STORM counters that are incremented
         * when STORM has finished with the current request.
+        *
+        * memory for FCoE offloaded statistics is counted anyway,
+        * even if they will not be sent.
         */
        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
                sizeof(struct per_pf_stats) +
+               sizeof(struct fcoe_statistics_params) +
                sizeof(struct per_queue_stats) * num_queue_stats +
                sizeof(struct stats_counter);
 
@@ -7025,6 +7219,13 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 {
        unsigned long ramrod_flags = 0;
 
+#ifdef BCM_CNIC
+       if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) {
+               DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n");
+               return 0;
+       }
+#endif
+
        DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 
        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8278,13 +8479,38 @@ int bnx2x_leader_reset(struct bnx2x *bp)
 {
        int rc = 0;
        bool global = bnx2x_reset_is_global(bp);
+       u32 load_code;
+
+       /* if not going to reset MCP - load "fake" driver to reset HW while
+        * driver is owner of the HW
+        */
+       if (!global && !BP_NOMCP(bp)) {
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+               if (!load_code) {
+                       BNX2X_ERR("MCP response failure, aborting\n");
+                       rc = -EAGAIN;
+                       goto exit_leader_reset;
+               }
+               if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
+                   (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
+                       BNX2X_ERR("MCP unexpected resp, aborting\n");
+                       rc = -EAGAIN;
+                       goto exit_leader_reset2;
+               }
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+               if (!load_code) {
+                       BNX2X_ERR("MCP response failure, aborting\n");
+                       rc = -EAGAIN;
+                       goto exit_leader_reset2;
+               }
+       }
 
        /* Try to recover after the failure */
        if (bnx2x_process_kill(bp, global)) {
                netdev_err(bp->dev, "Something bad had happen on engine %d! "
                                    "Aii!\n", BP_PATH(bp));
                rc = -EAGAIN;
-               goto exit_leader_reset;
+               goto exit_leader_reset2;
        }
 
        /*
@@ -8295,6 +8521,12 @@ int bnx2x_leader_reset(struct bnx2x *bp)
        if (global)
                bnx2x_clear_reset_global(bp);
 
+exit_leader_reset2:
+       /* unload "fake driver" if it was loaded */
+       if (!global && !BP_NOMCP(bp)) {
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+       }
 exit_leader_reset:
        bp->is_leader = 0;
        bnx2x_release_leader_lock(bp);
@@ -8331,13 +8563,15 @@ static inline void bnx2x_recovery_failed(struct bnx2x *bp)
 static void bnx2x_parity_recover(struct bnx2x *bp)
 {
        bool global = false;
+       bool is_parity;
 
        DP(NETIF_MSG_HW, "Handling parity\n");
        while (1) {
                switch (bp->recovery_state) {
                case BNX2X_RECOVERY_INIT:
                        DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
-                       bnx2x_chk_parity_attn(bp, &global, false);
+                       is_parity = bnx2x_chk_parity_attn(bp, &global, false);
+                       WARN_ON(!is_parity);
 
                        /* Try to get a LEADER_LOCK HW lock */
                        if (bnx2x_trylock_leader_lock(bp)) {
@@ -8361,15 +8595,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
 
                        bp->recovery_state = BNX2X_RECOVERY_WAIT;
 
-                       /*
-                        * Reset MCP command sequence number and MCP mail box
-                        * sequence as we are going to reset the MCP.
-                        */
-                       if (global) {
-                               bp->fw_seq = 0;
-                               bp->fw_drv_pulse_wr_seq = 0;
-                       }
-
                        /* Ensure "is_leader", MCP command sequence and
                         * "recovery_state" update values are seen on other
                         * CPUs.
@@ -8381,10 +8606,10 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
                        DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
                        if (bp->is_leader) {
                                int other_engine = BP_PATH(bp) ? 0 : 1;
-                               u32 other_load_counter =
-                                       bnx2x_get_load_cnt(bp, other_engine);
-                               u32 load_counter =
-                                       bnx2x_get_load_cnt(bp, BP_PATH(bp));
+                               bool other_load_status =
+                                       bnx2x_get_load_status(bp, other_engine);
+                               bool load_status =
+                                       bnx2x_get_load_status(bp, BP_PATH(bp));
                                global = bnx2x_reset_is_global(bp);
 
                                /*
@@ -8395,8 +8620,8 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
                                 * the the gates will remain closed for that
                                 * engine.
                                 */
-                               if (load_counter ||
-                                   (global && other_load_counter)) {
+                               if (load_status ||
+                                   (global && other_load_status)) {
                                        /* Wait until all other functions get
                                         * down.
                                         */
@@ -8453,9 +8678,20 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
                                                return;
                                        }
 
-                                       if (bnx2x_nic_load(bp, LOAD_NORMAL))
-                                               bnx2x_recovery_failed(bp);
-                                       else {
+                                       bp->recovery_state =
+                                               BNX2X_RECOVERY_NIC_LOADING;
+                                       if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+                                               netdev_err(bp->dev,
+                                                          "Recovery failed. "
+                                                          "Power cycle "
+                                                          "needed\n");
+                                               /* Disconnect this device */
+                                               netif_device_detach(bp->dev);
+                                               /* Shut down the power */
+                                               bnx2x_set_power_state(
+                                                       bp, PCI_D3hot);
+                                               smp_mb();
+                                       } else {
                                                bp->recovery_state =
                                                        BNX2X_RECOVERY_DONE;
                                                smp_mb();
@@ -8522,6 +8758,17 @@ sp_rtnl_not_reset:
        if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
                bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
 
+       /*
+        * in case of fan failure we need to reset it if the "stop on error"
+        * debug flag is set, since we are trying to prevent permanent
+        * overheating damage
+        */
+       if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+               DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+               netif_device_detach(bp->dev);
+               bnx2x_close(bp->dev);
+       }
+
 sp_rtnl_exit:
        rtnl_unlock();
 }
@@ -8604,11 +8851,13 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 {
        u32 val;
 
-       /* Check if there is any driver already loaded */
-       val = REG_RD(bp, MISC_REG_UNPREPARED);
-       if (val == 0x1) {
+       /* possibly another driver is trying to reset the chip */
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+       /* check if doorbell queue is reset */
+       if (REG_RD(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET)
+           & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
 
-               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
                /*
                 * Check if it is the UNDI driver
                 * UNDI driver initializes CID offset for normal bell to 0x7
@@ -8696,19 +8945,16 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 
                        /* restore our func and fw_seq */
                        bp->pf_num = orig_pf_num;
-                       bp->fw_seq =
-                             (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
-                               DRV_MSG_SEQ_NUMBER_MASK);
                }
-
-               /* now it's safe to release the lock */
-               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
        }
+
+       /* now it's safe to release the lock */
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 }
 
 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 {
-       u32 val, val2, val3, val4, id;
+       u32 val, val2, val3, val4, id, boot_mode;
        u16 pmc;
 
        /* Get the chip revision id and number. */
@@ -8817,6 +9063,26 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
                FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+       bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+                       BC_SUPPORTS_PFC_STATS : 0;
+
+       boot_mode = SHMEM_RD(bp,
+                       dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+                       PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+       switch (boot_mode) {
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+               break;
+       }
 
        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
        bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -9267,22 +9533,43 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
                                                        bp->common.shmem2_base);
 }
 
-#ifdef BCM_CNIC
-static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
 {
+#ifdef BCM_CNIC
        int port = BP_PORT(bp);
-       int func = BP_ABS_FUNC(bp);
 
        u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
                                drv_lic_key[port].max_iscsi_conn);
-       u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
-                               drv_lic_key[port].max_fcoe_conn);
 
-       /* Get the number of maximum allowed iSCSI and FCoE connections */
+       /* Get the number of maximum allowed iSCSI connections */
        bp->cnic_eth_dev.max_iscsi_conn =
                (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
                BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
 
+       BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+                      bp->cnic_eth_dev.max_iscsi_conn);
+
+       /*
+        * If maximum allowed number of connections is zero -
+        * disable the feature.
+        */
+       if (!bp->cnic_eth_dev.max_iscsi_conn)
+               bp->flags |= NO_ISCSI_FLAG;
+#else
+       bp->flags |= NO_ISCSI_FLAG;
+#endif
+}
+
+static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       int port = BP_PORT(bp);
+       int func = BP_ABS_FUNC(bp);
+
+       u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+                               drv_lic_key[port].max_fcoe_conn);
+
+       /* Get the number of maximum allowed FCoE connections */
        bp->cnic_eth_dev.max_fcoe_conn =
                (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
                BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
@@ -9334,21 +9621,29 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
                }
        }
 
-       BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
-                      bp->cnic_eth_dev.max_iscsi_conn,
-                      bp->cnic_eth_dev.max_fcoe_conn);
+       BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
 
        /*
         * If maximum allowed number of connections is zero -
         * disable the feature.
         */
-       if (!bp->cnic_eth_dev.max_iscsi_conn)
-               bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
        if (!bp->cnic_eth_dev.max_fcoe_conn)
                bp->flags |= NO_FCOE_FLAG;
-}
+#else
+       bp->flags |= NO_FCOE_FLAG;
 #endif
+}
+
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+       /*
+        * iSCSI may be dynamically disabled, but by reading the
+        * info here the driver will decrease its memory usage
+        * if the feature is disabled for good
+        */
+       bnx2x_get_iscsi_info(bp);
+       bnx2x_get_fcoe_info(bp);
+}
 
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
@@ -9374,7 +9669,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                        bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
 #ifdef BCM_CNIC
-               /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+               /*
+                * iSCSI and FCoE NPAR MACs: if there is no iSCSI or
                 * FCoE MAC then the appropriate feature should be disabled.
                 */
                if (IS_MF_SI(bp)) {
@@ -9396,11 +9692,22 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                                val = MF_CFG_RD(bp, func_ext_config[func].
                                                    fcoe_mac_addr_lower);
                                bnx2x_set_mac_buf(fip_mac, val, val2);
-                               BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+                               BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
                                               fip_mac);
 
                        } else
                                bp->flags |= NO_FCOE_FLAG;
+               } else { /* SD mode */
+                       if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) {
+                               /* use primary mac as iscsi mac */
+                               memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+                               /* Zero primary MAC configuration */
+                               memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+                               BNX2X_DEV_INFO("SD ISCSI MODE\n");
+                               BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+                                              iscsi_mac);
+                       }
                }
 #endif
        } else {
@@ -9449,7 +9756,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
        }
 #endif
 
-       if (!is_valid_ether_addr(bp->dev->dev_addr))
+       if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
                dev_err(&bp->pdev->dev,
                        "bad Ethernet MAC address configuration: "
                        "%pM, change it manually before bringing up "
@@ -9661,19 +9968,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
        /* Get MAC addresses */
        bnx2x_get_mac_hwinfo(bp);
 
-#ifdef BCM_CNIC
        bnx2x_get_cnic_info(bp);
-#endif
-
-       /* Get current FW pulse sequence */
-       if (!BP_NOMCP(bp)) {
-               int mb_idx = BP_FW_MB_IDX(bp);
-
-               bp->fw_drv_pulse_wr_seq =
-                               (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
-                                DRV_PULSE_SEQ_MASK);
-               BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
-       }
 
        return rc;
 }
@@ -9681,30 +9976,49 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
 {
        int cnt, i, block_end, rodi;
-       char vpd_data[BNX2X_VPD_LEN+1];
+       char vpd_start[BNX2X_VPD_LEN+1];
        char str_id_reg[VENDOR_ID_LEN+1];
        char str_id_cap[VENDOR_ID_LEN+1];
+       char *vpd_data;
+       char *vpd_extended_data = NULL;
        u8 len;
 
-       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
        memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
 
        if (cnt < BNX2X_VPD_LEN)
                goto out_not_found;
 
-       i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+       /* VPD RO tag should be first tag after identifier string, hence
+        * we should be able to find it in first BNX2X_VPD_LEN chars
+        */
+       i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
                             PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;
 
-
        block_end = i + PCI_VPD_LRDT_TAG_SIZE +
-                   pci_vpd_lrdt_size(&vpd_data[i]);
+                   pci_vpd_lrdt_size(&vpd_start[i]);
 
        i += PCI_VPD_LRDT_TAG_SIZE;
 
-       if (block_end > BNX2X_VPD_LEN)
-               goto out_not_found;
+       if (block_end > BNX2X_VPD_LEN) {
+               vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+               if (vpd_extended_data  == NULL)
+                       goto out_not_found;
+
+               /* read rest of vpd image into vpd_extended_data */
+               memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+               cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+                                  block_end - BNX2X_VPD_LEN,
+                                  vpd_extended_data + BNX2X_VPD_LEN);
+               if (cnt < (block_end - BNX2X_VPD_LEN))
+                       goto out_not_found;
+               vpd_data = vpd_extended_data;
+       } else
+               vpd_data = vpd_start;
+
+       /* now vpd_data holds full vpd content in both cases */
 
        rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
                                   PCI_VPD_RO_KEYWORD_MFR_ID);
@@ -9736,9 +10050,11 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
                                bp->fw_ver[len] = ' ';
                        }
                }
+               kfree(vpd_extended_data);
                return;
        }
 out_not_found:
+       kfree(vpd_extended_data);
        return;
 }
 
@@ -9823,14 +10139,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
        if (!BP_NOMCP(bp))
                bnx2x_undi_unload(bp);
 
-       /* init fw_seq after undi_unload! */
-       if (!BP_NOMCP(bp)) {
-               bp->fw_seq =
-                       (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
-                        DRV_MSG_SEQ_NUMBER_MASK);
-               BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-       }
-
        if (CHIP_REV_IS_FPGA(bp))
                dev_err(&bp->pdev->dev, "FPGA detected\n");
 
@@ -9840,15 +10148,20 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 
        bp->multi_mode = multi_mode;
 
+       bp->disable_tpa = disable_tpa;
+
+#ifdef BCM_CNIC
+       bp->disable_tpa |= IS_MF_ISCSI_SD(bp);
+#endif
+
        /* Set TPA flags */
-       if (disable_tpa) {
+       if (bp->disable_tpa) {
                bp->flags &= ~TPA_ENABLE_FLAG;
                bp->dev->features &= ~NETIF_F_LRO;
        } else {
                bp->flags |= TPA_ENABLE_FLAG;
                bp->dev->features |= NETIF_F_LRO;
        }
-       bp->disable_tpa = disable_tpa;
 
        if (CHIP_IS_E1(bp))
                bp->dropless_fc = 0;
@@ -9907,14 +10220,14 @@ static int bnx2x_open(struct net_device *dev)
        struct bnx2x *bp = netdev_priv(dev);
        bool global = false;
        int other_engine = BP_PATH(bp) ? 0 : 1;
-       u32 other_load_counter, load_counter;
+       bool other_load_status, load_status;
 
        netif_carrier_off(dev);
 
        bnx2x_set_power_state(bp, PCI_D0);
 
-       other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
-       load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+       other_load_status = bnx2x_get_load_status(bp, other_engine);
+       load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
 
        /*
         * If parity had happen during the unload, then attentions
@@ -9940,8 +10253,8 @@ static int bnx2x_open(struct net_device *dev)
                         * global blocks only the first in the chip should try
                         * to recover.
                         */
-                       if ((!load_counter &&
-                            (!global || !other_load_counter)) &&
+                       if ((!load_status &&
+                            (!global || !other_load_status)) &&
                            bnx2x_trylock_leader_lock(bp) &&
                            !bnx2x_leader_reset(bp)) {
                                netdev_info(bp->dev, "Recovered in open\n");
@@ -9965,7 +10278,7 @@ static int bnx2x_open(struct net_device *dev)
 }
 
 /* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
+int bnx2x_close(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
@@ -10119,6 +10432,11 @@ void bnx2x_set_rx_mode(struct net_device *dev)
        }
 
        bp->rx_mode = rx_mode;
+#ifdef BCM_CNIC
+       /* handle ISCSI SD mode */
+       if (IS_MF_ISCSI_SD(bp))
+               bp->rx_mode = BNX2X_RX_MODE_NONE;
+#endif
 
        /* Schedule the rx_mode command */
        if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -10198,6 +10516,15 @@ static void poll_bnx2x(struct net_device *dev)
 }
 #endif
 
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr))
+               return -EADDRNOTAVAIL;
+       return 0;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
@@ -10205,7 +10532,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_select_queue       = bnx2x_select_queue,
        .ndo_set_rx_mode        = bnx2x_set_rx_mode,
        .ndo_set_mac_address    = bnx2x_change_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_validate_addr      = bnx2x_validate_addr,
        .ndo_do_ioctl           = bnx2x_ioctl,
        .ndo_change_mtu         = bnx2x_change_mtu,
        .ndo_fix_features       = bnx2x_fix_features,
@@ -10246,6 +10573,10 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 {
        struct bnx2x *bp;
        int rc;
+       u32 pci_cfg_dword;
+       bool chip_is_e1x = (board_type == BCM57710 ||
+                           board_type == BCM57711 ||
+                           board_type == BCM57711E);
 
        SET_NETDEV_DEV(dev, &pdev->dev);
        bp = netdev_priv(dev);
@@ -10253,7 +10584,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        bp->dev = dev;
        bp->pdev = pdev;
        bp->flags = 0;
-       bp->pf_num = PCI_FUNC(pdev->devfn);
 
        rc = pci_enable_device(pdev);
        if (rc) {
@@ -10320,6 +10650,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
                goto err_out_release;
        }
 
+       /* In E1/E1H use pci device function given by kernel.
+        * In E2/E3 read physical function from ME register since these chips
+        * support Physical Device Assignment where kernel BDF may be arbitrary
+        * (depending on hypervisor).
+        */
+       if (chip_is_e1x)
+               bp->pf_num = PCI_FUNC(pdev->devfn);
+       else {/* chip is E2/3*/
+               pci_read_config_dword(bp->pdev,
+                                     PCICFG_ME_REGISTER, &pci_cfg_dword);
+               bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
+                   ME_REG_ABS_PF_NUM_SHIFT);
+       }
+       DP(BNX2X_MSG_SP, "me reg PF num: %d\n", bp->pf_num);
+
        bnx2x_set_power_state(bp, PCI_D0);
 
        /* clean indirect addresses */
@@ -10334,7 +10679,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
 
-       if (CHIP_IS_E1x(bp)) {
+       if (chip_is_e1x) {
                REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
                REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
                REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
@@ -10345,13 +10690,11 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
         * Enable internal target-read (in case we are probed after PF FLR).
         * Must be done prior to any BAR read access. Only for 57712 and up
         */
-       if (board_type != BCM57710 &&
-           board_type != BCM57711 &&
-           board_type != BCM57711E)
+       if (!chip_is_e1x)
                REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 
        /* Reset the load counter */
-       bnx2x_clear_load_cnt(bp);
+       bnx2x_clear_load_status(bp);
 
        dev->watchdog_timeo = TX_TIMEOUT;
 
@@ -10823,8 +11166,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
        bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
 
 #ifdef BCM_CNIC
-       /* disable FCOE L2 queue for E1x and E3*/
-       if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+       /* disable FCOE L2 queue for E1x */
+       if (CHIP_IS_E1x(bp))
                bp->flags |= NO_FCOE_FLAG;
 
 #endif
@@ -11004,13 +11347,6 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");
-
-       if (!BP_NOMCP(bp)) {
-               bp->fw_seq =
-                   (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
-                   DRV_MSG_SEQ_NUMBER_MASK);
-               BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-       }
 }
 
 /**
@@ -11266,6 +11602,13 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
                return -EIO;
 #endif
 
+       if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
+           (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
+               netdev_err(dev, "Handling parity error recovery. Try again "
+                               "later\n");
+               return -EAGAIN;
+       }
+
        spin_lock_bh(&bp->spq_lock);
 
        for (i = 0; i < count; i++) {
@@ -11486,6 +11829,38 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
                smp_mb__after_atomic_inc();
                break;
        }
+       case DRV_CTL_ULP_REGISTER_CMD: {
+               int ulp_type = ctl->data.ulp_type;
+
+               if (CHIP_IS_E3(bp)) {
+                       int idx = BP_FW_MB_IDX(bp);
+                       u32 cap;
+
+                       cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+                       if (ulp_type == CNIC_ULP_ISCSI)
+                               cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+                       else if (ulp_type == CNIC_ULP_FCOE)
+                               cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+                       SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+               }
+               break;
+       }
+       case DRV_CTL_ULP_UNREGISTER_CMD: {
+               int ulp_type = ctl->data.ulp_type;
+
+               if (CHIP_IS_E3(bp)) {
+                       int idx = BP_FW_MB_IDX(bp);
+                       u32 cap;
+
+                       cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+                       if (ulp_type == CNIC_ULP_ISCSI)
+                               cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+                       else if (ulp_type == CNIC_ULP_FCOE)
+                               cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+                       SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+               }
+               break;
+       }
 
        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
@@ -11561,7 +11936,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
 
        mutex_lock(&bp->cnic_mutex);
        cp->drv_state = 0;
-       rcu_assign_pointer(bp->cnic_ops, NULL);
+       RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
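
One of the recovery-flow changes above replaces the per-engine load counter with a per-PF load bit kept in BNX2X_RECOVERY_GLOB_REG and updated under the RECOVERY_REG hardware lock (bnx2x_set_pf_load, bnx2x_clear_pf_load, bnx2x_get_load_status). The standalone sketch below is a minimal illustration of that set/clear/test bookkeeping on a plain 32-bit word, under stated assumptions: the demo_* helpers, the DEMO_* mask values and the in-memory glob_reg variable are made up for illustration and stand in for the hardware register and its locking; this is not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative per-path load fields; the driver keeps an equivalent
 * layout in its recovery register, protected by a hardware lock.
 */
#define DEMO_PATH0_LOAD_MASK	0x000000ffu
#define DEMO_PATH0_LOAD_SHIFT	0
#define DEMO_PATH1_LOAD_MASK	0x0000ff00u
#define DEMO_PATH1_LOAD_SHIFT	8

static uint32_t glob_reg;	/* stands in for the recovery register */

/* Mark one PF as loaded on the given path (one bit per PF). */
static void demo_set_pf_load(int path, int pf_num)
{
	uint32_t mask  = path ? DEMO_PATH1_LOAD_MASK  : DEMO_PATH0_LOAD_MASK;
	uint32_t shift = path ? DEMO_PATH1_LOAD_SHIFT : DEMO_PATH0_LOAD_SHIFT;
	uint32_t field = (glob_reg & mask) >> shift;

	field |= 1u << pf_num;
	glob_reg = (glob_reg & ~mask) | ((field << shift) & mask);
}

/* Clear this PF's bit; return true while other PFs remain loaded. */
static bool demo_clear_pf_load(int path, int pf_num)
{
	uint32_t mask  = path ? DEMO_PATH1_LOAD_MASK  : DEMO_PATH0_LOAD_MASK;
	uint32_t shift = path ? DEMO_PATH1_LOAD_SHIFT : DEMO_PATH0_LOAD_SHIFT;
	uint32_t field = (glob_reg & mask) >> shift;

	field &= ~(1u << pf_num);
	glob_reg = (glob_reg & ~mask) | ((field << shift) & mask);

	return field != 0;
}

int main(void)
{
	demo_set_pf_load(0, 0);		/* PF0 loads on path 0 */
	demo_set_pf_load(0, 2);		/* PF2 loads on path 0 */

	/* PF0 unloads: PF2 is still loaded, so the path stays busy. */
	printf("others loaded: %d\n", demo_clear_pf_load(0, 0));
	/* PF2 unloads: nobody left, so a leader could reset the chip. */
	printf("others loaded: %d\n", demo_clear_pf_load(0, 2));
	return 0;
}

One practical property of a bit mask versus a counter is that clearing the same PF twice is harmless, and the "is anyone still loaded" test reduces to a non-zero check, which is the shape of the bool returns introduced in the patch.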