diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8d09191c327e7de39a347d7e56ba331b6bfde16d..c62d567cc8457b6ba880247f081f3efcb61ec4c2 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -177,15 +177,18 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
                 (elscmd == ELS_CMD_LOGO)))
                switch (elscmd) {
                case ELS_CMD_FLOGI:
-               elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+               elsiocb->iocb_flag |=
+                       ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
                                        & LPFC_FIP_ELS_ID_MASK);
                break;
                case ELS_CMD_FDISC:
-               elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+               elsiocb->iocb_flag |=
+                       ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
                                        & LPFC_FIP_ELS_ID_MASK);
                break;
                case ELS_CMD_LOGO:
-               elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+               elsiocb->iocb_flag |=
+                       ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
                                        & LPFC_FIP_ELS_ID_MASK);
                break;
                }
@@ -372,7 +375,8 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
                err = 4;
                goto fail;
        }
-       rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
+       rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
+                         ndlp->nlp_rpi);
        if (rc) {
                err = 5;
                goto fail_free_mbox;
@@ -517,18 +521,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (sp->cmn.edtovResolution)    /* E_D_TOV ticks are in nanoseconds */
                phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
 
+       phba->fc_edtovResol = sp->cmn.edtovResolution;
        phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
 
-       if (phba->fc_topology == TOPOLOGY_LOOP) {
+       if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_PUBLIC_LOOP;
                spin_unlock_irq(shost->host_lock);
-       } else {
-               /*
-                * If we are a N-port connected to a Fabric, fixup sparam's so
-                * logins to devices on remote loops work.
-                */
-               vport->fc_sparam.cmn.altBbCredit = 1;
        }
 
        vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
@@ -585,6 +584,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        lpfc_unreg_rpi(vport, np);
                }
                lpfc_cleanup_pending_mbox(vport);
+
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       lpfc_sli4_unreg_all_rpis(vport);
+
                if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
                        lpfc_mbx_unreg_vpi(vport);
                        spin_lock_irq(shost->host_lock);
@@ -800,7 +803,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
        if (irsp->ulpStatus) {
                /*
-                * In case of FIP mode, perform round robin FCF failover
+                * In case of FIP mode, perform roundrobin FCF failover
                 * due to new FCF discovery
                 */
                if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
@@ -808,48 +811,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                    (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
                    (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
-                                       "2611 FLOGI failed on registered "
-                                       "FCF record fcf_index(%d), status: "
-                                       "x%x/x%x, tmo:x%x, trying to perform "
-                                       "round robin failover\n",
+                                       "2611 FLOGI failed on FCF (x%x), "
+                                       "status:x%x/x%x, tmo:x%x, perform "
+                                       "roundrobin FCF failover\n",
                                        phba->fcf.current_rec.fcf_indx,
                                        irsp->ulpStatus, irsp->un.ulpWord[4],
                                        irsp->ulpTimeout);
                        fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
-                       if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
-                               /*
-                                * Exhausted the eligible FCF record list,
-                                * fail through to retry FLOGI on current
-                                * FCF record.
-                                */
-                               lpfc_printf_log(phba, KERN_WARNING,
-                                               LOG_FIP | LOG_ELS,
-                                               "2760 Completed one round "
-                                               "of FLOGI FCF round robin "
-                                               "failover list, retry FLOGI "
-                                               "on currently registered "
-                                               "FCF index:%d\n",
-                                               phba->fcf.current_rec.fcf_indx);
-                       } else {
-                               lpfc_printf_log(phba, KERN_INFO,
-                                               LOG_FIP | LOG_ELS,
-                                               "2794 FLOGI FCF round robin "
-                                               "failover to FCF index x%x\n",
-                                               fcf_index);
-                               rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
-                                                                  fcf_index);
-                               if (rc)
-                                       lpfc_printf_log(phba, KERN_WARNING,
-                                                       LOG_FIP | LOG_ELS,
-                                                       "2761 FLOGI round "
-                                                       "robin FCF failover "
-                                                       "read FCF failed "
-                                                       "rc:x%x, fcf_index:"
-                                                       "%d\n", rc,
-                                               phba->fcf.current_rec.fcf_indx);
-                               else
-                                       goto out;
-                       }
+                       rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
+                       if (rc)
+                               goto out;
                }
 
                /* FLOGI failure */
@@ -862,6 +833,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                if (lpfc_els_retry(phba, cmdiocb, rspiocb))
                        goto out;
 
+               /* FLOGI failure */
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+                                "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+                                irsp->ulpStatus, irsp->un.ulpWord[4],
+                                irsp->ulpTimeout);
+
                /* FLOGI failed, so there is no fabric */
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -873,13 +850,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 */
                if (phba->alpa_map[0] == 0) {
                        vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
+                       if ((phba->sli_rev == LPFC_SLI_REV4) &&
+                           (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+                            (vport->fc_prevDID != vport->fc_myDID))) {
+                               if (vport->fc_flag & FC_VFI_REGISTERED)
+                                       lpfc_sli4_unreg_all_rpis(vport);
+                               lpfc_issue_reg_vfi(vport);
+                               lpfc_nlp_put(ndlp);
+                               goto out;
+                       }
                }
-
-               /* FLOGI failure */
-               lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-                                "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
-                                irsp->ulpStatus, irsp->un.ulpWord[4],
-                                irsp->ulpTimeout);
                goto flogifail;
        }
        spin_lock_irq(shost->host_lock);
@@ -909,7 +889,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 */
                if (sp->cmn.fPort)
                        rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
-               else if (!(phba->hba_flag & HBA_FCOE_SUPPORT))
+               else if (!(phba->hba_flag & HBA_FCOE_MODE))
                        rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
                else {
                        lpfc_printf_vlog(vport, KERN_ERR,
@@ -939,6 +919,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        lpfc_nlp_put(ndlp);
                        spin_lock_irq(&phba->hbalock);
                        phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+                       phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
                        spin_unlock_irq(&phba->hbalock);
                        goto out;
                }
@@ -947,13 +928,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        if (phba->hba_flag & HBA_FIP_SUPPORT)
                                lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
                                                LOG_ELS,
-                                               "2769 FLOGI successful on FCF "
-                                               "record: current_fcf_index:"
-                                               "x%x, terminate FCF round "
-                                               "robin failover process\n",
+                                               "2769 FLOGI to FCF (x%x) "
+                                               "completed successfully\n",
                                                phba->fcf.current_rec.fcf_indx);
                        spin_lock_irq(&phba->hbalock);
                        phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+                       phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
                        spin_unlock_irq(&phba->hbalock);
                        goto out;
                }
@@ -1044,7 +1024,9 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (sp->cmn.fcphHigh < FC_PH3)
                sp->cmn.fcphHigh = FC_PH3;
 
-       if  (phba->sli_rev == LPFC_SLI_REV4) {
+       if  ((phba->sli_rev == LPFC_SLI_REV4) &&
+            (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+             LPFC_SLI_INTF_IF_TYPE_0)) {
                elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
                elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
                /* FLOGI needs to be 3 for WQE FCFI */
@@ -1057,7 +1039,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                icmd->ulpCt_l = 0;
        }
 
-       if (phba->fc_topology != TOPOLOGY_LOOP) {
+       if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
                icmd->un.elsreq64.myID = 0;
                icmd->un.elsreq64.fl = 1;
        }
@@ -1175,12 +1157,13 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
                        return 0;
        }
 
-       if (lpfc_issue_els_flogi(vport, ndlp, 0))
+       if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
                /* This decrement of reference count to node shall kick off
                 * the release of the node.
                 */
                lpfc_nlp_put(ndlp);
-
+               return 0;
+       }
        return 1;
 }
 
@@ -1310,6 +1293,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
        uint32_t rc, keepDID = 0;
        int  put_node;
        int  put_rport;
+       struct lpfc_node_rrqs rrq;
 
        /* Fabric nodes can have the same WWPN so we don't bother searching
         * by WWPN.  Just return the ndlp that was given to us.
@@ -1327,6 +1311,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 
        if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
                return ndlp;
+       memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
 
        if (!new_ndlp) {
                rc = memcmp(&ndlp->nlp_portname, name,
@@ -1347,12 +1332,25 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
                if (!new_ndlp)
                        return ndlp;
                keepDID = new_ndlp->nlp_DID;
-       } else
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       memcpy(&rrq.xri_bitmap,
+                               &new_ndlp->active_rrqs.xri_bitmap,
+                               sizeof(new_ndlp->active_rrqs.xri_bitmap));
+       } else {
                keepDID = new_ndlp->nlp_DID;
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       memcpy(&rrq.xri_bitmap,
+                               &new_ndlp->active_rrqs.xri_bitmap,
+                               sizeof(new_ndlp->active_rrqs.xri_bitmap));
+       }
 
        lpfc_unreg_rpi(vport, new_ndlp);
        new_ndlp->nlp_DID = ndlp->nlp_DID;
        new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               memcpy(new_ndlp->active_rrqs.xri_bitmap,
+                       &ndlp->active_rrqs.xri_bitmap,
+                       sizeof(ndlp->active_rrqs.xri_bitmap));
 
        if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
                new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1391,12 +1389,20 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 
                /* Two ndlps cannot have the same did on the nodelist */
                ndlp->nlp_DID = keepDID;
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       memcpy(&ndlp->active_rrqs.xri_bitmap,
+                               &rrq.xri_bitmap,
+                               sizeof(ndlp->active_rrqs.xri_bitmap));
                lpfc_drop_node(vport, ndlp);
        }
        else {
                lpfc_unreg_rpi(vport, ndlp);
                /* Two ndlps cannot have the same did */
                ndlp->nlp_DID = keepDID;
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       memcpy(&ndlp->active_rrqs.xri_bitmap,
+                               &rrq.xri_bitmap,
+                               sizeof(ndlp->active_rrqs.xri_bitmap));
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                /* Since we are swapping the ndlp passed in with the new one
                 * and the did has already been swapped, copy over the
@@ -1456,6 +1462,73 @@ lpfc_end_rscn(struct lpfc_vport *vport)
        }
 }
 
+/**
+ * lpfc_cmpl_els_rrq - Completion handler for ELS RRQs.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine will call the clear rrq function to free the rrq and
+ * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
+ * exist then the clear_rrq is still called because the rrq needs to
+ * be freed.
+ **/
+
+static void
+lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+                   struct lpfc_iocbq *rspiocb)
+{
+       struct lpfc_vport *vport = cmdiocb->vport;
+       IOCB_t *irsp;
+       struct lpfc_nodelist *ndlp;
+       struct lpfc_node_rrq *rrq;
+
+       /* we pass cmdiocb to state machine which needs rspiocb as well */
+       rrq = cmdiocb->context_un.rrq;
+       cmdiocb->context_un.rsp_iocb = rspiocb;
+
+       irsp = &rspiocb->iocb;
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+               "RRQ cmpl:      status:x%x/x%x did:x%x",
+               irsp->ulpStatus, irsp->un.ulpWord[4],
+               irsp->un.elsreq64.remoteID);
+
+       ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+                                "2882 RRQ completes to NPort x%x "
+                                "with no ndlp. Data: x%x x%x x%x\n",
+                                irsp->un.elsreq64.remoteID,
+                                irsp->ulpStatus, irsp->un.ulpWord[4],
+                                irsp->ulpIoTag);
+               goto out;
+       }
+
+       /* rrq completes to NPort <nlp_DID> */
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                        "2880 RRQ completes to NPort x%x "
+                        "Data: x%x x%x x%x x%x x%x\n",
+                        ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+                        irsp->ulpTimeout, rrq->xritag, rrq->rxid);
+
+       if (irsp->ulpStatus) {
+               /* Check for retry */
+               /* RRQ failed Don't print the vport to vport rjts */
+               if (irsp->ulpStatus != IOSTAT_LS_RJT ||
+                       (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
+                       ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
+                       (phba)->pport->cfg_log_verbose & LOG_ELS)
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+                                "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
+                                ndlp->nlp_DID, irsp->ulpStatus,
+                                irsp->un.ulpWord[4]);
+       }
+out:
+       if (rrq)
+               lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+       lpfc_els_free_iocb(phba, cmdiocb);
+       return;
+}
 /**
  * lpfc_cmpl_els_plogi - Completion callback function for plogi
  * @phba: pointer to lpfc hba data structure.
@@ -1645,6 +1718,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
        memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
        sp = (struct serv_parm *) pcmd;
 
+       /*
+        * If we are an N_Port connected to a Fabric, fix up sparam's so logins
+        * to devices on remote loops work.
+        */
+       if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+               sp->cmn.altBbCredit = 1;
+
        if (sp->cmn.fcphLow < FC_PH_4_3)
                sp->cmn.fcphLow = FC_PH_4_3;
 
@@ -2744,7 +2824,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        if (cmd == ELS_CMD_FLOGI) {
                                if (PCI_DEVICE_ID_HORNET ==
                                        phba->pcidev->device) {
-                                       phba->fc_topology = TOPOLOGY_LOOP;
+                                       phba->fc_topology = LPFC_TOPOLOGY_LOOP;
                                        phba->pport->fc_myDID = 0;
                                        phba->alpa_map[0] = 0;
                                        phba->alpa_map[1] = 0;
@@ -2899,7 +2979,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                retry = 1;
 
        if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
-           (phba->fc_topology != TOPOLOGY_LOOP) &&
+           (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
            !lpfc_error_lost_link(irsp)) {
                /* FLOGI retry policy */
                retry = 1;
@@ -3241,15 +3321,9 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 
-       /*
-        * This routine is used to register and unregister in previous SLI
-        * modes.
-        */
-       if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
-           (phba->sli_rev == LPFC_SLI_REV4))
-               lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
-
        pmb->context1 = NULL;
+       pmb->context2 = NULL;
+
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
@@ -3923,6 +3997,105 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
        return 0;
 }
 
+/**
+ * lpfc_els_clear_rrq - Clear the rrq that this received RRQ describes.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @iocb: pointer to the lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine looks up the active rrq for the exchange described by the
+ * received RRQ payload and, if one is found, clears it.
+ **/
+static void
+lpfc_els_clear_rrq(struct lpfc_vport *vport,
+      struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba  *phba = vport->phba;
+       uint8_t *pcmd;
+       struct RRQ *rrq;
+       uint16_t rxid;
+       struct lpfc_node_rrq *prrq;
+
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
+       pcmd += sizeof(uint32_t);
+       rrq = (struct RRQ *)pcmd;
+       rxid = bf_get(rrq_oxid, rrq);
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                       "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
+                       " x%x x%x\n",
+                       bf_get(rrq_did, rrq),
+                       bf_get(rrq_oxid, rrq),
+                       rxid,
+                       iocb->iotag, iocb->iocb.ulpContext);
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+               "Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
+               ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
+       prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID);
+       if (prrq)
+               lpfc_clr_rrq_active(phba, rxid, prrq);
+       return;
+}
+
+/**
+ * lpfc_els_rsp_echo_acc - Issue echo acc response
+ * @vport: pointer to a virtual N_Port data structure.
+ * @data: pointer to echo data to return in the accept.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully issued acc echo response
+ *   1 - Failed to issue acc echo response
+ **/
+static int
+lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
+                     struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba  *phba = vport->phba;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+       int rc;
+
+       psli = &phba->sli;
+       cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+
+       elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+                                    ndlp->nlp_DID, ELS_CMD_ACC);
+       if (!elsiocb)
+               return 1;
+
+       elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;    /* Xri */
+       /* Xmit ECHO ACC response tag <ulpIoTag> */
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                        "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
+                        elsiocb->iotag, elsiocb->iocb.ulpContext);
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+       pcmd += sizeof(uint32_t);
+       memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+               "Issue ACC ECHO:  did:x%x flg:x%x",
+               ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+       phba->fc_stat.elsXmitACC++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+       lpfc_nlp_put(ndlp);
+       elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
+                                   * it could be freed */
+
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
+       return 0;
+}
+
 /**
  * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
  * @vport: pointer to a host virtual N_Port data structure.
@@ -4559,7 +4732,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 
        lpfc_set_disctmo(vport);
 
-       if (phba->fc_topology == TOPOLOGY_LOOP) {
+       if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                /* We should never receive a FLOGI in loop mode, ignore it */
                did = icmd->un.elsreq64.remoteID;
 
@@ -4681,6 +4854,30 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        return 0;
 }
 
+/**
+ * lpfc_els_rcv_echo - Process an unsolicited echo iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully processed echo iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+                 struct lpfc_nodelist *ndlp)
+{
+       uint8_t *pcmd;
+
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+
+       /* skip over first word of echo command to find echo data */
+       pcmd += sizeof(uint32_t);
+
+       lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
+       return 0;
+}
+
 /**
  * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
  * @vport: pointer to a host virtual N_Port data structure.
@@ -4730,6 +4927,91 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                 struct lpfc_nodelist *ndlp)
 {
        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+       if (vport->phba->sli_rev == LPFC_SLI_REV4)
+               lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
+}
+
+/**
+ * lpfc_els_rsp_rls_acc - Completion callback for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. It actually sends the Accept (ACC) response to a Read
+ * Link Status (RLS) unsolicited IOCB event. It collects the link statistics
+ * from the completion of the MBX_READ_LNK_STAT mailbox command, constructs
+ * the RLS response with the link statistics collected, and then invokes the
+ * lpfc_sli_issue_iocb() routine to send the ACC response to the RLS.
+ *
+ * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp, and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RLS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+       MAILBOX_t *mb;
+       IOCB_t *icmd;
+       struct RLS_RSP *rls_rsp;
+       uint8_t *pcmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_nodelist *ndlp;
+       uint16_t xri;
+       uint32_t cmdsize;
+
+       mb = &pmb->u.mb;
+
+       ndlp = (struct lpfc_nodelist *) pmb->context2;
+       xri = (uint16_t) ((unsigned long)(pmb->context1));
+       pmb->context1 = NULL;
+       pmb->context2 = NULL;
+
+       if (mb->mbxStatus) {
+               mempool_free(pmb, phba->mbox_mem_pool);
+               return;
+       }
+
+       cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
+       mempool_free(pmb, phba->mbox_mem_pool);
+       elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+                                    lpfc_max_els_tries, ndlp,
+                                    ndlp->nlp_DID, ELS_CMD_ACC);
+
+       /* Decrement the ndlp reference count from previous mbox command */
+       lpfc_nlp_put(ndlp);
+
+       if (!elsiocb)
+               return;
+
+       icmd = &elsiocb->iocb;
+       icmd->ulpContext = xri;
+
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+       pcmd += sizeof(uint32_t); /* Skip past command */
+       rls_rsp = (struct RLS_RSP *)pcmd;
+
+       rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+       rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+       rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+       rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+       rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+       rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+
+       /* Xmit ELS RLS ACC response tag <ulpIoTag> */
+       lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+                        "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
+                        "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+                        elsiocb->iotag, elsiocb->iocb.ulpContext,
+                        ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+                        ndlp->nlp_rpi);
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+       phba->fc_stat.elsXmitACC++;
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+               lpfc_els_free_iocb(phba, elsiocb);
 }
 
 /**
@@ -4795,7 +5077,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        pcmd += sizeof(uint32_t); /* Skip past command */
        rps_rsp = (RPS_RSP *)pcmd;
 
-       if (phba->fc_topology != TOPOLOGY_LOOP)
+       if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
                status = 0x10;
        else
                status = 0x8;
@@ -4825,7 +5107,155 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 }
 
 /**
- * lpfc_els_rcv_rps - Process an unsolicited rps iocb
+ * lpfc_els_rcv_rls - Process an unsolicited rls iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Link Status (RLS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
+ * to read the HBA link statistics. The callback function,
+ * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
+ * actually sends out the RLS Accept (ACC) response.
+ *
+ * Return codes
+ *   0 - Successfully processed rls iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+                struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba *phba = vport->phba;
+       LPFC_MBOXQ_t *mbox;
+       struct lpfc_dmabuf *pcmd;
+       struct ls_rjt stat;
+
+       if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+           (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+               /* reject the unsolicited RLS request and done with it */
+               goto reject_out;
+
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+       if (mbox) {
+               lpfc_read_lnk_stat(phba, mbox);
+               mbox->context1 =
+                   (void *)((unsigned long) cmdiocb->iocb.ulpContext);
+               mbox->context2 = lpfc_nlp_get(ndlp);
+               mbox->vport = vport;
+               mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
+               if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+                       != MBX_NOT_FINISHED)
+                       /* Mbox completion will send ELS Response */
+                       return 0;
+               /* Decrement reference count used for the failed mbox
+                * command.
+                */
+               lpfc_nlp_put(ndlp);
+               mempool_free(mbox, phba->mbox_mem_pool);
+       }
+reject_out:
+       /* issue rejection response */
+       stat.un.b.lsRjtRsvd0 = 0;
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+       stat.un.b.vendorUnique = 0;
+       lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+       return 0;
+}
+
+/**
+ * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Timeout Value (RTV) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it sends the Accept (ACC) response to the Read
+ * Timeout Value (RTV) unsolicited IOCB event.
+ *
+ * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp, and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RTV Accept Response ELS IOCB command.
+ *
+ * Return codes
+ *   0 - Successfully processed rtv iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+                struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba *phba = vport->phba;
+       struct ls_rjt stat;
+       struct RTV_RSP *rtv_rsp;
+       uint8_t *pcmd;
+       struct lpfc_iocbq *elsiocb;
+       uint32_t cmdsize;
+
+       if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+           (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+               /* reject the unsolicited RTV request and done with it */
+               goto reject_out;
+
+       cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
+       elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+                                    lpfc_max_els_tries, ndlp,
+                                    ndlp->nlp_DID, ELS_CMD_ACC);
+
+       if (!elsiocb)
+               return 1;
+
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+       pcmd += sizeof(uint32_t); /* Skip past command */
+
+       /* use the command's xri in the response */
+       elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
+
+       rtv_rsp = (struct RTV_RSP *)pcmd;
+
+       /* populate RTV payload */
+       rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
+       rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
+       bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
+       bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
+       rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
+
+       /* Xmit ELS RTV ACC response tag <ulpIoTag> */
+       lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+                        "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
+                        "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
+                        "Data: x%x x%x x%x\n",
+                        elsiocb->iotag, elsiocb->iocb.ulpContext,
+                        ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+                        ndlp->nlp_rpi,
+                       rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+       phba->fc_stat.elsXmitACC++;
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+               lpfc_els_free_iocb(phba, elsiocb);
+       return 0;
+
+reject_out:
+       /* issue rejection response */
+       stat.un.b.lsRjtRsvd0 = 0;
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+       stat.un.b.vendorUnique = 0;
+       lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+       return 0;
+}
+
+/**
+ * lpfc_els_rcv_rps - Process an unsolicited rps iocb
  * @vport: pointer to a host virtual N_Port data structure.
  * @cmdiocb: pointer to lpfc command iocb data structure.
  * @ndlp: pointer to a node-list data structure.
@@ -4901,6 +5331,97 @@ reject_out:
        return 0;
 }
 
+/**
+ * lpfc_issue_els_rrq - Issue an ELS RRQ command to a remote port
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: DID of the target.
+ * @rrq: Pointer to the rrq struct.
+ *
+ * Build an ELS RRQ command and send it to the target. If the issue iocb is
+ * successful, the completion handler will clear the RRQ.
+ *
+ * Return codes
+ *   0 - Successfully sent rrq els iocb.
+ *   1 - Failed to send rrq els iocb.
+ **/
+static int
+lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                       uint32_t did, struct lpfc_node_rrq *rrq)
+{
+       struct lpfc_hba  *phba = vport->phba;
+       struct RRQ *els_rrq;
+       IOCB_t *icmd;
+       struct lpfc_iocbq *elsiocb;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+       int ret;
+
+       if (ndlp != rrq->ndlp)
+               ndlp = rrq->ndlp;
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+               return 1;
+
+       /* If ndlp is not NULL, we will bump the reference count on it */
+       cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
+       elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
+                                    ELS_CMD_RRQ);
+       if (!elsiocb)
+               return 1;
+
+       icmd = &elsiocb->iocb;
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+       /* For RRQ request, remainder of payload is Exchange IDs */
+       *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
+       pcmd += sizeof(uint32_t);
+       els_rrq = (struct RRQ *) pcmd;
+
+       bf_set(rrq_oxid, els_rrq, rrq->xritag);
+       bf_set(rrq_rxid, els_rrq, rrq->rxid);
+       bf_set(rrq_did, els_rrq, vport->fc_myDID);
+       els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
+       els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+               "Issue RRQ:     did:x%x",
+               did, rrq->xritag, rrq->rxid);
+       elsiocb->context_un.rrq = rrq;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
+       ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+       if (ret == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
+       return 0;
+}
+
+/**
+ * lpfc_send_rrq - Sends ELS RRQ if needed.
+ * @phba: pointer to lpfc hba data structure.
+ * @rrq: pointer to the active rrq.
+ *
+ * This routine will call lpfc_issue_els_rrq() if the rrq is still active
+ * for the xri. If this function returns a failure, then the caller needs
+ * to clean up the RRQ by calling lpfc_clr_rrq_active().
+ *
+ * Returns 0 Success.
+ *         1 Failure.
+ **/
+int
+lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
+{
+       struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
+                                                       rrq->nlp_DID);
+       if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
+               return lpfc_issue_els_rrq(rrq->vport, ndlp,
+                                        rrq->nlp_DID, rrq);
+       else
+               return 1;
+}
+
 /**
  * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
  * @vport: pointer to a host virtual N_Port data structure.
@@ -5015,7 +5536,6 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
        lp = (uint32_t *) pcmd->virt;
        rpl = (RPL *) (lp + 1);
-
        maxsize = be32_to_cpu(rpl->maxsize);
 
        /* We support only one port */
@@ -5190,7 +5710,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
                            sizeof(struct lpfc_name)))) {
                        /* This port has switched fabrics. FLOGI is required */
-                       lpfc_initial_flogi(vport);
+                       lpfc_issue_init_vfi(vport);
                } else {
                        /* FAN verified - skip FLOGI */
                        vport->fc_myDID = vport->fc_prevDID;
@@ -5834,6 +6354,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                if (newnode)
                        lpfc_nlp_put(ndlp);
                break;
+       case ELS_CMD_RLS:
+               lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+                       "RCV RLS:         did:x%x/ste:x%x flg:x%x",
+                       did, vport->port_state, ndlp->nlp_flag);
+
+               phba->fc_stat.elsRcvRLS++;
+               lpfc_els_rcv_rls(vport, elsiocb, ndlp);
+               if (newnode)
+                       lpfc_nlp_put(ndlp);
+               break;
        case ELS_CMD_RPS:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
                        "RCV RPS:         did:x%x/ste:x%x flg:x%x",
@@ -5864,6 +6394,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                if (newnode)
                        lpfc_nlp_put(ndlp);
                break;
+       case ELS_CMD_RTV:
+               lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+                       "RCV RTV:        did:x%x/ste:x%x flg:x%x",
+                       did, vport->port_state, ndlp->nlp_flag);
+               phba->fc_stat.elsRcvRTV++;
+               lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
+               if (newnode)
+                       lpfc_nlp_put(ndlp);
+               break;
        case ELS_CMD_RRQ:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
                        "RCV RRQ:         did:x%x/ste:x%x flg:x%x",
@@ -5874,13 +6413,23 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                if (newnode)
                        lpfc_nlp_put(ndlp);
                break;
+       case ELS_CMD_ECHO:
+               lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+                       "RCV ECHO:        did:x%x/ste:x%x flg:x%x",
+                       did, vport->port_state, ndlp->nlp_flag);
+
+               phba->fc_stat.elsRcvECHO++;
+               lpfc_els_rcv_echo(vport, elsiocb, ndlp);
+               if (newnode)
+                       lpfc_nlp_put(ndlp);
+               break;
        default:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
                        "RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
                        cmd, did, vport->port_state);
 
                /* Unsupported ELS command, reject */
-               rjt_err = LSRJT_INVALID_CMD;
+               rjt_err = LSRJT_CMD_UNSUPPORTED;
 
                /* Unknown ELS command <elsCmd> received from NPORT <did> */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -6052,7 +6601,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
        if (!ndlp) {
                ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
                if (!ndlp) {
-                       if (phba->fc_topology == TOPOLOGY_LOOP) {
+                       if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                                lpfc_disc_start(vport);
                                return;
                        }
@@ -6065,7 +6614,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
        } else if (!NLP_CHK_NODE_ACT(ndlp)) {
                ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
                if (!ndlp) {
-                       if (phba->fc_topology == TOPOLOGY_LOOP) {
+                       if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                                lpfc_disc_start(vport);
                                return;
                        }
@@ -6087,18 +6636,31 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
        }
 
        if (vport->cfg_fdmi_on) {
-               ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
-                                         GFP_KERNEL);
+               /* If this is the first time, allocate an ndlp and initialize
+                * it. Otherwise, make sure the node is enabled and then do the
+                * login.
+                */
+               ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
+               if (!ndlp_fdmi) {
+                       ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
+                                                 GFP_KERNEL);
+                       if (ndlp_fdmi) {
+                               lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
+                               ndlp_fdmi->nlp_type |= NLP_FABRIC;
+                       } else
+                               return;
+               }
+               if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
+                       ndlp_fdmi = lpfc_enable_node(vport,
+                                                    ndlp_fdmi,
+                                                    NLP_STE_NPR_NODE);
+
                if (ndlp_fdmi) {
-                       lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
-                       ndlp_fdmi->nlp_type |= NLP_FABRIC;
                        lpfc_nlp_set_state(vport, ndlp_fdmi,
-                               NLP_STE_PLOGI_ISSUE);
-                       lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
-                                            0);
+                                          NLP_STE_PLOGI_ISSUE);
+                       lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
                }
        }
-       return;
 }
 
 /**
@@ -6168,13 +6730,15 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
                default:
                        /* Try to recover from this error */
+                       if (phba->sli_rev == LPFC_SLI_REV4)
+                               lpfc_sli4_unreg_all_rpis(vport);
                        lpfc_mbx_unreg_vpi(vport);
                        spin_lock_irq(shost->host_lock);
                        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                        spin_unlock_irq(shost->host_lock);
                        if (vport->port_type == LPFC_PHYSICAL_PORT
                                && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
-                               lpfc_initial_flogi(vport);
+                               lpfc_issue_init_vfi(vport);
                        else
                                lpfc_initial_fdisc(vport);
                        break;
@@ -6411,7 +6975,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
        vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
        vport->fc_flag |= FC_FABRIC;
-       if (vport->phba->fc_topology == TOPOLOGY_LOOP)
+       if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
                vport->fc_flag |=  FC_PUBLIC_LOOP;
        spin_unlock_irq(shost->host_lock);
 
@@ -6435,6 +6999,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        lpfc_unreg_rpi(vport, np);
                }
                lpfc_cleanup_pending_mbox(vport);
+
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       lpfc_sli4_unreg_all_rpis(vport);
+
                lpfc_mbx_unreg_vpi(vport);
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6450,7 +7018,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 * to update the MAC address.
                 */
                lpfc_register_new_vport(phba, vport, ndlp);
-               return ;
+               goto out;
        }
 
        if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
@@ -6517,7 +7085,9 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        icmd->un.elsreq64.myID = 0;
        icmd->un.elsreq64.fl = 1;
 
-       if  (phba->sli_rev == LPFC_SLI_REV4) {
+       if  ((phba->sli_rev == LPFC_SLI_REV4) &&
+            (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+             LPFC_SLI_INTF_IF_TYPE_0)) {
                /* FDISC needs to be 1 for WQE VPI */
                elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
                elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
@@ -7024,8 +7594,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
                          struct sli4_wcqe_xri_aborted *axri)
 {
        uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+       uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+
        struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
        unsigned long iflag = 0;
+       struct lpfc_nodelist *ndlp;
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 
        spin_lock_irqsave(&phba->hbalock, iflag);
@@ -7034,11 +7607,14 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
                        &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
                if (sglq_entry->sli4_xritag == xri) {
                        list_del(&sglq_entry->list);
+                       ndlp = sglq_entry->ndlp;
+                       sglq_entry->ndlp = NULL;
                        list_add_tail(&sglq_entry->list,
                                &phba->sli4_hba.lpfc_sgl_list);
                        sglq_entry->state = SGL_FREED;
                        spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
                        spin_unlock_irqrestore(&phba->hbalock, iflag);
+                       lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
 
                        /* Check if TXQ queue needs to be serviced */
                        if (pring->txq_cnt)