]> git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/scsi/lpfc/lpfc_sli.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
[karo-tx-linux.git] / drivers / scsi / lpfc / lpfc_sli.c
index fd5835e1c0395b93a387353b93fdddbf753c1f32..98999bbd8cbfee0dedefa49f90a85917c3cf2e33 100644 (file)
@@ -65,6 +65,9 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
+static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
+                                   struct lpfc_cqe *);
+
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
 {
@@ -456,7 +459,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
        struct lpfc_iocbq * iocbq = NULL;
 
        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
-
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
@@ -479,13 +481,10 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 static struct lpfc_sglq *
 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 {
-       uint16_t adj_xri;
        struct lpfc_sglq *sglq;
-       adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-       if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
-               return NULL;
-       sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
-       phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
+
+       sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
+       phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
 }
 
@@ -504,12 +503,9 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 struct lpfc_sglq *
 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 {
-       uint16_t adj_xri;
        struct lpfc_sglq *sglq;
-       adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-       if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
-               return NULL;
-       sglq =  phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+
+       sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
 }
 
@@ -532,7 +528,6 @@ static int
 __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
 {
-       uint16_t adj_xri;
        struct lpfc_node_rrq *rrq;
        int empty;
        uint32_t did = 0;
@@ -553,21 +548,19 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        /*
         * set the active bit even if there is no mem available.
         */
-       adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-
        if (NLP_CHK_FREE_REQ(ndlp))
                goto out;
 
        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;
 
-       if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+       if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
                goto out;
 
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (rrq) {
                rrq->send_rrq = send_rrq;
-               rrq->xritag = xritag;
+               rrq->xritag = phba->sli4_hba.xri_ids[xritag];
                rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
                rrq->ndlp = ndlp;
                rrq->nlp_DID = ndlp->nlp_DID;
@@ -603,7 +596,6 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
 {
-       uint16_t adj_xri;
        struct lpfc_nodelist *ndlp = NULL;
 
        if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
@@ -619,8 +611,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
        if (!ndlp)
                goto out;
 
-       adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-       if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
+       if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
@@ -796,12 +787,9 @@ int
 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                        uint16_t  xritag)
 {
-       uint16_t adj_xri;
-
-       adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
        if (!ndlp)
                return 0;
-       if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+       if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
                        return 1;
        else
                return 0;
@@ -841,7 +829,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
  * @piocb: Pointer to the iocbq.
  *
  * This function is called with hbalock held. This function
- * Gets a new driver sglq object from the sglq list. If the
+ * gets a new driver sglq object from the sglq list. If the
  * list is not empty then it is successful, it returns pointer to the newly
  * allocated sglq object else it returns NULL.
  **/
@@ -851,7 +839,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
        struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
-       uint16_t adj_xri;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;
@@ -870,8 +857,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
        while (!found) {
                if (!sglq)
                        return NULL;
-               adj_xri = sglq->sli4_xritag -
-                               phba->sli4_hba.max_cfg_param.xri_base;
                if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
@@ -888,7 +873,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
                }
                sglq->ndlp = ndlp;
                found = 1;
-               phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+               phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        return sglq;
@@ -944,7 +929,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
-               sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+               sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
        if (sglq)  {
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                        (sglq->state != SGL_XRI_ABORTED)) {
@@ -971,6 +957,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+       iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
 }
@@ -2113,7 +2100,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
            pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
            !pmb->u.mb.mbxStatus) {
                rpi = pmb->u.mb.un.varWords[0];
-               vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
+               vpi = pmb->u.mb.un.varRegLogin.vpi;
                lpfc_unreg_login(phba, vpi, rpi, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -3881,8 +3868,10 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
        list_del_init(&phba->sli4_hba.els_cq->list);
        for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
                list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
-       for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
+       qindx = 0;
+       do
                list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
+       while (++qindx < phba->cfg_fcp_eq_count);
        spin_unlock_irq(&phba->hbalock);
 
        /* Now physically reset the device */
@@ -4318,6 +4307,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
                        continue;
                } else if (rc)
                        break;
+
                phba->link_state = LPFC_INIT_MBX_CMDS;
                lpfc_config_port(phba, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -4421,7 +4411,8 @@ int
 lpfc_sli_hba_setup(struct lpfc_hba *phba)
 {
        uint32_t rc;
-       int  mode = 3;
+       int  mode = 3, i;
+       int longs;
 
        switch (lpfc_sli_mode) {
        case 2:
@@ -4491,6 +4482,35 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
        if (rc)
                goto lpfc_sli_hba_setup_error;
 
+       /* Initialize VPIs. */
+       if (phba->sli_rev == LPFC_SLI_REV3) {
+               /*
+                * The VPI bitmask and physical ID array are allocated
+                * and initialized once only - at driver load.  A port
+                * reset doesn't need to reinitialize this memory.
+                */
+               if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
+                       longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
+                       phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
+                                                 GFP_KERNEL);
+                       if (!phba->vpi_bmask) {
+                               rc = -ENOMEM;
+                               goto lpfc_sli_hba_setup_error;
+                       }
+
+                       phba->vpi_ids = kzalloc(
+                                       (phba->max_vpi+1) * sizeof(uint16_t),
+                                       GFP_KERNEL);
+                       if (!phba->vpi_ids) {
+                               kfree(phba->vpi_bmask);
+                               rc = -ENOMEM;
+                               goto lpfc_sli_hba_setup_error;
+                       }
+                       for (i = 0; i < phba->max_vpi; i++)
+                               phba->vpi_ids[i] = i;
+               }
+       }
+
        /* Init HBQs */
        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
                rc = lpfc_sli_hbq_setup(phba);
@@ -4677,9 +4697,11 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 
        lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
        lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
-       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+       fcp_eqidx = 0;
+       do
                lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
                                     LPFC_QUEUE_REARM);
+       while (++fcp_eqidx < phba->cfg_fcp_eq_count);
        lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
        for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
                lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -4687,174 +4709,967 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
+ * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
  * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
  *
- * This function is the main SLI4 device intialization PCI function. This
- * function is called by the HBA intialization code, HBA reset code and
- * HBA error attention handler code. Caller is not required to hold any
- * locks.
+ * This function reads the count and size of the available resource
+ * extents of the requested type from the port.
  **/
-int
-lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+static int
+lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
+                              uint16_t *extnt_count, uint16_t *extnt_size)
 {
-       int rc;
-       LPFC_MBOXQ_t *mboxq;
-       struct lpfc_mqe *mqe;
-       uint8_t *vpd;
-       uint32_t vpd_size;
-       uint32_t ftr_rsp = 0;
-       struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
-       struct lpfc_vport *vport = phba->pport;
-       struct lpfc_dmabuf *mp;
+       int rc = 0;
+       uint32_t length;
+       uint32_t mbox_tmo;
+       struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
+       LPFC_MBOXQ_t *mbox;
 
-       /*
-        * TODO:  Why does this routine execute these task in a different
-        * order from probe?
-        */
-       /* Perform a PCI function reset to start from clean */
-       rc = lpfc_pci_function_reset(phba);
-       if (unlikely(rc))
-               return -ENODEV;
+       mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
 
-       /* Check the HBA Host Status Register for readyness */
-       rc = lpfc_sli4_post_status_check(phba);
-       if (unlikely(rc))
-               return -ENODEV;
+       /* Find out how many extents are available for this resource type */
+       length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
+                        length, LPFC_SLI4_MBX_EMBED);
+
+       /* Send an extents count of 0 - the GET doesn't use it. */
+       rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+                                       LPFC_SLI4_MBX_EMBED);
+       if (unlikely(rc)) {
+               rc = -EIO;
+               goto err_exit;
+       }
+
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        else {
-               spin_lock_irq(&phba->hbalock);
-               phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
-               spin_unlock_irq(&phba->hbalock);
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+       }
+       if (unlikely(rc)) {
+               rc = -EIO;
+               goto err_exit;
+       }
+
+       rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
+       if (bf_get(lpfc_mbox_hdr_status,
+                  &rsrc_info->header.cfg_shdr.response)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+                               "2930 Failed to get resource extents "
+                               "Status 0x%x Add'l Status 0x%x\n",
+                               bf_get(lpfc_mbox_hdr_status,
+                                      &rsrc_info->header.cfg_shdr.response),
+                               bf_get(lpfc_mbox_hdr_add_status,
+                                      &rsrc_info->header.cfg_shdr.response));
+               rc = -EIO;
+               goto err_exit;
+       }
+
+       *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
+                             &rsrc_info->u.rsp);
+       *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
+                            &rsrc_info->u.rsp);
+ err_exit:
+       mempool_free(mbox, phba->mbox_mem_pool);
+       return rc;
+}
+
+/**
+ * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The extent type to check.
+ *
+ * This function reads the current available extents from the port and checks
+ * if the extent count or extent size has changed since the last access.
+ * Callers use this routine post port reset to understand if there is an
+ * extent reprovisioning requirement.
+ *
+ * Returns:
+ *   -Error: error indicates problem.
+ *   1: Extent count or size has changed.
+ *   0: No changes.
+ **/
+static int
+lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
+{
+       uint16_t curr_ext_cnt, rsrc_ext_cnt;
+       uint16_t size_diff, rsrc_ext_size;
+       int rc = 0;
+       struct lpfc_rsrc_blks *rsrc_entry;
+       struct list_head *rsrc_blk_list = NULL;
+
+       size_diff = 0;
+       curr_ext_cnt = 0;
+       rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+                                           &rsrc_ext_cnt,
+                                           &rsrc_ext_size);
+       if (unlikely(rc))
+               return -EIO;
+
+       switch (type) {
+       case LPFC_RSC_TYPE_FCOE_RPI:
+               rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+               break;
+       case LPFC_RSC_TYPE_FCOE_VPI:
+               rsrc_blk_list = &phba->lpfc_vpi_blk_list;
+               break;
+       case LPFC_RSC_TYPE_FCOE_XRI:
+               rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+               break;
+       case LPFC_RSC_TYPE_FCOE_VFI:
+               rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+               break;
+       default:
+               break;
+       }
+
+       list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
+               curr_ext_cnt++;
+               if (rsrc_entry->rsrc_size != rsrc_ext_size)
+                       size_diff++;
        }
 
+       if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
+               rc = 1;
+
+       return rc;
+}
+
+/**
+ * lpfc_sli4_cfg_post_extnts - Post the resource extent allocation request.
+ * @phba: Pointer to HBA context object.
+ * @extnt_cnt: number of available extents.
+ * @type: the extent type (rpi, xri, vfi, vpi).
+ * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
+ * @mbox: pointer to the caller's allocated mailbox structure.
+ *
+ * This function executes the extents allocation request.  It also
+ * takes care of the amount of memory needed to allocate or get the
+ * allocated extents. It is the caller's responsibility to evaluate
+ * the response.
+ *
+ * Returns:
+ *   -Error:  Error value describes the condition found.
+ *   0: if successful
+ **/
+static int
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
+                         uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
+{
+       int rc = 0;
+       uint32_t req_len;
+       uint32_t emb_len;
+       uint32_t alloc_len, mbox_tmo;
+
+       /* Calculate the total requested length of the dma memory */
+       req_len = *extnt_cnt * sizeof(uint16_t);
+
        /*
-        * Allocate a single mailbox container for initializing the
-        * port.
+        * Calculate the size of an embedded mailbox.  The uint32_t
+        * accounts for extents-specific word.
         */
-       mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-       if (!mboxq)
-               return -ENOMEM;
+       emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+               sizeof(uint32_t);
 
        /*
-        * Continue initialization with default values even if driver failed
-        * to read FCoE param config regions
+        * Presume the allocation and response will fit into an embedded
+        * mailbox.  If not true, reconfigure to a non-embedded mailbox.
         */
-       if (lpfc_sli4_read_fcoe_params(phba, mboxq))
-               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
-                       "2570 Failed to read FCoE parameters\n");
-
-       /* Issue READ_REV to collect vpd and FW information. */
-       vpd_size = SLI4_PAGE_SIZE;
-       vpd = kzalloc(vpd_size, GFP_KERNEL);
-       if (!vpd) {
-               rc = -ENOMEM;
-               goto out_free_mbox;
+       *emb = LPFC_SLI4_MBX_EMBED;
+       if (req_len > emb_len) {
+               req_len = *extnt_cnt * sizeof(uint16_t) +
+                       sizeof(union lpfc_sli4_cfg_shdr) +
+                       sizeof(uint32_t);
+               *emb = LPFC_SLI4_MBX_NEMBED;
        }
 
-       rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
-       if (unlikely(rc)) {
-               kfree(vpd);
-               goto out_free_mbox;
+       alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                                    LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
+                                    req_len, *emb);
+       if (alloc_len < req_len) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "9000 Allocated DMA memory size (x%x) is "
+                       "less than the requested DMA memory "
+                       "size (x%x)\n", alloc_len, req_len);
+               return -ENOMEM;
        }
-       mqe = &mboxq->u.mqe;
-       phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
-       if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
-               phba->hba_flag |= HBA_FCOE_MODE;
-       else
-               phba->hba_flag &= ~HBA_FCOE_MODE;
+       rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
+       if (unlikely(rc))
+               return -EIO;
 
-       if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
-               LPFC_DCBX_CEE_MODE)
-               phba->hba_flag |= HBA_FIP_SUPPORT;
-       else
-               phba->hba_flag &= ~HBA_FIP_SUPPORT;
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       else {
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+       }
 
-       if (phba->sli_rev != LPFC_SLI_REV4) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-                       "0376 READ_REV Error. SLI Level %d "
-                       "FCoE enabled %d\n",
-                       phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
+       if (unlikely(rc))
                rc = -EIO;
-               kfree(vpd);
-               goto out_free_mbox;
-       }
-       /*
-        * Evaluate the read rev and vpd data. Populate the driver
-        * state with the results. If this routine fails, the failure
-        * is not fatal as the driver will use generic values.
-        */
-       rc = lpfc_parse_vpd(phba, vpd, vpd_size);
-       if (unlikely(!rc)) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-                               "0377 Error %d parsing vpd. "
-                               "Using defaults.\n", rc);
-               rc = 0;
+       return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type:  The resource extent type to allocate.
+ *
+ * This function allocates the number of elements for the specified
+ * resource type.
+ **/
+static int
+lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+       bool emb = false;
+       uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
+       uint16_t rsrc_id, rsrc_start, j, k;
+       uint16_t *ids;
+       int i, rc;
+       unsigned long longs;
+       unsigned long *bmask;
+       struct lpfc_rsrc_blks *rsrc_blks;
+       LPFC_MBOXQ_t *mbox;
+       uint32_t length;
+       struct lpfc_id_range *id_array = NULL;
+       void *virtaddr = NULL;
+       struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+       struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+       struct list_head *ext_blk_list;
+
+       rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+                                           &rsrc_cnt,
+                                           &rsrc_size);
+       if (unlikely(rc))
+               return -EIO;
+
+       if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+                       "3009 No available Resource Extents "
+                       "for resource type 0x%x: Count: 0x%x, "
+                       "Size 0x%x\n", type, rsrc_cnt,
+                       rsrc_size);
+               return -ENOMEM;
        }
-       kfree(vpd);
 
-       /* Save information as VPD data */
-       phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
-       phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
-       phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
-       phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
-                                        &mqe->un.read_rev);
-       phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
-                                      &mqe->un.read_rev);
-       phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
-                                           &mqe->un.read_rev);
-       phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
-                                          &mqe->un.read_rev);
-       phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
-       memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
-       phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
-       memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
-       phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
-       memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
-       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
-                       "(%d):0380 READ_REV Status x%x "
-                       "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
-                       mboxq->vport ? mboxq->vport->vpi : 0,
-                       bf_get(lpfc_mqe_status, mqe),
-                       phba->vpd.rev.opFwName,
-                       phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
-                       phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
+       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
+                       "2903 Available Resource Extents "
+                       "for resource type 0x%x: Count: 0x%x, "
+                       "Size 0x%x\n", type, rsrc_cnt,
+                       rsrc_size);
 
-       /*
-        * Discover the port's supported feature set and match it against the
-        * hosts requests.
-        */
-       lpfc_request_features(phba, mboxq);
-       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+
+       rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
        if (unlikely(rc)) {
                rc = -EIO;
-               goto out_free_mbox;
+               goto err_exit;
        }
 
        /*
-        * The port must support FCP initiator mode as this is the
-        * only mode running in the host.
+        * Figure out where the response is located.  Then get local pointers
+        * to the response data.  The port does not guarantee to respond to
+        * all extents counts request so update the local variable with the
+        * allocated count from the port.
         */
-       if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
-                               "0378 No support for fcpi mode.\n");
-               ftr_rsp++;
+       if (emb == LPFC_SLI4_MBX_EMBED) {
+               rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+               id_array = &rsrc_ext->u.rsp.id[0];
+               rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+       } else {
+               virtaddr = mbox->sge_array->addr[0];
+               n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+               rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+               id_array = &n_rsrc->id;
        }
-       if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
-               phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
-       else
-               phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
+
+       longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+       rsrc_id_cnt = rsrc_cnt * rsrc_size;
+
        /*
-        * If the port cannot support the host's requested features
-        * then turn off the global config parameters to disable the
-        * feature in the driver.  This is not a fatal error.
+        * Based on the resource size and count, correct the base and max
+        * resource values.
         */
-       if ((phba->cfg_enable_bg) &&
-           !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
-               ftr_rsp++;
+       length = sizeof(struct lpfc_rsrc_blks);
+       switch (type) {
+       case LPFC_RSC_TYPE_FCOE_RPI:
+               phba->sli4_hba.rpi_bmask = kzalloc(longs *
+                                                  sizeof(unsigned long),
+                                                  GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+                       rc = -ENOMEM;
+                       goto err_exit;
+               }
+               phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
+                                                sizeof(uint16_t),
+                                                GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.rpi_ids)) {
+                       kfree(phba->sli4_hba.rpi_bmask);
+                       rc = -ENOMEM;
+                       goto err_exit;
+               }
 
-       if (phba->max_vpi && phba->cfg_enable_npiv &&
-           !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
-               ftr_rsp++;
+               /*
+                * The next_rpi was initialized with the maximum available
+                * count but the port may allocate a smaller number.  Catch
+                * that case and update the next_rpi.
+                */
+               phba->sli4_hba.next_rpi = rsrc_id_cnt;
+
+               /* Initialize local ptrs for common extent processing later. */
+               bmask = phba->sli4_hba.rpi_bmask;
+               ids = phba->sli4_hba.rpi_ids;
+               ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+               break;
+       case LPFC_RSC_TYPE_FCOE_VPI:
+               phba->vpi_bmask = kzalloc(longs *
+                                         sizeof(unsigned long),
+                                         GFP_KERNEL);
+               if (unlikely(!phba->vpi_bmask)) {
+                       rc = -ENOMEM;
+                       goto err_exit;
+               }
+               phba->vpi_ids = kzalloc(rsrc_id_cnt *
+                                        sizeof(uint16_t),
+                                        GFP_KERNEL);
+               if (unlikely(!phba->vpi_ids)) {
+                       kfree(phba->vpi_bmask);
+                       rc = -ENOMEM;
+                       goto err_exit;
+               }
+
+               /* Initialize local ptrs for common extent processing later. */
+               bmask = phba->vpi_bmask;
+               ids = phba->vpi_ids;
+               ext_blk_list = &phba->lpfc_vpi_blk_list;
+               break;
+       case LPFC_RSC_TYPE_FCOE_XRI:
+               phba->sli4_hba.xri_bmask = kzalloc(longs *
+                                                  sizeof(unsigned long),
+                                                  GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.xri_bmask)) {
+                       rc = -ENOMEM;
+                       goto err_exit;
+               }
+               phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
+                                                sizeof(uint16_t),
+                                                GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.xri_ids)) {
+                       kfree(phba->sli4_hba.xri_bmask);
+                       rc = -ENOMEM;
+                       goto err_exit;
+               }
+
+               /* Initialize local ptrs for common extent processing later. */
+               bmask = phba->sli4_hba.xri_bmask;
+               ids = phba->sli4_hba.xri_ids;
+               ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+               break;
+       case LPFC_RSC_TYPE_FCOE_VFI:
+               phba->sli4_hba.vfi_bmask = kzalloc(longs *
+                                                  sizeof(unsigned long),
+                                                  GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+                       rc = -ENOMEM;
+                       goto err_exit;
+               }
+               phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
+                                                sizeof(uint16_t),
+                                                GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.vfi_ids)) {
+                       kfree(phba->sli4_hba.vfi_bmask);
+                       rc = -ENOMEM;
+                       goto err_exit;
+               }
+
+               /* Initialize local ptrs for common extent processing later. */
+               bmask = phba->sli4_hba.vfi_bmask;
+               ids = phba->sli4_hba.vfi_ids;
+               ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+               break;
+       default:
+               /* Unsupported Opcode.  Fail call. */
+               id_array = NULL;
+               bmask = NULL;
+               ids = NULL;
+               ext_blk_list = NULL;
+               goto err_exit;
+       }
+
+       /*
+        * Complete initializing the extent configuration with the
+        * allocated ids assigned to this function.  The bitmask serves
+        * as an index into the array and manages the available ids.  The
+        * array just stores the ids communicated to the port via the wqes.
+        */
+       for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
+               if ((i % 2) == 0)
+                       rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
+                                        &id_array[k]);
+               else
+                       rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
+                                        &id_array[k]);
+
+               rsrc_blks = kzalloc(length, GFP_KERNEL);
+               if (unlikely(!rsrc_blks)) {
+                       rc = -ENOMEM;
+                       kfree(bmask);
+                       kfree(ids);
+                       goto err_exit;
+               }
+               rsrc_blks->rsrc_start = rsrc_id;
+               rsrc_blks->rsrc_size = rsrc_size;
+               list_add_tail(&rsrc_blks->list, ext_blk_list);
+               rsrc_start = rsrc_id;
+               if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
+                       phba->sli4_hba.scsi_xri_start = rsrc_start +
+                               lpfc_sli4_get_els_iocb_cnt(phba);
+
+               while (rsrc_id < (rsrc_start + rsrc_size)) {
+                       ids[j] = rsrc_id;
+                       rsrc_id++;
+                       j++;
+               }
+               /* Entire word processed.  Get next word.*/
+               if ((i % 2) == 1)
+                       k++;
+       }
+ err_exit:
+       lpfc_sli4_mbox_cmd_free(phba, mbox);
+       return rc;
+}
+
+/**
+ * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: the extent's type.
+ *
+ * This function deallocates all extents of a particular resource type.
+ * SLI4 does not allow for deallocating a particular extent range.  It
+ * is the caller's responsibility to release all kernel memory resources.
+ **/
+static int
+lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+       int rc;
+       uint32_t length, mbox_tmo = 0;
+       LPFC_MBOXQ_t *mbox;
+       struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
+       struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
+
+       mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+
+       /*
+        * This function sends an embedded mailbox because it only sends the
+        * resource type.  All extents of this type are released by the
+        * port.
+        */
+       length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
+                        length, LPFC_SLI4_MBX_EMBED);
+
+       /* Send an extents count of 0 - the dealloc doesn't use it. */
+       rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+                                       LPFC_SLI4_MBX_EMBED);
+       if (unlikely(rc)) {
+               rc = -EIO;
+               goto out_free_mbox;
+       }
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       else {
+               /*
+                * Derive the timeout from the SLI4_CONFIG command, not from
+                * the zero-initialized mbox_tmo local (command 0 would give
+                * the wrong timeout value).
+                */
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+       }
+       if (unlikely(rc)) {
+               rc = -EIO;
+               goto out_free_mbox;
+       }
+
+       dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
+       if (bf_get(lpfc_mbox_hdr_status,
+                  &dealloc_rsrc->header.cfg_shdr.response)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+                               "2919 Failed to release resource extents "
+                               "for type %d - Status 0x%x Add'l Status 0x%x. "
+                               "Resource memory not released.\n",
+                               type,
+                               bf_get(lpfc_mbox_hdr_status,
+                                   &dealloc_rsrc->header.cfg_shdr.response),
+                               bf_get(lpfc_mbox_hdr_add_status,
+                                   &dealloc_rsrc->header.cfg_shdr.response));
+               rc = -EIO;
+               goto out_free_mbox;
+       }
+
+       /*
+        * Release kernel memory resources for the specific type.  Clear the
+        * freed pointers so a later teardown pass (e.g.
+        * lpfc_sli4_dealloc_resource_identifiers) cannot double free them.
+        */
+       switch (type) {
+       case LPFC_RSC_TYPE_FCOE_VPI:
+               kfree(phba->vpi_bmask);
+               kfree(phba->vpi_ids);
+               phba->vpi_bmask = NULL;
+               phba->vpi_ids = NULL;
+               bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+               list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+                                   &phba->lpfc_vpi_blk_list, list) {
+                       list_del_init(&rsrc_blk->list);
+                       kfree(rsrc_blk);
+               }
+               break;
+       case LPFC_RSC_TYPE_FCOE_XRI:
+               kfree(phba->sli4_hba.xri_bmask);
+               kfree(phba->sli4_hba.xri_ids);
+               phba->sli4_hba.xri_bmask = NULL;
+               phba->sli4_hba.xri_ids = NULL;
+               bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+               list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+                                   &phba->sli4_hba.lpfc_xri_blk_list, list) {
+                       list_del_init(&rsrc_blk->list);
+                       kfree(rsrc_blk);
+               }
+               break;
+       case LPFC_RSC_TYPE_FCOE_VFI:
+               kfree(phba->sli4_hba.vfi_bmask);
+               kfree(phba->sli4_hba.vfi_ids);
+               phba->sli4_hba.vfi_bmask = NULL;
+               phba->sli4_hba.vfi_ids = NULL;
+               bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+               list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+                                   &phba->sli4_hba.lpfc_vfi_blk_list, list) {
+                       list_del_init(&rsrc_blk->list);
+                       kfree(rsrc_blk);
+               }
+               break;
+       case LPFC_RSC_TYPE_FCOE_RPI:
+               /* RPI bitmask and physical id array are cleaned up earlier. */
+               list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+                                   &phba->sli4_hba.lpfc_rpi_blk_list, list) {
+                       list_del_init(&rsrc_blk->list);
+                       kfree(rsrc_blk);
+               }
+               break;
+       default:
+               break;
+       }
+
+       bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+
+ out_free_mbox:
+       mempool_free(mbox, phba->mbox_mem_pool);
+       return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates all SLI4 resource identifiers.
+ **/
+int
+lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
+{
+       int i, rc, error = 0;
+       uint16_t count, base;
+       unsigned long longs;
+
+       if (phba->sli4_hba.extents_in_use) {
+               /*
+                * The port supports resource extents. The XRI, VPI, VFI, RPI
+                * resource extent count must be read and allocated before
+                * provisioning the resource id arrays.
+                */
+               if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+                   LPFC_IDX_RSRC_RDY) {
+                       /*
+                        * Extent-based resources are set - the driver could
+                        * be in a port reset. Figure out if any corrective
+                        * actions need to be taken.
+                        */
+                       rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+                                                LPFC_RSC_TYPE_FCOE_VFI);
+                       if (rc != 0)
+                               error++;
+                       rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+                                                LPFC_RSC_TYPE_FCOE_VPI);
+                       if (rc != 0)
+                               error++;
+                       rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+                                                LPFC_RSC_TYPE_FCOE_XRI);
+                       if (rc != 0)
+                               error++;
+                       rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+                                                LPFC_RSC_TYPE_FCOE_RPI);
+                       if (rc != 0)
+                               error++;
+
+                       /*
+                        * It's possible that the number of resources
+                        * provided to this port instance changed between
+                        * resets.  Detect this condition and reallocate
+                        * resources.  Otherwise, there is no action.
+                        */
+                       if (error) {
+                               lpfc_printf_log(phba, KERN_INFO,
+                                               LOG_MBOX | LOG_INIT,
+                                               "2931 Detected extent resource "
+                                               "change.  Reallocating all "
+                                               "extents.\n");
+                               rc = lpfc_sli4_dealloc_extent(phba,
+                                                LPFC_RSC_TYPE_FCOE_VFI);
+                               rc = lpfc_sli4_dealloc_extent(phba,
+                                                LPFC_RSC_TYPE_FCOE_VPI);
+                               rc = lpfc_sli4_dealloc_extent(phba,
+                                                LPFC_RSC_TYPE_FCOE_XRI);
+                               rc = lpfc_sli4_dealloc_extent(phba,
+                                                LPFC_RSC_TYPE_FCOE_RPI);
+                       } else
+                               return 0;
+               }
+
+               /*
+                * NOTE(review): a failure below leaves any extents already
+                * allocated by the earlier calls in place - confirm the
+                * caller performs a full teardown on error.
+                */
+               rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+               if (unlikely(rc))
+                       goto err_exit;
+
+               rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+               if (unlikely(rc))
+                       goto err_exit;
+
+               rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+               if (unlikely(rc))
+                       goto err_exit;
+
+               rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+               if (unlikely(rc))
+                       goto err_exit;
+               bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+                      LPFC_IDX_RSRC_RDY);
+               return rc;
+       } else {
+               /*
+                * The port does not support resource extents.  The XRI, VPI,
+                * VFI, RPI resource ids were determined from READ_CONFIG.
+                * Just allocate the bitmasks and provision the resource id
+                * arrays.  If a port reset is active, the resources don't
+                * need any action - just exit.
+                */
+               if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+                   LPFC_IDX_RSRC_RDY)
+                       return 0;
+
+               /* RPIs. */
+               count = phba->sli4_hba.max_cfg_param.max_rpi;
+               base = phba->sli4_hba.max_cfg_param.rpi_base;
+               longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+               phba->sli4_hba.rpi_bmask = kzalloc(longs *
+                                                  sizeof(unsigned long),
+                                                  GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+                       rc = -ENOMEM;
+                       goto err_exit;
+               }
+               phba->sli4_hba.rpi_ids = kzalloc(count *
+                                                sizeof(uint16_t),
+                                                GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.rpi_ids)) {
+                       rc = -ENOMEM;
+                       goto free_rpi_bmask;
+               }
+
+               for (i = 0; i < count; i++)
+                       phba->sli4_hba.rpi_ids[i] = base + i;
+
+               /* VPIs. */
+               count = phba->sli4_hba.max_cfg_param.max_vpi;
+               base = phba->sli4_hba.max_cfg_param.vpi_base;
+               longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+               phba->vpi_bmask = kzalloc(longs *
+                                         sizeof(unsigned long),
+                                         GFP_KERNEL);
+               if (unlikely(!phba->vpi_bmask)) {
+                       rc = -ENOMEM;
+                       goto free_rpi_ids;
+               }
+               phba->vpi_ids = kzalloc(count *
+                                       sizeof(uint16_t),
+                                       GFP_KERNEL);
+               if (unlikely(!phba->vpi_ids)) {
+                       rc = -ENOMEM;
+                       goto free_vpi_bmask;
+               }
+
+               for (i = 0; i < count; i++)
+                       phba->vpi_ids[i] = base + i;
+
+               /* XRIs. */
+               count = phba->sli4_hba.max_cfg_param.max_xri;
+               base = phba->sli4_hba.max_cfg_param.xri_base;
+               longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+               phba->sli4_hba.xri_bmask = kzalloc(longs *
+                                                  sizeof(unsigned long),
+                                                  GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.xri_bmask)) {
+                       rc = -ENOMEM;
+                       goto free_vpi_ids;
+               }
+               phba->sli4_hba.xri_ids = kzalloc(count *
+                                                sizeof(uint16_t),
+                                                GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.xri_ids)) {
+                       rc = -ENOMEM;
+                       goto free_xri_bmask;
+               }
+
+               for (i = 0; i < count; i++)
+                       phba->sli4_hba.xri_ids[i] = base + i;
+
+               /* VFIs. */
+               count = phba->sli4_hba.max_cfg_param.max_vfi;
+               base = phba->sli4_hba.max_cfg_param.vfi_base;
+               longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+               phba->sli4_hba.vfi_bmask = kzalloc(longs *
+                                                  sizeof(unsigned long),
+                                                  GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+                       rc = -ENOMEM;
+                       goto free_xri_ids;
+               }
+               phba->sli4_hba.vfi_ids = kzalloc(count *
+                                                sizeof(uint16_t),
+                                                GFP_KERNEL);
+               if (unlikely(!phba->sli4_hba.vfi_ids)) {
+                       rc = -ENOMEM;
+                       goto free_vfi_bmask;
+               }
+
+               for (i = 0; i < count; i++)
+                       phba->sli4_hba.vfi_ids[i] = base + i;
+
+               /*
+                * Mark all resources ready.  An HBA reset doesn't need
+                * to reset the initialization.
+                */
+               bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+                      LPFC_IDX_RSRC_RDY);
+               return 0;
+       }
+
+       /*
+        * Error unwind.  Clear each pointer after freeing it so a later
+        * teardown (lpfc_sli4_dealloc_resource_identifiers) does not free
+        * the same memory twice.
+        */
+ free_vfi_bmask:
+       kfree(phba->sli4_hba.vfi_bmask);
+       phba->sli4_hba.vfi_bmask = NULL;
+ free_xri_ids:
+       kfree(phba->sli4_hba.xri_ids);
+       phba->sli4_hba.xri_ids = NULL;
+ free_xri_bmask:
+       kfree(phba->sli4_hba.xri_bmask);
+       phba->sli4_hba.xri_bmask = NULL;
+ free_vpi_ids:
+       kfree(phba->vpi_ids);
+       phba->vpi_ids = NULL;
+ free_vpi_bmask:
+       kfree(phba->vpi_bmask);
+       phba->vpi_bmask = NULL;
+ free_rpi_ids:
+       kfree(phba->sli4_hba.rpi_ids);
+       phba->sli4_hba.rpi_ids = NULL;
+ free_rpi_bmask:
+       kfree(phba->sli4_hba.rpi_bmask);
+       phba->sli4_hba.rpi_bmask = NULL;
+ err_exit:
+       return rc;
+}
+
+/**
+ * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function releases all SLI4 resource identifiers previously
+ * allocated by lpfc_sli4_alloc_resource_identifiers().
+ **/
+int
+lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
+{
+       if (phba->sli4_hba.extents_in_use) {
+               lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+               lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+               lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+               lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+       } else {
+               /*
+                * Clear each pointer after freeing it; kfree(NULL) is a
+                * no-op, so this makes the routine safe to call more than
+                * once (e.g. across resets) without a double free.
+                */
+               kfree(phba->vpi_bmask);
+               phba->vpi_bmask = NULL;
+               kfree(phba->vpi_ids);
+               phba->vpi_ids = NULL;
+               bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+               kfree(phba->sli4_hba.xri_bmask);
+               phba->sli4_hba.xri_bmask = NULL;
+               kfree(phba->sli4_hba.xri_ids);
+               phba->sli4_hba.xri_ids = NULL;
+               bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+               kfree(phba->sli4_hba.vfi_bmask);
+               phba->sli4_hba.vfi_bmask = NULL;
+               kfree(phba->sli4_hba.vfi_ids);
+               phba->sli4_hba.vfi_ids = NULL;
+               bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+               bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+       }
+
+       return 0;
+}
+
+/**
+ * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI4 device initialization PCI function. This
+ * function is called by the HBA initialization code, HBA reset code and
+ * HBA error attention handler code. Caller is not required to hold any
+ * locks.
+ **/
+int
+lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+{
+       int rc;
+       LPFC_MBOXQ_t *mboxq;
+       struct lpfc_mqe *mqe;
+       uint8_t *vpd;
+       uint32_t vpd_size;
+       uint32_t ftr_rsp = 0;
+       struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
+       struct lpfc_vport *vport = phba->pport;
+       struct lpfc_dmabuf *mp;
+
+       /* Perform a PCI function reset to start from clean */
+       rc = lpfc_pci_function_reset(phba);
+       if (unlikely(rc))
+               return -ENODEV;
+
+       /* Check the HBA Host Status Register for readyness */
+       rc = lpfc_sli4_post_status_check(phba);
+       if (unlikely(rc))
+               return -ENODEV;
+       else {
+               spin_lock_irq(&phba->hbalock);
+               phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
+               spin_unlock_irq(&phba->hbalock);
+       }
+
+       /*
+        * Allocate a single mailbox container for initializing the
+        * port.
+        */
+       mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq)
+               return -ENOMEM;
+
+       /*
+        * Continue initialization with default values even if driver failed
+        * to read FCoE param config regions
+        */
+       if (lpfc_sli4_read_fcoe_params(phba, mboxq))
+               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
+                       "2570 Failed to read FCoE parameters\n");
+
+       /* Issue READ_REV to collect vpd and FW information. */
+       vpd_size = SLI4_PAGE_SIZE;
+       vpd = kzalloc(vpd_size, GFP_KERNEL);
+       if (!vpd) {
+               rc = -ENOMEM;
+               goto out_free_mbox;
+       }
+
+       rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
+       if (unlikely(rc)) {
+               kfree(vpd);
+               goto out_free_mbox;
+       }
+       mqe = &mboxq->u.mqe;
+       phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
+       if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
+               phba->hba_flag |= HBA_FCOE_MODE;
+       else
+               phba->hba_flag &= ~HBA_FCOE_MODE;
+
+       if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
+               LPFC_DCBX_CEE_MODE)
+               phba->hba_flag |= HBA_FIP_SUPPORT;
+       else
+               phba->hba_flag &= ~HBA_FIP_SUPPORT;
+
+       if (phba->sli_rev != LPFC_SLI_REV4) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                       "0376 READ_REV Error. SLI Level %d "
+                       "FCoE enabled %d\n",
+                       phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
+               rc = -EIO;
+               kfree(vpd);
+               goto out_free_mbox;
+       }
+       /*
+        * Evaluate the read rev and vpd data. Populate the driver
+        * state with the results. If this routine fails, the failure
+        * is not fatal as the driver will use generic values.
+        */
+       rc = lpfc_parse_vpd(phba, vpd, vpd_size);
+       if (unlikely(!rc)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "0377 Error %d parsing vpd. "
+                               "Using defaults.\n", rc);
+               rc = 0;
+       }
+       kfree(vpd);
+
+       /* Save information as VPD data */
+       phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
+       phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
+       phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
+       phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
+                                        &mqe->un.read_rev);
+       phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
+                                      &mqe->un.read_rev);
+       phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
+                                           &mqe->un.read_rev);
+       phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
+                                          &mqe->un.read_rev);
+       phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
+       memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
+       phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
+       memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
+       phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
+       memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
+       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                       "(%d):0380 READ_REV Status x%x "
+                       "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
+                       mboxq->vport ? mboxq->vport->vpi : 0,
+                       bf_get(lpfc_mqe_status, mqe),
+                       phba->vpd.rev.opFwName,
+                       phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
+                       phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
+
+       /*
+        * Discover the port's supported feature set and match it against the
+        * hosts requests.
+        */
+       lpfc_request_features(phba, mboxq);
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       if (unlikely(rc)) {
+               rc = -EIO;
+               goto out_free_mbox;
+       }
+
+       /*
+        * The port must support FCP initiator mode as this is the
+        * only mode running in the host.
+        */
+       if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+                               "0378 No support for fcpi mode.\n");
+               ftr_rsp++;
+       }
+       if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
+               phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
+       else
+               phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
+       /*
+        * If the port cannot support the host's requested features
+        * then turn off the global config parameters to disable the
+        * feature in the driver.  This is not a fatal error.
+        */
+       if ((phba->cfg_enable_bg) &&
+           !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
+               ftr_rsp++;
+
+       if (phba->max_vpi && phba->cfg_enable_npiv &&
+           !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+               ftr_rsp++;
 
        if (ftr_rsp) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
@@ -4873,6 +5688,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
        phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
        spin_unlock_irq(&phba->hbalock);
 
+       /*
+        * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
+        * calls depends on these resources to complete port setup.
+        */
+       rc = lpfc_sli4_alloc_resource_identifiers(phba);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "2920 Failed to alloc Resource IDs "
+                               "rc = x%x\n", rc);
+               goto out_free_mbox;
+       }
+
        /* Read the port's service parameters. */
        rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
        if (rc) {
@@ -4906,35 +5733,37 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                goto out_free_mbox;
        }
 
-       if (phba->cfg_soft_wwnn)
-               u64_to_wwn(phba->cfg_soft_wwnn,
-                          vport->fc_sparam.nodeName.u.wwn);
-       if (phba->cfg_soft_wwpn)
-               u64_to_wwn(phba->cfg_soft_wwpn,
-                          vport->fc_sparam.portName.u.wwn);
-       memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
-              sizeof(struct lpfc_name));
-       memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
-              sizeof(struct lpfc_name));
+       lpfc_update_vport_wwn(vport);
 
        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
 
        /* Register SGL pool to the device using non-embedded mailbox command */
-       rc = lpfc_sli4_post_sgl_list(phba);
-       if (unlikely(rc)) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-                               "0582 Error %d during sgl post operation\n",
-                                       rc);
-               rc = -ENODEV;
-               goto out_free_mbox;
+       if (!phba->sli4_hba.extents_in_use) {
+               rc = lpfc_sli4_post_els_sgl_list(phba);
+               if (unlikely(rc)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                                       "0582 Error %d during els sgl post "
+                                       "operation\n", rc);
+                       rc = -ENODEV;
+                       goto out_free_mbox;
+               }
+       } else {
+               rc = lpfc_sli4_post_els_sgl_list_ext(phba);
+               if (unlikely(rc)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                                       "2560 Error %d during els sgl post "
+                                       "operation\n", rc);
+                       rc = -ENODEV;
+                       goto out_free_mbox;
+               }
        }
 
        /* Register SCSI SGL pool to the device */
        rc = lpfc_sli4_repost_scsi_sgl_list(phba);
        if (unlikely(rc)) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                                "0383 Error %d during scsi sgl post "
                                "operation\n", rc);
                /* Some Scsi buffers were moved to the abort scsi list */
@@ -5747,10 +6576,15 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
                              sizeof(struct lpfc_mcqe));
        mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
-
-       /* Prefix the mailbox status with range x4000 to note SLI4 status. */
+       /*
+        * When the CQE status indicates a failure and the mailbox status
+        * indicates success then copy the CQE status into the mailbox status
+        * (and prefix it with x4000).
+        */
        if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
-               bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
+               if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
+                       bf_set(lpfc_mqe_status, mb,
+                              (LPFC_MBX_ERROR_RANGE | mcqe_status));
                rc = MBXERR_ERROR;
        } else
                lpfc_sli4_swap_str(phba, mboxq);
@@ -5819,7 +6653,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
                else
                        rc = -EIO;
                if (rc != MBX_SUCCESS)
-                       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
                                        "(%d):2541 Mailbox command x%x "
                                        "(x%x) cannot issue Data: x%x x%x\n",
                                        mboxq->vport ? mboxq->vport->vpi : 0,
@@ -6307,6 +7141,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
                        sgl->addr_hi = bpl->addrHigh;
                        sgl->addr_lo = bpl->addrLow;
 
+                       sgl->word2 = le32_to_cpu(sgl->word2);
                        if ((i+1) == numBdes)
                                bf_set(lpfc_sli4_sge_last, sgl, 1);
                        else
@@ -6343,6 +7178,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
                                cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
                        sgl->addr_lo =
                                cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+                       sgl->word2 = le32_to_cpu(sgl->word2);
                        bf_set(lpfc_sli4_sge_last, sgl, 1);
                        sgl->word2 = cpu_to_le32(sgl->word2);
                        sgl->sge_len =
@@ -6474,7 +7310,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                        els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
                                        >> LPFC_FIP_ELS_ID_SHIFT);
                }
-               bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi);
+               bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+                      phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
                bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
                bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
                bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6623,14 +7460,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                       iocbq->iocb.ulpContext);
                if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
                        bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
-                              iocbq->vport->vpi + phba->vpi_base);
+                              phba->vpi_ids[iocbq->vport->vpi]);
                bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
                bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
                bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
                       LPFC_WQE_LENLOC_WORD3);
                bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
-               bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi);
+               bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+                      phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
                command_type = OTHER_COMMAND;
        break;
        case CMD_CLOSE_XRI_CN:
@@ -6729,6 +7567,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                return IOCB_ERROR;
        break;
        }
+
        bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
        bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
        wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -6776,7 +7615,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
                                        return IOCB_BUSY;
                                }
                        } else {
-                       sglq = __lpfc_sli_get_sglq(phba, piocb);
+                               sglq = __lpfc_sli_get_sglq(phba, piocb);
                                if (!sglq) {
                                        if (!(flag & SLI_IOCB_RET_IOCB)) {
                                                __lpfc_sli_ringtx_put(phba,
@@ -6789,11 +7628,11 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
                        }
                }
        } else if (piocb->iocb_flag &  LPFC_IO_FCP) {
-               sglq = NULL; /* These IO's already have an XRI and
-                             * a mapped sgl.
-                             */
+               /* These IO's already have an XRI and a mapped sgl. */
+               sglq = NULL;
        } else {
-               /* This is a continuation of a commandi,(CX) so this
+               /*
+                * This is a continuation of a command (CX), so this
                 * sglq is on the active list
                 */
                sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
@@ -6802,8 +7641,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
        }
 
        if (sglq) {
+               piocb->sli4_lxritag = sglq->sli4_lxritag;
                piocb->sli4_xritag = sglq->sli4_xritag;
-
                if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
                        return IOCB_ERROR;
        }
@@ -9799,7 +10638,12 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
                break;
        case LPFC_WCQ:
                while ((cqe = lpfc_sli4_cq_get(cq))) {
-                       workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
+                       if (cq->subtype == LPFC_FCP)
+                               workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
+                                                                      cqe);
+                       else
+                               workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
+                                                                     cqe);
                        if (!(++ecount % LPFC_GET_QE_REL_INT))
                                lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
                }
@@ -11446,6 +12290,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
        LPFC_MBOXQ_t *mbox;
        int rc;
        uint32_t shdr_status, shdr_add_status;
+       uint32_t mbox_tmo;
        union lpfc_sli4_cfg_shdr *shdr;
 
        if (xritag == NO_XRI) {
@@ -11479,8 +12324,10 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
                                cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
        if (!phba->sli4_hba.intr_enable)
                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-       else
-               rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+       else {
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+       }
        /* The IOCTL status is embedded in the mailbox subheader. */
        shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -11494,7 +12341,77 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
                                shdr_status, shdr_add_status, rc);
                rc = -ENXIO;
        }
-       return 0;
+       return 0;
+}
+
+/**
+ * lpfc_sli4_alloc_xri - Allocate the next available logical xri
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the next available logical xri
+ * from the driver's xri bitmask.  The search starts at index 0 each
+ * time because the indexes are logical, not physical.  The allocation
+ * is performed under the hbalock and updates the driver's xri
+ * accounting counters.
+ *
+ * Return codes
+ *     the allocated logical xri - successful
+ *     NO_XRI - no xri currently available.
+ */
+uint16_t
+lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
+{
+       unsigned long xri;
+
+       /*
+        * Fetch the next logical xri.  Because this index is logical,
+        * the driver starts at 0 each time.
+        */
+       spin_lock_irq(&phba->hbalock);
+       xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
+                                phba->sli4_hba.max_cfg_param.max_xri, 0);
+       if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
+               spin_unlock_irq(&phba->hbalock);
+               return NO_XRI;
+       } else {
+               set_bit(xri, phba->sli4_hba.xri_bmask);
+               phba->sli4_hba.max_cfg_param.xri_used++;
+               phba->sli4_hba.xri_count++;
+       }
+
+       spin_unlock_irq(&phba->hbalock);
+       return xri;
+}
+
+/**
+ * __lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver.  Caller must hold hbalock.
+ **/
+void
+__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+       if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
+               phba->sli4_hba.xri_count--;
+               phba->sli4_hba.max_cfg_param.xri_used--;
+       }
+}
+
+/**
+ * lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver.
+ **/
+void
+lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+       spin_lock_irq(&phba->hbalock);
+       __lpfc_sli4_free_xri(phba, xri);
+       spin_unlock_irq(&phba->hbalock);
 }
 
 /**
@@ -11510,30 +12427,23 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
 uint16_t
 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
 {
-       uint16_t xritag;
+       uint16_t xri_index;
 
-       spin_lock_irq(&phba->hbalock);
-       xritag = phba->sli4_hba.next_xri;
-       if ((xritag != (uint16_t) -1) && xritag <
-               (phba->sli4_hba.max_cfg_param.max_xri
-                       + phba->sli4_hba.max_cfg_param.xri_base)) {
-               phba->sli4_hba.next_xri++;
-               phba->sli4_hba.max_cfg_param.xri_used++;
-               spin_unlock_irq(&phba->hbalock);
-               return xritag;
-       }
-       spin_unlock_irq(&phba->hbalock);
-       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+       xri_index = lpfc_sli4_alloc_xri(phba);
+       if (xri_index != NO_XRI)
+               return xri_index;
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "2004 Failed to allocate XRI.last XRITAG is %d"
                        " Max XRI is %d, Used XRI is %d\n",
-                       phba->sli4_hba.next_xri,
+                       xri_index,
                        phba->sli4_hba.max_cfg_param.max_xri,
                        phba->sli4_hba.max_cfg_param.xri_used);
-       return -1;
+       return NO_XRI;
 }
 
 /**
- * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
+ * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to post a block of driver's sgl pages to the
@@ -11542,7 +12452,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
  * stopped.
  **/
 int
-lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
 {
        struct lpfc_sglq *sglq_entry;
        struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -11551,7 +12461,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
        LPFC_MBOXQ_t *mbox;
        uint32_t reqlen, alloclen, pg_pairs;
        uint32_t mbox_tmo;
-       uint16_t xritag_start = 0;
+       uint16_t xritag_start = 0, lxri = 0;
        int els_xri_cnt, rc = 0;
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
@@ -11568,11 +12478,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
                return -ENOMEM;
        }
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-       if (!mbox) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "2560 Failed to allocate mbox cmd memory\n");
+       if (!mbox)
                return -ENOMEM;
-       }
 
        /* Allocate DMA memory and set up the non-embedded mailbox command */
        alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -11587,15 +12494,30 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
                lpfc_sli4_mbox_cmd_free(phba, mbox);
                return -ENOMEM;
        }
-       /* Get the first SGE entry from the non-embedded DMA memory */
-       viraddr = mbox->sge_array->addr[0];
-
        /* Set up the SGL pages in the non-embedded DMA pages */
+       viraddr = mbox->sge_array->addr[0];
        sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
        sgl_pg_pairs = &sgl->sgl_pg_pairs;
 
        for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
                sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+
+               /*
+                * Assign the sglq a physical xri only if the driver has not
+                * initialized those resources.  A port reset only needs
+                * the sglq's posted.
+                */
+               if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+                   LPFC_XRI_RSRC_RDY) {
+                       lxri = lpfc_sli4_next_xritag(phba);
+                       if (lxri == NO_XRI) {
+                               lpfc_sli4_mbox_cmd_free(phba, mbox);
+                               return -ENOMEM;
+                       }
+                       sglq_entry->sli4_lxritag = lxri;
+                       sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+               }
+
                /* Set up the sge entry */
                sgl_pg_pairs->sgl_pg0_addr_lo =
                                cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -11605,16 +12527,17 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
                                cpu_to_le32(putPaddrLow(0));
                sgl_pg_pairs->sgl_pg1_addr_hi =
                                cpu_to_le32(putPaddrHigh(0));
+
                /* Keep the first xritag on the list */
                if (pg_pairs == 0)
                        xritag_start = sglq_entry->sli4_xritag;
                sgl_pg_pairs++;
        }
+
+       /* Complete initialization and perform endian conversion. */
        bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
        bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
-       /* Perform endian conversion if necessary */
        sgl->word0 = cpu_to_le32(sgl->word0);
-
        if (!phba->sli4_hba.intr_enable)
                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        else {
@@ -11633,6 +12556,181 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
                                shdr_status, shdr_add_status, rc);
                rc = -ENXIO;
        }
+
+       if (rc == 0)
+               bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+                      LPFC_XRI_RSRC_RDY);
+       return rc;
+}
+
+/**
+ * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
+{
+       struct lpfc_sglq *sglq_entry;
+       struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+       struct sgl_page_pairs *sgl_pg_pairs;
+       void *viraddr;
+       LPFC_MBOXQ_t *mbox;
+       uint32_t reqlen, alloclen, index;
+       uint32_t mbox_tmo;
+       uint16_t rsrc_start, rsrc_size, els_xri_cnt;
+       uint16_t xritag_start = 0, lxri = 0;
+       struct lpfc_rsrc_blks *rsrc_blk;
+       int cnt, ttl_cnt, rc = 0;
+       int loop_cnt;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       /* The number of sgls to be posted */
+       els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+       reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+                sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+       if (reqlen > SLI4_PAGE_SIZE) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "2989 Block sgl registration required DMA "
+                               "size (%d) great than a page\n", reqlen);
+               return -ENOMEM;
+       }
+
+       cnt = 0;
+       ttl_cnt = 0;
+       list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+                           list) {
+               rsrc_start = rsrc_blk->rsrc_start;
+               rsrc_size = rsrc_blk->rsrc_size;
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3014 Working ELS Extent start %d, cnt %d\n",
+                               rsrc_start, rsrc_size);
+
+               loop_cnt = min(els_xri_cnt, rsrc_size);
+               if (ttl_cnt + loop_cnt >= els_xri_cnt) {
+                       loop_cnt = els_xri_cnt - ttl_cnt;
+                       ttl_cnt = els_xri_cnt;
+               }
+
+               mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!mbox)
+                       return -ENOMEM;
+               /*
+                * Allocate DMA memory and set up the non-embedded mailbox
+                * command.
+                */
+               alloclen = lpfc_sli4_config(phba, mbox,
+                                       LPFC_MBOX_SUBSYSTEM_FCOE,
+                                       LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+                                       reqlen, LPFC_SLI4_MBX_NEMBED);
+               if (alloclen < reqlen) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "2987 Allocated DMA memory size (%d) "
+                                       "is less than the requested DMA memory "
+                                       "size (%d)\n", alloclen, reqlen);
+                       lpfc_sli4_mbox_cmd_free(phba, mbox);
+                       return -ENOMEM;
+               }
+
+               /* Set up the SGL pages in the non-embedded DMA pages */
+               viraddr = mbox->sge_array->addr[0];
+               sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+               sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+               /*
+                * The starting resource may not begin at zero. Control
+                * the loop variants via the block resource parameters,
+                * but handle the sge pointers with a zero-based index
+                * that doesn't get reset per loop pass.
+                */
+               for (index = rsrc_start;
+                    index < rsrc_start + loop_cnt;
+                    index++) {
+                       sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
+
+                       /*
+                        * Assign the sglq a physical xri only if the driver
+                        * has not initialized those resources.  A port reset
+                        * only needs the sglq's posted.
+                        */
+                       if (bf_get(lpfc_xri_rsrc_rdy,
+                                  &phba->sli4_hba.sli4_flags) !=
+                                  LPFC_XRI_RSRC_RDY) {
+                               lxri = lpfc_sli4_next_xritag(phba);
+                               if (lxri == NO_XRI) {
+                                       lpfc_sli4_mbox_cmd_free(phba, mbox);
+                                       rc = -ENOMEM;
+                                       goto err_exit;
+                               }
+                               sglq_entry->sli4_lxritag = lxri;
+                               sglq_entry->sli4_xritag =
+                                               phba->sli4_hba.xri_ids[lxri];
+                       }
+
+                       /* Set up the sge entry */
+                       sgl_pg_pairs->sgl_pg0_addr_lo =
+                               cpu_to_le32(putPaddrLow(sglq_entry->phys));
+                       sgl_pg_pairs->sgl_pg0_addr_hi =
+                               cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+                       sgl_pg_pairs->sgl_pg1_addr_lo =
+                               cpu_to_le32(putPaddrLow(0));
+                       sgl_pg_pairs->sgl_pg1_addr_hi =
+                               cpu_to_le32(putPaddrHigh(0));
+
+                       /* Track the starting physical XRI for the mailbox. */
+                       if (index == rsrc_start)
+                               xritag_start = sglq_entry->sli4_xritag;
+                       sgl_pg_pairs++;
+                       cnt++;
+               }
+
+               /* Complete initialization and perform endian conversion. */
+               rsrc_blk->rsrc_used += loop_cnt;
+               bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+               bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
+               sgl->word0 = cpu_to_le32(sgl->word0);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3015 Post ELS Extent SGL, start %d, "
+                               "cnt %d, used %d\n",
+                               xritag_start, loop_cnt, rsrc_blk->rsrc_used);
+               if (!phba->sli4_hba.intr_enable)
+                       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+               else {
+                       mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+                       rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+               }
+               shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+               shdr_status = bf_get(lpfc_mbox_hdr_status,
+                                    &shdr->response);
+               shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+                                        &shdr->response);
+               if (rc != MBX_TIMEOUT)
+                       lpfc_sli4_mbox_cmd_free(phba, mbox);
+               if (shdr_status || shdr_add_status || rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "2988 POST_SGL_BLOCK mailbox "
+                                       "command failed status x%x "
+                                       "add_status x%x mbx status x%x\n",
+                                       shdr_status, shdr_add_status, rc);
+                       rc = -ENXIO;
+                       goto err_exit;
+               }
+               if (ttl_cnt >= els_xri_cnt)
+                       break;
+       }
+
+ err_exit:
+       if (rc == 0)
+               bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+                      LPFC_XRI_RSRC_RDY);
        return rc;
 }
 
@@ -11693,6 +12791,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
                lpfc_sli4_mbox_cmd_free(phba, mbox);
                return -ENOMEM;
        }
+
        /* Get the first SGE entry from the non-embedded DMA memory */
        viraddr = mbox->sge_array->addr[0];
 
@@ -11747,6 +12846,169 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
        return rc;
 }
 
+/**
+ * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @count: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @count scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
+                               int cnt)
+{
+       struct lpfc_scsi_buf *psb = NULL;
+       struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+       struct sgl_page_pairs *sgl_pg_pairs;
+       void *viraddr;
+       LPFC_MBOXQ_t *mbox;
+       uint32_t reqlen, alloclen, pg_pairs;
+       uint32_t mbox_tmo;
+       uint16_t xri_start = 0, scsi_xri_start;
+       uint16_t rsrc_range;
+       int rc = 0, avail_cnt;
+       uint32_t shdr_status, shdr_add_status;
+       dma_addr_t pdma_phys_bpl1;
+       union lpfc_sli4_cfg_shdr *shdr;
+       struct lpfc_rsrc_blks *rsrc_blk;
+       uint32_t xri_cnt = 0;
+
+       /* Calculate the total requested length of the dma memory */
+       reqlen = cnt * sizeof(struct sgl_page_pairs) +
+                sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+       if (reqlen > SLI4_PAGE_SIZE) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "2932 Block sgl registration required DMA "
+                               "size (%d) great than a page\n", reqlen);
+               return -ENOMEM;
+       }
+
+       /*
+        * The use of extents requires the driver to post the sgl headers
+        * in multiple postings to meet the contiguous resource assignment.
+        */
+       psb = list_prepare_entry(psb, sblist, list);
+       scsi_xri_start = phba->sli4_hba.scsi_xri_start;
+       list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+                           list) {
+               rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
+               if (rsrc_range < scsi_xri_start)
+                       continue;
+               else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
+                       continue;
+               else
+                       avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
+
+               reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
+                       sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+               /*
+                * Allocate DMA memory and set up the non-embedded mailbox
+                * command. The mbox is used to post an SGL page per loop
+                * but the DMA memory has a use-once semantic so the mailbox
+                * is used and freed per loop pass.
+                */
+               mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!mbox) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "2933 Failed to allocate mbox cmd "
+                                       "memory\n");
+                       return -ENOMEM;
+               }
+               alloclen = lpfc_sli4_config(phba, mbox,
+                                       LPFC_MBOX_SUBSYSTEM_FCOE,
+                                       LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+                                       reqlen,
+                                       LPFC_SLI4_MBX_NEMBED);
+               if (alloclen < reqlen) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "2934 Allocated DMA memory size (%d) "
+                                       "is less than the requested DMA memory "
+                                       "size (%d)\n", alloclen, reqlen);
+                       lpfc_sli4_mbox_cmd_free(phba, mbox);
+                       return -ENOMEM;
+               }
+
+               /* Get the first SGE entry from the non-embedded DMA memory */
+               viraddr = mbox->sge_array->addr[0];
+
+               /* Set up the SGL pages in the non-embedded DMA pages */
+               sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+               sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+               /* pg_pairs tracks posted SGEs per loop iteration. */
+               pg_pairs = 0;
+               list_for_each_entry_continue(psb, sblist, list) {
+                       /* Set up the sge entry */
+                       sgl_pg_pairs->sgl_pg0_addr_lo =
+                               cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+                       sgl_pg_pairs->sgl_pg0_addr_hi =
+                               cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+                       if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+                               pdma_phys_bpl1 = psb->dma_phys_bpl +
+                                       SGL_PAGE_SIZE;
+                       else
+                               pdma_phys_bpl1 = 0;
+                       sgl_pg_pairs->sgl_pg1_addr_lo =
+                               cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+                       sgl_pg_pairs->sgl_pg1_addr_hi =
+                               cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+                       /* Keep the first xri for this extent. */
+                       if (pg_pairs == 0)
+                               xri_start = psb->cur_iocbq.sli4_xritag;
+                       sgl_pg_pairs++;
+                       pg_pairs++;
+                       xri_cnt++;
+
+                       /*
+                        * Track two exit conditions - the loop has constructed
+                        * all of the caller's SGE pairs or all available
+                        * resource IDs in this extent are consumed.
+                        */
+                       if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
+                               break;
+               }
+               rsrc_blk->rsrc_used += pg_pairs;
+               bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
+               bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3016 Post SCSI Extent SGL, start %d, cnt %d "
+                               "blk use %d\n",
+                               xri_start, pg_pairs, rsrc_blk->rsrc_used);
+               /* Perform endian conversion if necessary */
+               sgl->word0 = cpu_to_le32(sgl->word0);
+               if (!phba->sli4_hba.intr_enable)
+                       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+               else {
+                       mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+                       rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+               }
+               shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+               shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+               shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+                                        &shdr->response);
+               if (rc != MBX_TIMEOUT)
+                       lpfc_sli4_mbox_cmd_free(phba, mbox);
+               if (shdr_status || shdr_add_status || rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "2935 POST_SGL_BLOCK mailbox command "
+                                       "failed status x%x add_status x%x "
+                                       "mbx status x%x\n",
+                                       shdr_status, shdr_add_status, rc);
+                       return -ENXIO;
+               }
+
+               /* Post only what is requested. */
+               if (xri_cnt >= cnt)
+                       break;
+       }
+       return rc;
+}
+
 /**
  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
  * @phba: pointer to lpfc_hba struct that the frame was received on
@@ -12136,6 +13398,28 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
                lpfc_sli_release_iocbq(phba, cmd_iocbq);
 }
 
+/**
+ * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
+ * @phba: Pointer to HBA context object.
+ * @xri: xri id in transaction.
+ *
+ * This function validates the xri maps to the known range of XRIs allocated and
+ * used by the driver.
+ **/
+static uint16_t
+lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
+                     uint16_t xri)
+{
+       int i;
+
+       for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
+               if (xri == phba->sli4_hba.xri_ids[i])
+                       return i;
+       }
+       return NO_XRI;
+}
+
+
 /**
  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
  * @phba: Pointer to HBA context object.
@@ -12169,9 +13453,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
                                "SID:x%x\n", oxid, sid);
                return;
        }
-       if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
-               && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
-               + phba->sli4_hba.max_cfg_param.xri_base))
+       if (lpfc_sli4_xri_inrange(phba, rxid))
                lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
 
        /* Allocate buffer for rsp iocb */
@@ -12194,12 +13476,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
        icmd->ulpBdeCount = 0;
        icmd->ulpLe = 1;
        icmd->ulpClass = CLASS3;
-       icmd->ulpContext = ndlp->nlp_rpi;
+       icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
        ctiocb->context1 = ndlp;
 
        ctiocb->iocb_cmpl = NULL;
        ctiocb->vport = phba->pport;
        ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+       ctiocb->sli4_lxritag = NO_XRI;
        ctiocb->sli4_xritag = NO_XRI;
 
        /* If the oxid maps to the FCP XRI range or if it is out of range,
@@ -12380,8 +13663,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
                first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
                first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
                first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
-               first_iocbq->iocb.unsli3.rcvsli3.vpi =
-                                       vport->vpi + vport->phba->vpi_base;
+               /* iocbq is prepped for internal consumption.  Logical vpi. */
+               first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
                /* put the first buffer into the first IOCBq */
                first_iocbq->context2 = &seq_dmabuf->dbuf;
                first_iocbq->context3 = NULL;
@@ -12461,7 +13744,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
                                      &phba->sli.ring[LPFC_ELS_RING],
                                      iocbq, fc_hdr->fh_r_ctl,
                                      fc_hdr->fh_type))
-               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "2540 Ring %d handler: unexpected Rctl "
                                "x%x Type x%x received\n",
                                LPFC_ELS_RING,
@@ -12558,9 +13841,24 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
 {
        struct lpfc_rpi_hdr *rpi_page;
        uint32_t rc = 0;
+       uint16_t lrpi = 0;
+
+       /* SLI4 ports that support extents do not require RPI headers. */
+       if (!phba->sli4_hba.rpi_hdrs_in_use)
+               goto exit;
+       if (phba->sli4_hba.extents_in_use)
+               return -EIO;
 
-       /* Post all rpi memory regions to the port. */
        list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+               /*
+                * Assign the rpi headers a physical rpi only if the driver
+                * has not initialized those resources.  A port reset only
+                * needs the headers posted.
+                */
+               if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+                   LPFC_RPI_RSRC_RDY)
+                       rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+
                rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12571,6 +13869,9 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
                }
        }
 
+ exit:
+       bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+              LPFC_RPI_RSRC_RDY);
        return rc;
 }
 
@@ -12594,10 +13895,15 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
        LPFC_MBOXQ_t *mboxq;
        struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
        uint32_t rc = 0;
-       uint32_t mbox_tmo;
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
 
+       /* SLI4 ports that support extents do not require RPI headers. */
+       if (!phba->sli4_hba.rpi_hdrs_in_use)
+               return rc;
+       if (phba->sli4_hba.extents_in_use)
+               return -EIO;
+
        /* The port is notified of the header region via a mailbox command. */
        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq) {
@@ -12609,16 +13915,19 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
 
        /* Post all rpi memory regions to the port. */
        hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
-       mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
                         LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
                         sizeof(struct lpfc_mbx_post_hdr_tmpl) -
                         sizeof(struct lpfc_sli4_cfg_mhdr),
                         LPFC_SLI4_MBX_EMBED);
-       bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
-              hdr_tmpl, rpi_page->page_count);
+
+
+       /* Post the physical rpi to the port for this rpi header. */
        bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
               rpi_page->start_rpi);
+       bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+              hdr_tmpl, rpi_page->page_count);
+
        hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
        hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -12653,22 +13962,21 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
 int
 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
 {
-       int rpi;
-       uint16_t max_rpi, rpi_base, rpi_limit;
-       uint16_t rpi_remaining;
+       unsigned long rpi;
+       uint16_t max_rpi, rpi_limit;
+       uint16_t rpi_remaining, lrpi = 0;
        struct lpfc_rpi_hdr *rpi_hdr;
 
        max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
-       rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
        rpi_limit = phba->sli4_hba.next_rpi;
 
        /*
-        * The valid rpi range is not guaranteed to be zero-based.  Start
-        * the search at the rpi_base as reported by the port.
+        * Fetch the next logical rpi.  Because this index is logical,
+        * the driver starts at 0 each time.
         */
        spin_lock_irq(&phba->hbalock);
-       rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
-       if (rpi >= rpi_limit || rpi < rpi_base)
+       rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+       if (rpi >= rpi_limit)
                rpi = LPFC_RPI_ALLOC_ERROR;
        else {
                set_bit(rpi, phba->sli4_hba.rpi_bmask);
@@ -12678,7 +13986,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
 
        /*
         * Don't try to allocate more rpi header regions if the device limit
-        * on available rpis max has been exhausted.
+        * has been exhausted.
         */
        if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
            (phba->sli4_hba.rpi_count >= max_rpi)) {
@@ -12686,14 +13994,22 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
                return rpi;
        }
 
+       /*
+        * RPI header postings are not required for SLI4 ports capable of
+        * extents.
+        */
+       if (!phba->sli4_hba.rpi_hdrs_in_use) {
+               spin_unlock_irq(&phba->hbalock);
+               return rpi;
+       }
+
        /*
         * If the driver is running low on rpi resources, allocate another
         * page now.  Note that the next_rpi value is used because
         * it represents how many are actually in use whereas max_rpi notes
         * how many are supported max by the device.
         */
-       rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
-               phba->sli4_hba.rpi_count;
+       rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
        spin_unlock_irq(&phba->hbalock);
        if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
                rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
@@ -12702,6 +14018,8 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
                                        "2002 Error Could not grow rpi "
                                        "count\n");
                } else {
+                       lrpi = rpi_hdr->start_rpi;
+                       rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
                        lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
                }
        }
@@ -12751,6 +14069,8 @@ void
 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
 {
        kfree(phba->sli4_hba.rpi_bmask);
+       kfree(phba->sli4_hba.rpi_ids);
+       bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 }
 
 /**
@@ -13489,6 +14809,96 @@ out:
        return;
 }
 
+/**
+ * lpfc_wr_object - write an object to the firmware
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @dmabuf_list: list of dmabufs to write to the port.
+ * @size: the total byte value of the objects to write to the port.
+ * @offset: the current offset to be used to start the transfer.
+ *
+ * This routine will create a wr_object mailbox command to send to the port.
+ * The mailbox command will be constructed using the DMA buffers described in
+ * @dmabuf_list to create a list of BDEs. This routine will fill in as many
+ * BDEs that the embedded mailbox can support. The @offset variable will be
+ * used to indicate the starting offset of the transfer and will also return
+ * the offset after the write object mailbox has completed. @size is used to
+ * determine the end of the object and whether the eof bit should be set.
+ *
+ * Return 0 if successful and offset will contain the new offset to use
+ * for the next write.
+ * Return negative value for error cases.
+ **/
+int
+lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+              uint32_t size, uint32_t *offset)
+{
+       struct lpfc_mbx_wr_object *wr_object;
+       LPFC_MBOXQ_t *mbox;
+       int rc = 0, i = 0;
+       uint32_t shdr_status, shdr_add_status;
+       uint32_t mbox_tmo;
+       union lpfc_sli4_cfg_shdr *shdr;
+       struct lpfc_dmabuf *dmabuf;
+       uint32_t written = 0;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                       LPFC_MBOX_OPCODE_WRITE_OBJECT,
+                       sizeof(struct lpfc_mbx_wr_object) -
+                       sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+       wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
+       wr_object->u.request.write_offset = *offset;
+       sprintf((uint8_t *)wr_object->u.request.object_name, "/");
+       wr_object->u.request.object_name[0] =
+               cpu_to_le32(wr_object->u.request.object_name[0]);
+       bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
+       list_for_each_entry(dmabuf, dmabuf_list, list) {
+               if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
+                       break;
+               wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
+               wr_object->u.request.bde[i].addrHigh =
+                       putPaddrHigh(dmabuf->phys);
+               if (written + SLI4_PAGE_SIZE >= size) {
+                       wr_object->u.request.bde[i].tus.f.bdeSize =
+                               (size - written);
+                       written += (size - written);
+                       bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+               } else {
+                       wr_object->u.request.bde[i].tus.f.bdeSize =
+                               SLI4_PAGE_SIZE;
+                       written += SLI4_PAGE_SIZE;
+               }
+               i++;
+       }
+       wr_object->u.request.bde_count = i;
+       bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       else {
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+       }
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, phba->mbox_mem_pool);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3025 Write Object mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               rc = -ENXIO;
+       } else
+               *offset += wr_object->u.response.actual_write_length;
+       return rc;
+}
+
 /**
  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
  * @vport: pointer to vport data structure.
@@ -13644,7 +15054,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
                                 * never happen
                                 */
                                sglq = __lpfc_clear_active_sglq(phba,
-                                                sglq->sli4_xritag);
+                                                sglq->sli4_lxritag);
                                spin_unlock_irqrestore(&phba->hbalock, iflags);
                                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "2823 txq empty and txq_cnt is %d\n ",
@@ -13656,6 +15066,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
                /* The xri and iocb resources secured,
                 * attempt to issue request
                 */
+               piocbq->sli4_lxritag = sglq->sli4_lxritag;
                piocbq->sli4_xritag = sglq->sli4_xritag;
                if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
                        fail_msg = "to convert bpl to sgl";