git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'scsi/for-next'
author Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 11 Feb 2016 03:17:38 +0000 (14:17 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 11 Feb 2016 03:17:38 +0000 (14:17 +1100)
56 files changed:
Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
block/blk-core.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commctrl.c
drivers/scsi/aacraid/comminit.c
drivers/scsi/aacraid/commsup.c
drivers/scsi/aacraid/dpcsup.c
drivers/scsi/aacraid/linit.c
drivers/scsi/aacraid/src.c
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/bfa/bfa_ioc.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/hisi_sas/Kconfig
drivers/scsi/hisi_sas/Makefile
drivers/scsi/hisi_sas/hisi_sas.h
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c [new file with mode: 0644]
drivers/scsi/hosts.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fp.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/megaraid/megaraid_sas_fusion.h
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_bsg.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/storvsc_drv.c
include/linux/blkdev.h
include/scsi/iscsi_if.h
include/scsi/scsi_device.h

diff --git a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
index f67e761bcc18e8f37f5f56e89f7f8e514ab6c2f4..bf2411f366e5a2e4e9309333ffb9e2269236cc60 100644
@@ -5,6 +5,7 @@ The HiSilicon SAS controller supports SAS/SATA.
 Main node required properties:
   - compatible : value should be as follows:
        (a) "hisilicon,hip05-sas-v1" for v1 hw in hip05 chipset
+       (b) "hisilicon,hip06-sas-v2" for v2 hw in hip06 chipset
   - sas-addr : array of 8 bytes for host SAS address
   - reg : Address and length of the SAS register
   - hisilicon,sas-syscon: phandle of syscon used for sas control
@@ -13,7 +14,7 @@ Main node required properties:
   - ctrl-clock-ena-reg : offset to controller clock enable register in ctrl reg
   - queue-count : number of delivery and completion queues in the controller
   - phy-count : number of phys accessible by the controller
-  - interrupts : Interrupts for phys, completion queues, and fatal
+  - interrupts : For v1 hw: Interrupts for phys, completion queues, and fatal
                sources; the interrupts are ordered in 3 groups, as follows:
                        - Phy interrupts
                        - Completion queue interrupts
@@ -30,6 +31,24 @@ Main node required properties:
                Fatal interrupts : the fatal interrupts are ordered as follows:
                        - ECC
                        - AXI bus
+               For v2 hw: Interrupts for phys, Sata, and completion queues;
+               the interrupts are ordered in 3 groups, as follows:
+                       - Phy interrupts
+                       - Sata interrupts
+                       - Completion queue interrupts
+               Phy interrupts : Each controller has 2 phy interrupts:
+                       - phy up/down
+                       - channel interrupt
+               Sata interrupts : Each phy on the controller has 1 Sata
+                       interrupt. The interrupts are ordered in increasing
+                       order.
+               Completion queue interrupts : each completion queue has 1
+                       interrupt source. The interrupts are ordered in
+                       increasing order.
+
+Optional main node properties:
+ - hip06-sas-v2-quirk-amt : when set, indicates that the v2 controller has the
+                           "am-max-transmissions" limitation.
 
 Example:
        sas0: sas@c1000000 {
diff --git a/block/blk-core.c b/block/blk-core.c
index c60e233eb08bdc110eb37018f3593c8ddb4001fc..45f4d7efbf349efa77f8c9d425e1d0ece4fbd9d4 100644
@@ -2455,14 +2455,16 @@ struct request *blk_peek_request(struct request_queue *q)
 
                        rq = NULL;
                        break;
-               } else if (ret == BLKPREP_KILL) {
+               } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
+                       int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
+
                        rq->cmd_flags |= REQ_QUIET;
                        /*
                         * Mark this request as started so we don't trigger
                         * any debug logic in the end I/O path.
                         */
                        blk_start_request(rq);
-                       __blk_end_request_all(rq, -EIO);
+                       __blk_end_request_all(rq, err);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index e4c243748a97f97d2fa16b1b2ee6622e83b3905e..7dfd0fa272555dc17cf63200a19acc3e492e7e24 100644
@@ -323,7 +323,6 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
        if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
                dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
                aac_fib_complete(fibptr);
-               aac_fib_free(fibptr);
                return 0;
        }
        scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
@@ -331,7 +330,6 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
        if (unlikely(!device || !scsi_device_online(device))) {
                dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
                aac_fib_complete(fibptr);
-               aac_fib_free(fibptr);
                return 0;
        }
        return 1;
@@ -541,7 +539,6 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 
        aac_fib_complete(fibptr);
-       aac_fib_free(fibptr);
        scsicmd->scsi_done(scsicmd);
 }
 
@@ -557,7 +554,8 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 
        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
-       if (!(cmd_fibcontext = aac_fib_alloc(dev)))
+       cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+       if (!cmd_fibcontext)
                return -ENOMEM;
 
        aac_fib_init(cmd_fibcontext);
@@ -586,7 +584,6 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 
        printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
        aac_fib_complete(cmd_fibcontext);
-       aac_fib_free(cmd_fibcontext);
        return -1;
 }
 
@@ -1024,7 +1021,6 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 
        aac_fib_complete(fibptr);
-       aac_fib_free(fibptr);
        scsicmd->scsi_done(scsicmd);
 }
 
@@ -1040,7 +1036,8 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
 
        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
-       if (!(cmd_fibcontext = aac_fib_alloc(dev)))
+       cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+       if (!cmd_fibcontext)
                return -ENOMEM;
 
        aac_fib_init(cmd_fibcontext);
@@ -1068,7 +1065,6 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
 
        printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
        aac_fib_complete(cmd_fibcontext);
-       aac_fib_free(cmd_fibcontext);
        return -1;
 }
 
@@ -1869,7 +1865,6 @@ static void io_callback(void *context, struct fib * fibptr)
                break;
        }
        aac_fib_complete(fibptr);
-       aac_fib_free(fibptr);
 
        scsicmd->scsi_done(scsicmd);
 }
@@ -1954,7 +1949,8 @@ static int aac_read(struct scsi_cmnd * scsicmd)
        /*
         *      Alocate and initialize a Fib
         */
-       if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+       cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+       if (!cmd_fibcontext) {
                printk(KERN_WARNING "aac_read: fib allocation failed\n");
                return -1;
        }
@@ -2051,7 +2047,8 @@ static int aac_write(struct scsi_cmnd * scsicmd)
        /*
         *      Allocate and initialize a Fib then setup a BlockWrite command
         */
-       if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+       cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+       if (!cmd_fibcontext) {
                /* FIB temporarily unavailable,not catastrophic failure */
 
                /* scsicmd->result = DID_ERROR << 16;
@@ -2285,7 +2282,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
        /*
         *      Allocate and initialize a Fib
         */
-       cmd_fibcontext = aac_fib_alloc(aac);
+       cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
        if (!cmd_fibcontext)
                return SCSI_MLQUEUE_HOST_BUSY;
 
@@ -3157,7 +3154,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
        scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
 
        aac_fib_complete(fibptr);
-       aac_fib_free(fibptr);
        scsicmd->scsi_done(scsicmd);
 }
 
@@ -3187,9 +3183,10 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
        /*
         *      Allocate and initialize a Fib then setup a BlockWrite command
         */
-       if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+       cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+       if (!cmd_fibcontext)
                return -1;
-       }
+
        status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
 
        /*
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 074878b55a0b7d9f371fe5bc57b2714e01074fdb..efa493cf1bc630bffceee7bf3276f7babd1cbe15 100644
@@ -62,7 +62,7 @@ enum {
 #define        PMC_GLOBAL_INT_BIT0             0x00000001
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 41010
+# define AAC_DRIVER_BUILD 41052
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS 32
@@ -94,6 +94,13 @@ enum {
 #define aac_phys_to_logical(x)  ((x)+1)
 #define aac_logical_to_phys(x)  ((x)?(x)-1:0)
 
+/*
+ * These macros are for keeping track of
+ * character device state.
+ */
+#define AAC_CHARDEV_UNREGISTERED       (-1)
+#define AAC_CHARDEV_NEEDS_REINIT       (-2)
+
 /* #define AAC_DETAILED_STATUS_INFO */
 
 struct diskparm
@@ -944,6 +951,7 @@ struct fib {
         */
        struct list_head        fiblink;
        void                    *data;
+       u32                     vector_no;
        struct hw_fib           *hw_fib_va;             /* Actual shared object */
        dma_addr_t              hw_fib_pa;              /* physical address of hw_fib*/
 };
@@ -1123,6 +1131,7 @@ struct aac_dev
        struct fib              *free_fib;
        spinlock_t              fib_lock;
 
+       struct mutex            ioctl_mutex;
        struct aac_queue_block *queues;
        /*
         *      The user API will use an IOCTL to register itself to receive
@@ -1234,6 +1243,7 @@ struct aac_dev
        struct msix_entry       msixentry[AAC_MAX_MSIX];
        struct aac_msix_ctx     aac_msix[AAC_MAX_MSIX]; /* context */
        u8                      adapter_shutdown;
+       u32                     handle_pci_error;
 };
 
 #define aac_adapter_interrupt(dev) \
@@ -2113,7 +2123,9 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
 int aac_acquire_irq(struct aac_dev *dev);
 void aac_free_irq(struct aac_dev *dev);
 const char *aac_driverinfo(struct Scsi_Host *);
+void aac_fib_vector_assign(struct aac_dev *dev);
 struct fib *aac_fib_alloc(struct aac_dev *dev);
+struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd);
 int aac_fib_setup(struct aac_dev *dev);
 void aac_fib_map_free(struct aac_dev *dev);
 void aac_fib_free(struct fib * context);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 54195a117f72e1f29241b1d079eb8de96e0df056..4b3bb52b5108edbabf253d800a083c3f637c9380 100644
@@ -855,13 +855,20 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
 {
        int status;
 
+       mutex_lock(&dev->ioctl_mutex);
+
+       if (dev->adapter_shutdown) {
+               status = -EACCES;
+               goto cleanup;
+       }
+
        /*
         *      HBA gets first crack
         */
 
        status = aac_dev_ioctl(dev, cmd, arg);
        if (status != -ENOTTY)
-               return status;
+               goto cleanup;
 
        switch (cmd) {
        case FSACTL_MINIPORT_REV_CHECK:
@@ -890,6 +897,10 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
                status = -ENOTTY;
                break;
        }
+
+cleanup:
+       mutex_unlock(&dev->ioctl_mutex);
+
        return status;
 }
 
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 0e954e37f0b5f3923980ee684e55ffef5b296510..2b4e75380ae632cf60d392f8517bb8406110d9d4 100644
@@ -212,8 +212,11 @@ int aac_send_shutdown(struct aac_dev * dev)
                return -ENOMEM;
        aac_fib_init(fibctx);
 
-       cmd = (struct aac_close *) fib_data(fibctx);
+       mutex_lock(&dev->ioctl_mutex);
+       dev->adapter_shutdown = 1;
+       mutex_unlock(&dev->ioctl_mutex);
 
+       cmd = (struct aac_close *) fib_data(fibctx);
        cmd->command = cpu_to_le32(VM_CloseAll);
        cmd->cid = cpu_to_le32(0xfffffffe);
 
@@ -229,7 +232,6 @@ int aac_send_shutdown(struct aac_dev * dev)
        /* FIB should be freed only after getting the response from the F/W */
        if (status != -ERESTARTSYS)
                aac_fib_free(fibctx);
-       dev->adapter_shutdown = 1;
        if ((dev->pdev->device == PMC_DEVICE_S7 ||
             dev->pdev->device == PMC_DEVICE_S8 ||
             dev->pdev->device == PMC_DEVICE_S9) &&
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1f90fe849c95201f410972b34413f45e484802e..511bbc575062efa4c0a7f3d26a1ec2c097d11826 100644
@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
 
 void aac_fib_map_free(struct aac_dev *dev)
 {
-       pci_free_consistent(dev->pdev,
-         dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
-         dev->hw_fib_va, dev->hw_fib_pa);
+       if (dev->hw_fib_va && dev->max_fib_size) {
+               pci_free_consistent(dev->pdev,
+               (dev->max_fib_size *
+               (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
+               dev->hw_fib_va, dev->hw_fib_pa);
+       }
        dev->hw_fib_va = NULL;
        dev->hw_fib_pa = 0;
 }
 
+void aac_fib_vector_assign(struct aac_dev *dev)
+{
+       u32 i = 0;
+       u32 vector = 1;
+       struct fib *fibptr = NULL;
+
+       for (i = 0, fibptr = &dev->fibs[i];
+               i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+               i++, fibptr++) {
+               if ((dev->max_msix == 1) ||
+                 (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
+                       - dev->vector_cap))) {
+                       fibptr->vector_no = 0;
+               } else {
+                       fibptr->vector_no = vector;
+                       vector++;
+                       if (vector == dev->max_msix)
+                               vector = 1;
+               }
+       }
+}
+
 /**
  *     aac_fib_setup   -       setup the fibs
  *     @dev: Adapter to set up
@@ -137,6 +162,7 @@ int aac_fib_setup(struct aac_dev * dev)
                i++, fibptr++)
        {
                fibptr->flags = 0;
+               fibptr->size = sizeof(struct fib);
                fibptr->dev = dev;
                fibptr->hw_fib_va = hw_fib;
                fibptr->data = (void *) fibptr->hw_fib_va->data;
@@ -151,17 +177,48 @@ int aac_fib_setup(struct aac_dev * dev)
                hw_fib_pa = hw_fib_pa +
                        dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
        }
+
+       /*
+        *Assign vector numbers to fibs
+        */
+       aac_fib_vector_assign(dev);
+
        /*
         *      Add the fib chain to the free list
         */
        dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
        /*
-        *      Enable this to debug out of queue space
-        */
-       dev->free_fib = &dev->fibs[0];
+       *       Set 8 fibs aside for management tools
+       */
+       dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
        return 0;
 }
 
+/**
+ *     aac_fib_alloc_tag-allocate a fib using tags
+ *     @dev: Adapter to allocate the fib for
+ *
+ *     Allocate a fib from the adapter fib pool using tags
+ *     from the blk layer.
+ */
+
+struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
+{
+       struct fib *fibptr;
+
+       fibptr = &dev->fibs[scmd->request->tag];
+       /*
+        *      Null out fields that depend on being zero at the start of
+        *      each I/O
+        */
+       fibptr->hw_fib_va->header.XferState = 0;
+       fibptr->type = FSAFS_NTC_FIB_CONTEXT;
+       fibptr->callback_data = NULL;
+       fibptr->callback = NULL;
+
+       return fibptr;
+}
+
 /**
  *     aac_fib_alloc   -       allocate a fib
  *     @dev: Adapter to allocate the fib for
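
As a side note on aac_fib_vector_assign() in the commsup.c hunk above: with MSI-X enabled, fibs are spread round-robin over vectors 1..max_msix-1, while the last vector_cap fibs (and everything when only one vector is available) stay on vector 0. The stand-alone user-space sketch below just replays that loop with made-up numbers; can_queue, num_mgt_fib, max_msix and vector_cap are illustrative assumptions, not the driver's real values.

#include <stdio.h>

int main(void)
{
	/* Illustrative values only; the real ones come from the adapter. */
	const int can_queue = 16, num_mgt_fib = 8;	/* 24 fibs in total */
	const int max_msix = 4, vector_cap = 8;
	int vector = 1;

	for (int i = 0; i < can_queue + num_mgt_fib; i++) {
		int vector_no;

		if (max_msix == 1 ||
		    i > (can_queue + num_mgt_fib - 1) - vector_cap) {
			vector_no = 0;		/* last vector_cap fibs stay on vector 0 */
		} else {
			vector_no = vector++;	/* round-robin over 1..max_msix-1 */
			if (vector == max_msix)
				vector = 1;
		}
		printf("fib %2d -> MSI-X vector %d\n", i, vector_no);
	}
	return 0;
}

With these numbers, fibs 0-15 cycle through vectors 1, 2, 3 and fibs 16-23 are pinned to vector 0.
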
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index da9d9936e99567f6d4da2ef805dfbf9a0ea8397d..d677b52860ae35df7c63e8e0972b3c167a48aa21 100644
@@ -394,7 +394,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
                                fib->callback(fib->callback_data, fib);
                        } else {
                                aac_fib_complete(fib);
-                               aac_fib_free(fib);
                        }
                } else {
                        unsigned long flagv;
@@ -416,7 +415,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
                                fib->done = 0;
                                spin_unlock_irqrestore(&fib->event_lock, flagv);
                                aac_fib_complete(fib);
-                               aac_fib_free(fib);
                        }
 
                }
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 76eaa38ffd6e5c4a9f40bd66fa4c1205e35ed739..21a67ed047e8741889ee2a254989f55d095ddbd8 100644
@@ -38,6 +38,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
+#include <linux/aer.h>
 #include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
@@ -79,7 +80,7 @@ MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
 
 static DEFINE_MUTEX(aac_mutex);
 static LIST_HEAD(aac_devices);
-static int aac_cfg_major = -1;
+static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED;
 char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
 
 /*
@@ -454,6 +455,8 @@ static int aac_slave_configure(struct scsi_device *sdev)
        } else
                scsi_change_queue_depth(sdev, 1);
 
+               sdev->tagged_supported = 1;
+
        return 0;
 }
 
@@ -700,23 +703,18 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
 static long aac_cfg_ioctl(struct file *file,
                unsigned int cmd, unsigned long arg)
 {
-       int ret;
-       struct aac_dev *aac;
-       aac = (struct aac_dev *)file->private_data;
-       if (!capable(CAP_SYS_RAWIO) || aac->adapter_shutdown)
+       struct aac_dev *aac = (struct aac_dev *)file->private_data;
+
+       if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
-       mutex_lock(&aac_mutex);
-       ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
-       mutex_unlock(&aac_mutex);
 
-       return ret;
+       return aac_do_ioctl(aac, cmd, (void __user *)arg);
 }
 
 #ifdef CONFIG_COMPAT
 static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
 {
        long ret;
-       mutex_lock(&aac_mutex);
        switch (cmd) {
        case FSACTL_MINIPORT_REV_CHECK:
        case FSACTL_SENDFIB:
@@ -750,7 +748,6 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
                ret = -ENOIOCTLCMD;
                break;
        }
-       mutex_unlock(&aac_mutex);
        return ret;
 }
 
@@ -1075,6 +1072,8 @@ static void __aac_shutdown(struct aac_dev * aac)
        int i;
        int cpu;
 
+       aac_send_shutdown(aac);
+
        if (aac->aif_thread) {
                int i;
                /* Clear out events first */
@@ -1086,7 +1085,6 @@ static void __aac_shutdown(struct aac_dev * aac)
                }
                kthread_stop(aac->thread);
        }
-       aac_send_shutdown(aac);
        aac_adapter_disable_int(aac);
        cpu = cpumask_first(cpu_online_mask);
        if (aac->pdev->device == PMC_DEVICE_S6 ||
@@ -1120,6 +1118,13 @@ static void __aac_shutdown(struct aac_dev * aac)
        else if (aac->max_msix > 1)
                pci_disable_msix(aac->pdev);
 }
+static void aac_init_char(void)
+{
+       aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
+       if (aac_cfg_major < 0) {
+               pr_err("aacraid: unable to register \"aac\" device.\n");
+       }
+}
 
 static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
@@ -1132,6 +1137,12 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        u64 dmamask;
        extern int aac_sync_mode;
 
+       /*
+        * Only series 7 needs freset.
+        */
+        if (pdev->device == PMC_DEVICE_S7)
+               pdev->needs_freset = 1;
+
        list_for_each_entry(aac, &aac_devices, entry) {
                if (aac->id > unique_id)
                        break;
@@ -1171,6 +1182,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        shost->max_cmd_len = 16;
        shost->use_cmd_list = 1;
 
+       if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
+               aac_init_char();
+
        aac = (struct aac_dev *)shost->hostdata;
        aac->base_start = pci_resource_start(pdev, 0);
        aac->scsi_host_ptr = shost;
@@ -1185,6 +1199,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_free_host;
        spin_lock_init(&aac->fib_lock);
 
+       mutex_init(&aac->ioctl_mutex);
        /*
         *      Map in the registers from the adapter.
         */
@@ -1296,6 +1311,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_deinit;
        scsi_scan_host(shost);
 
+       pci_enable_pcie_error_reporting(pdev);
+       pci_save_state(pdev);
+
        return 0;
 
  out_deinit:
@@ -1317,7 +1335,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        return error;
 }
 
-#if (defined(CONFIG_PM))
 static void aac_release_resources(struct aac_dev *aac)
 {
        int i;
@@ -1404,14 +1421,26 @@ static int aac_acquire_resources(struct aac_dev *dev)
 
        aac_adapter_enable_int(dev);
 
-       if (!dev->sync_mode)
+       /*max msix may change  after EEH
+        * Re-assign vectors to fibs
+        */
+       aac_fib_vector_assign(dev);
+
+       if (!dev->sync_mode) {
+               /* After EEH recovery or suspend resume, max_msix count
+                * may change, therfore updating in init as well.
+                */
                aac_adapter_start(dev);
+               dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+       }
        return 0;
 
 error_iounmap:
        return -1;
 
 }
+
+#if (defined(CONFIG_PM))
 static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 
@@ -1495,10 +1524,142 @@ static void aac_remove_one(struct pci_dev *pdev)
        pci_disable_device(pdev);
        if (list_empty(&aac_devices)) {
                unregister_chrdev(aac_cfg_major, "aac");
-               aac_cfg_major = -1;
+               aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT;
+       }
+}
+
+static void aac_flush_ios(struct aac_dev *aac)
+{
+       int i;
+       struct scsi_cmnd *cmd;
+
+       for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
+               cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
+               if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
+                       scsi_dma_unmap(cmd);
+
+                       if (aac->handle_pci_error)
+                               cmd->result = DID_NO_CONNECT << 16;
+                       else
+                               cmd->result = DID_RESET << 16;
+
+                       cmd->scsi_done(cmd);
+               }
+       }
+}
+
+static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
+                                       enum pci_channel_state error)
+{
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct aac_dev *aac = shost_priv(shost);
+
+       dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error);
+
+       switch (error) {
+       case pci_channel_io_normal:
+               return PCI_ERS_RESULT_CAN_RECOVER;
+       case pci_channel_io_frozen:
+               aac->handle_pci_error = 1;
+
+               scsi_block_requests(aac->scsi_host_ptr);
+               aac_flush_ios(aac);
+               aac_release_resources(aac);
+
+               pci_disable_pcie_error_reporting(pdev);
+               aac_adapter_ioremap(aac, 0);
+
+               return PCI_ERS_RESULT_NEED_RESET;
+       case pci_channel_io_perm_failure:
+               aac->handle_pci_error = 1;
+
+               aac_flush_ios(aac);
+               return PCI_ERS_RESULT_DISCONNECT;
        }
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev)
+{
+       dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n");
+       return PCI_ERS_RESULT_NEED_RESET;
 }
 
+static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev)
+{
+       dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n");
+       pci_restore_state(pdev);
+       if (pci_enable_device(pdev)) {
+               dev_warn(&pdev->dev,
+                       "aacraid: failed to enable slave\n");
+               goto fail_device;
+       }
+
+       pci_set_master(pdev);
+
+       if (pci_enable_device_mem(pdev)) {
+               dev_err(&pdev->dev, "pci_enable_device_mem failed\n");
+               goto fail_device;
+       }
+
+       return PCI_ERS_RESULT_RECOVERED;
+
+fail_device:
+       dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n");
+       return PCI_ERS_RESULT_DISCONNECT;
+}
+
+
+static void aac_pci_resume(struct pci_dev *pdev)
+{
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct scsi_device *sdev = NULL;
+       struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
+
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+
+       if (aac_adapter_ioremap(aac, aac->base_size)) {
+
+               dev_err(&pdev->dev, "aacraid: ioremap failed\n");
+               /* remap failed, go back ... */
+               aac->comm_interface = AAC_COMM_PRODUCER;
+               if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) {
+                       dev_warn(&pdev->dev,
+                               "aacraid: unable to map adapter.\n");
+
+                       return;
+               }
+       }
+
+       msleep(10000);
+
+       aac_acquire_resources(aac);
+
+       /*
+        * reset this flag to unblock ioctl() as it was set
+        * at aac_send_shutdown() to block ioctls from upperlayer
+        */
+       aac->adapter_shutdown = 0;
+       aac->handle_pci_error = 0;
+
+       shost_for_each_device(sdev, shost)
+               if (sdev->sdev_state == SDEV_OFFLINE)
+                       sdev->sdev_state = SDEV_RUNNING;
+       scsi_unblock_requests(aac->scsi_host_ptr);
+       scsi_scan_host(aac->scsi_host_ptr);
+       pci_save_state(pdev);
+
+       dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
+}
+
+static struct pci_error_handlers aac_pci_err_handler = {
+       .error_detected         = aac_pci_error_detected,
+       .mmio_enabled           = aac_pci_mmio_enabled,
+       .slot_reset             = aac_pci_slot_reset,
+       .resume                 = aac_pci_resume,
+};
+
 static struct pci_driver aac_pci_driver = {
        .name           = AAC_DRIVERNAME,
        .id_table       = aac_pci_tbl,
@@ -1509,6 +1670,7 @@ static struct pci_driver aac_pci_driver = {
        .resume         = aac_resume,
 #endif
        .shutdown       = aac_shutdown,
+       .err_handler    = &aac_pci_err_handler,
 };
 
 static int __init aac_init(void)
@@ -1522,11 +1684,8 @@ static int __init aac_init(void)
        if (error < 0)
                return error;
 
-       aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
-       if (aac_cfg_major < 0) {
-               printk(KERN_WARNING
-                       "aacraid: unable to register \"aac\" device.\n");
-       }
+       aac_init_char();
+
 
        return 0;
 }
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2aa34ea8ceb1e903781331acd3283ba264eadd53..bc0203f3d243e8e8926e1975408017d9dd5f5012 100644
@@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
                                break;
                        if (dev->msi_enabled && dev->max_msix > 1)
                                atomic_dec(&dev->rrq_outstanding[vector_no]);
-                       aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
                        dev->host_rrq[index++] = 0;
+                       aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
                        if (index == (vector_no + 1) * dev->vector_cap)
                                index = vector_no * dev->vector_cap;
                        dev->host_rrq_idx[vector_no] = index;
@@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
 #endif
 
        u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
+       u16 vector_no;
 
        atomic_inc(&q->numpending);
 
        if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
            dev->max_msix > 1) {
-               u_int16_t vector_no, first_choice = 0xffff;
-
-               vector_no = dev->fibs_pushed_no % dev->max_msix;
-               do {
-                       vector_no += 1;
-                       if (vector_no == dev->max_msix)
-                               vector_no = 1;
-                       if (atomic_read(&dev->rrq_outstanding[vector_no]) <
-                           dev->vector_cap)
-                               break;
-                       if (0xffff == first_choice)
-                               first_choice = vector_no;
-                       else if (vector_no == first_choice)
-                               break;
-               } while (1);
-               if (vector_no == first_choice)
-                       vector_no = 0;
-               atomic_inc(&dev->rrq_outstanding[vector_no]);
-               if (dev->fibs_pushed_no == 0xffffffff)
-                       dev->fibs_pushed_no = 0;
-               else
-                       dev->fibs_pushed_no++;
+               vector_no = fib->vector_no;
                fib->hw_fib_va->header.Handle += (vector_no << 16);
+       } else {
+               vector_no = 0;
        }
 
+       atomic_inc(&dev->rrq_outstanding[vector_no]);
+
        if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
                /* Calculate the amount to the fibsize bits */
                fibsize = (hdr_size + 127) / 128 - 1;
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index a41c6432f4446cf9082916a0859e08be3c0a70c7..ee5ace87353580b9005341e9442d5797a2fb4f18 100644
@@ -42,7 +42,7 @@ struct be_queue_info {
        u16 id;
        u16 tail, head;
        bool created;
-       atomic_t used;          /* Number of valid elements in the queue */
+       u16 used;               /* Number of valid elements in the queue */
 };
 
 static inline u32 MODULO(u16 val, u16 limit)
@@ -110,10 +110,9 @@ struct be_mcc_obj {
 };
 
 struct beiscsi_mcc_tag_state {
-#define MCC_TAG_STATE_COMPLETED 0x00
-#define MCC_TAG_STATE_RUNNING   0x01
-#define MCC_TAG_STATE_TIMEOUT   0x02
-       uint8_t tag_state;
+       unsigned long tag_state;
+#define MCC_TAG_STATE_RUNNING  1
+#define MCC_TAG_STATE_TIMEOUT  2
        struct be_dma_mem tag_mem_state;
 };
 
@@ -124,7 +123,7 @@ struct be_ctrl_info {
        struct pci_dev *pdev;
 
        /* Mbox used for cmd request/response */
-       spinlock_t mbox_lock;   /* For serializing mbox cmds to BE card */
+       struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
        struct be_dma_mem mbox_mem;
        /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
         * is stored for freeing purpose */
@@ -133,11 +132,10 @@ struct be_ctrl_info {
        /* MCC Rings */
        struct be_mcc_obj mcc_obj;
        spinlock_t mcc_lock;    /* For serializing mcc cmds to BE card */
-       spinlock_t mcc_cq_lock;
 
        wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
        unsigned int mcc_tag[MAX_MCC_CMD];
-       unsigned int mcc_numtag[MAX_MCC_CMD + 1];
+       unsigned int mcc_tag_status[MAX_MCC_CMD + 1];
        unsigned short mcc_alloc_index;
        unsigned short mcc_free_index;
        unsigned int mcc_tag_available;
@@ -147,6 +145,12 @@ struct be_ctrl_info {
 
 #include "be_cmds.h"
 
+/* WRB index mask for MCC_Q_LEN queue entries */
+#define MCC_Q_WRB_IDX_MASK     CQE_STATUS_WRB_MASK
+#define MCC_Q_WRB_IDX_SHIFT    CQE_STATUS_WRB_SHIFT
+/* TAG is from 1...MAX_MCC_CMD, MASK includes MAX_MCC_CMD */
+#define MCC_Q_CMD_TAG_MASK     ((MAX_MCC_CMD << 1) - 1)
+
 #define PAGE_SHIFT_4K 12
 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
 #define mcc_timeout            120000 /* 12s timeout */
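
To make the new MCC_Q_* macros above concrete: alloc_mcc_wrb() (in the be_cmds.c hunk further down) stores the command tag in the low bits of wrb->tag0 and folds the WRB index into the CQE_STATUS_WRB field, and free_mcc_wrb() later recovers the tag with MCC_Q_CMD_TAG_MASK. The stand-alone user-space sketch below replays that packing; the mask, shift and MAX_MCC_CMD values are assumptions for the example, not copied from the be2iscsi headers.

#include <stdio.h>

#define MAX_MCC_CMD		16				/* assumed value */
#define MCC_Q_CMD_TAG_MASK	((MAX_MCC_CMD << 1) - 1)
#define MCC_Q_WRB_IDX_SHIFT	16				/* assumed: CQE_STATUS_WRB_SHIFT */
#define MCC_Q_WRB_IDX_MASK	(0xffu << MCC_Q_WRB_IDX_SHIFT)	/* assumed: CQE_STATUS_WRB_MASK */

int main(void)
{
	unsigned int tag = 5;	/* tag handed out by the allocator */
	unsigned int head = 3;	/* mccq->head when the WRB is queued */
	unsigned int tag0, wrb_idx, recovered;

	/* encode, as in alloc_mcc_wrb() */
	tag0 = tag;
	tag0 |= (head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;

	/* decode, as done on completion and in free_mcc_wrb() */
	wrb_idx = (tag0 & MCC_Q_WRB_IDX_MASK) >> MCC_Q_WRB_IDX_SHIFT;
	recovered = tag0 & MCC_Q_CMD_TAG_MASK;

	printf("tag0 = 0x%08x  wrb_idx = %u  tag = %u\n", tag0, wrb_idx, recovered);
	return 0;
}
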
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 2778089b01a578c5954686218c69b07b368e94cd..a55eaeea37e72cf42e592356191fad0900cdc741 100644
@@ -104,24 +104,16 @@ int be_chk_reset_complete(struct beiscsi_hba *phba)
        return 0;
 }
 
-void be_mcc_notify(struct beiscsi_hba *phba)
-{
-       struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
-       u32 val = 0;
-
-       val |= mccq->id & DB_MCCQ_RING_ID_MASK;
-       val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
-       iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
-}
-
 unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
 {
        unsigned int tag = 0;
 
+       spin_lock(&phba->ctrl.mcc_lock);
        if (phba->ctrl.mcc_tag_available) {
                tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
                phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
-               phba->ctrl.mcc_numtag[tag] = 0;
+               phba->ctrl.mcc_tag_status[tag] = 0;
+               phba->ctrl.ptag_state[tag].tag_state = 0;
        }
        if (tag) {
                phba->ctrl.mcc_tag_available--;
@@ -130,11 +122,89 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
                else
                        phba->ctrl.mcc_alloc_index++;
        }
+       spin_unlock(&phba->ctrl.mcc_lock);
        return tag;
 }
 
+struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
+                                unsigned int *ref_tag)
+{
+       struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+       struct be_mcc_wrb *wrb = NULL;
+       unsigned int tag;
+
+       spin_lock_bh(&phba->ctrl.mcc_lock);
+       if (mccq->used == mccq->len) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
+                           mccq->used, phba->ctrl.mcc_tag_available);
+               goto alloc_failed;
+       }
+
+       if (!phba->ctrl.mcc_tag_available)
+               goto alloc_failed;
+
+       tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
+       if (!tag) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
+                           phba->ctrl.mcc_tag_available,
+                           phba->ctrl.mcc_alloc_index);
+               goto alloc_failed;
+       }
+
+       /* return this tag for further reference */
+       *ref_tag = tag;
+       phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
+       phba->ctrl.mcc_tag_status[tag] = 0;
+       phba->ctrl.ptag_state[tag].tag_state = 0;
+       phba->ctrl.mcc_tag_available--;
+       if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
+               phba->ctrl.mcc_alloc_index = 0;
+       else
+               phba->ctrl.mcc_alloc_index++;
+
+       wrb = queue_head_node(mccq);
+       memset(wrb, 0, sizeof(*wrb));
+       wrb->tag0 = tag;
+       wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
+       queue_head_inc(mccq);
+       mccq->used++;
+
+alloc_failed:
+       spin_unlock_bh(&phba->ctrl.mcc_lock);
+       return wrb;
+}
+
+void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
+{
+       struct be_queue_info *mccq = &ctrl->mcc_obj.q;
+
+       spin_lock_bh(&ctrl->mcc_lock);
+       tag = tag & MCC_Q_CMD_TAG_MASK;
+       ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
+       if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
+               ctrl->mcc_free_index = 0;
+       else
+               ctrl->mcc_free_index++;
+       ctrl->mcc_tag_available++;
+       mccq->used--;
+       spin_unlock_bh(&ctrl->mcc_lock);
+}
+
+/**
+ * beiscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ **/
+void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
+{
+       iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
 /*
- * beiscsi_mccq_compl()- Wait for completion of MBX
+ * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
  * @phba: Driver private structure
  * @tag: Tag for the MBX Command
  * @wrb: the WRB used for the MBX Command
@@ -146,43 +216,40 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
  * Success: 0
  * Failure: Non-Zero
  **/
-int beiscsi_mccq_compl(struct beiscsi_hba *phba,
-               uint32_t tag, struct be_mcc_wrb **wrb,
-               struct be_dma_mem *mbx_cmd_mem)
+int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
+                           uint32_t tag, struct be_mcc_wrb **wrb,
+                           struct be_dma_mem *mbx_cmd_mem)
 {
        int rc = 0;
-       uint32_t mcc_tag_response;
+       uint32_t mcc_tag_status;
        uint16_t status = 0, addl_status = 0, wrb_num = 0;
        struct be_mcc_wrb *temp_wrb;
        struct be_cmd_req_hdr *mbx_hdr;
        struct be_cmd_resp_hdr *mbx_resp_hdr;
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
-       if (beiscsi_error(phba)) {
-               free_mcc_tag(&phba->ctrl, tag);
+       if (beiscsi_error(phba))
                return -EPERM;
-       }
-
-       /* Set MBX Tag state to Active */
-       spin_lock(&phba->ctrl.mbox_lock);
-       phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
-       spin_unlock(&phba->ctrl.mbox_lock);
 
        /* wait for the mccq completion */
        rc = wait_event_interruptible_timeout(
                                phba->ctrl.mcc_wait[tag],
-                               phba->ctrl.mcc_numtag[tag],
+                               phba->ctrl.mcc_tag_status[tag],
                                msecs_to_jiffies(
                                BEISCSI_HOST_MBX_TIMEOUT));
-
+       /**
+        * If MBOX cmd timeout expired, tag and resource allocated
+        * for cmd is not freed until FW returns completion.
+        */
        if (rc <= 0) {
                struct be_dma_mem *tag_mem;
-               /* Set MBX Tag state to timeout */
-               spin_lock(&phba->ctrl.mbox_lock);
-               phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
-               spin_unlock(&phba->ctrl.mbox_lock);
 
-               /* Store resource addr to be freed later */
+               /**
+                * PCI/DMA memory allocated and posted in non-embedded mode
+                * will have mbx_cmd_mem != NULL.
+                * Save virtual and bus addresses for the command so that it
+                * can be freed later.
+                **/
                tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
                if (mbx_cmd_mem) {
                        tag_mem->size = mbx_cmd_mem->size;
@@ -191,28 +258,28 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                } else
                        tag_mem->size = 0;
 
+               /* first make tag_mem_state visible to all */
+               wmb();
+               set_bit(MCC_TAG_STATE_TIMEOUT,
+                               &phba->ctrl.ptag_state[tag].tag_state);
+
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
                            BEISCSI_LOG_CONFIG,
                            "BC_%d : MBX Cmd Completion timed out\n");
                return -EBUSY;
-       } else {
-               rc = 0;
-               /* Set MBX Tag state to completed */
-               spin_lock(&phba->ctrl.mbox_lock);
-               phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
-               spin_unlock(&phba->ctrl.mbox_lock);
        }
 
-       mcc_tag_response = phba->ctrl.mcc_numtag[tag];
-       status = (mcc_tag_response & CQE_STATUS_MASK);
-       addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
+       rc = 0;
+       mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
+       status = (mcc_tag_status & CQE_STATUS_MASK);
+       addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
                        CQE_STATUS_ADDL_SHIFT);
 
        if (mbx_cmd_mem) {
                mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
        } else {
-               wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
+               wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
                           CQE_STATUS_WRB_SHIFT;
                temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
                mbx_hdr = embedded_payload(temp_wrb);
@@ -231,7 +298,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                            mbx_hdr->subsystem,
                            mbx_hdr->opcode,
                            status, addl_status);
-
+               rc = -EIO;
                if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
                        mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
                        beiscsi_log(phba, KERN_WARNING,
@@ -241,70 +308,16 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                                    "Resp_Len : %d Actual_Resp_Len : %d\n",
                                    mbx_resp_hdr->response_length,
                                    mbx_resp_hdr->actual_resp_len);
-
                        rc = -EAGAIN;
-                       goto release_mcc_tag;
                }
-               rc = -EIO;
        }
 
-release_mcc_tag:
-       /* Release the MCC entry */
-       free_mcc_tag(&phba->ctrl, tag);
-
+       free_mcc_wrb(&phba->ctrl, tag);
        return rc;
 }
 
-void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
-{
-       spin_lock(&ctrl->mbox_lock);
-       tag = tag & 0x000000FF;
-       ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
-       if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
-               ctrl->mcc_free_index = 0;
-       else
-               ctrl->mcc_free_index++;
-       ctrl->mcc_tag_available++;
-       spin_unlock(&ctrl->mbox_lock);
-}
-
-bool is_link_state_evt(u32 trailer)
-{
-       return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
-                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
-                 ASYNC_EVENT_CODE_LINK_STATE);
-}
-
-static bool is_iscsi_evt(u32 trailer)
-{
-       return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
-                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
-                 ASYNC_EVENT_CODE_ISCSI;
-}
-
-static int iscsi_evt_type(u32 trailer)
-{
-       return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
-                ASYNC_TRAILER_EVENT_TYPE_MASK;
-}
-
-static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
-{
-       if (compl->flags != 0) {
-               compl->flags = le32_to_cpu(compl->flags);
-               WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
-               return true;
-       } else
-               return false;
-}
-
-static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
-{
-       compl->flags = 0;
-}
-
 /*
- * be_mcc_compl_process()- Check the MBX comapletion status
+ * beiscsi_process_mbox_compl()- Check the MBX completion status
  * @ctrl: Function specific MBX data structure
  * @compl: Completion status of MBX Command
  *
@@ -314,8 +327,8 @@ static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
  * Success: Zero
  * Failure: Non-Zero
  **/
-static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
-                               struct be_mcc_compl *compl)
+static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
+                                     struct be_mcc_compl *compl)
 {
        u16 compl_status, extd_status;
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -323,206 +336,228 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
        struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
        struct be_cmd_resp_hdr *resp_hdr;
 
-       be_dws_le_to_cpu(compl, 4);
-
-       compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
-                                       CQE_STATUS_COMPL_MASK;
-       if (compl_status != MCC_STATUS_SUCCESS) {
-               extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
-                                               CQE_STATUS_EXTD_MASK;
-
+       /**
+        * To check if valid bit is set, check the entire word as we don't know
+        * the endianness of the data (old entry is host endian while a new
+        * entry is little endian)
+        */
+       if (!compl->flags) {
                beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                           "BC_%d : error in cmd completion: "
-                           "Subsystem : %d Opcode : %d "
-                           "status(compl/extd)=%d/%d\n",
-                           hdr->subsystem, hdr->opcode,
-                           compl_status, extd_status);
-
-               if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
-                       resp_hdr = (struct be_cmd_resp_hdr *) hdr;
-                       if (resp_hdr->response_length)
-                               return 0;
-               }
+                               BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                               "BC_%d : BMBX busy, no completion\n");
                return -EBUSY;
        }
-       return 0;
-}
-
-int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
-                                   struct be_mcc_compl *compl)
-{
-       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-       u16 compl_status, extd_status;
-       unsigned short tag;
+       compl->flags = le32_to_cpu(compl->flags);
+       WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
 
+       /**
+        * Just swap the status to host endian;
+        * mcc tag is opaquely copied from mcc_wrb.
+        */
        be_dws_le_to_cpu(compl, 4);
-
        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
-                                       CQE_STATUS_COMPL_MASK;
-       /* The ctrl.mcc_numtag[tag] is filled with
-        * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
-        * [7:0] = compl_status
-        */
-       tag = (compl->tag0 & 0x000000FF);
+               CQE_STATUS_COMPL_MASK;
        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
-                                       CQE_STATUS_EXTD_MASK;
+               CQE_STATUS_EXTD_MASK;
+       /* Need to reset the entire word that houses the valid bit */
+       compl->flags = 0;
 
-       ctrl->mcc_numtag[tag]  = 0x80000000;
-       ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
-       ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
-       ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
+       if (compl_status == MCC_STATUS_SUCCESS)
+               return 0;
 
-       if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
-               wake_up_interruptible(&ctrl->mcc_wait[tag]);
-       } else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
-               struct be_dma_mem *tag_mem;
-               tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                   "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
+                   hdr->subsystem, hdr->opcode, compl_status, extd_status);
 
-               beiscsi_log(phba, KERN_WARNING,
-                           BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
-                           BEISCSI_LOG_CONFIG,
-                           "BC_%d : MBX Completion for timeout Command "
-                           "from FW\n");
-               /* Check if memory needs to be freed */
-               if (tag_mem->size)
-                       pci_free_consistent(ctrl->pdev, tag_mem->size,
-                                           tag_mem->va, tag_mem->dma);
-
-               /* Change tag state */
-               spin_lock(&phba->ctrl.mbox_lock);
-               ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
-               spin_unlock(&phba->ctrl.mbox_lock);
-
-               /* Free MCC Tag */
-               free_mcc_tag(ctrl, tag);
+       if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+               /* if status is insufficient buffer, check the length */
+               resp_hdr = (struct be_cmd_resp_hdr *) hdr;
+               if (resp_hdr->response_length)
+                       return 0;
        }
-
-       return 0;
+       return -EINVAL;
 }
 
-static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
+static void beiscsi_process_async_link(struct beiscsi_hba *phba,
+                                      struct be_mcc_compl *compl)
 {
-       struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
-       struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
+       struct be_async_event_link_state *evt;
+
+       evt = (struct be_async_event_link_state *)compl;
 
-       if (be_mcc_compl_is_new(compl)) {
-               queue_tail_inc(mcc_cq);
-               return compl;
+       phba->port_speed = evt->port_speed;
+       /**
+        * Check logical link status in ASYNC event.
+        * This has been newly introduced in SKH-R Firmware 10.0.338.45.
+        **/
+       if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
+               phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
+               phba->get_boot = BE_GET_BOOT_RETRIES;
+               __beiscsi_log(phba, KERN_ERR,
+                             "BC_%d : Link Up on Port %d tag 0x%x\n",
+                             evt->physical_port, evt->event_tag);
+       } else {
+               phba->state = BE_ADAPTER_LINK_DOWN;
+               __beiscsi_log(phba, KERN_ERR,
+                             "BC_%d : Link Down on Port %d tag 0x%x\n",
+                             evt->physical_port, evt->event_tag);
+               iscsi_host_for_each_session(phba->shost,
+                                           beiscsi_fail_session);
        }
-       return NULL;
 }
 
-/**
- * be2iscsi_fail_session(): Closing session with appropriate error
- * @cls_session: ptr to session
- *
- * Depending on adapter state appropriate error flag is passed.
- **/
-void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+static char *beiscsi_port_misconf_event_msg[] = {
+       "Physical Link is functional.",
+       "Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
+       "Optics of two types installed - Remove one optic or install matching pair of optics.",
+       "Incompatible optics - Replace with compatible optics for card to function.",
+       "Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
+       "Uncertified optics - Replace with Avago Certified optics to enable link operation."
+};
+
+static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
+                                     struct be_mcc_compl *compl)
 {
-       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-       struct beiscsi_hba *phba = iscsi_host_priv(shost);
-       uint32_t iscsi_err_flag;
+       struct be_async_event_sli *async_sli;
+       u8 evt_type, state, old_state, le;
+       char *sev = KERN_WARNING;
+       char *msg = NULL;
+
+       evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
+       evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;
+
+       /* processing only MISCONFIGURED physical port event */
+       if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
+               return;
+
+       async_sli = (struct be_async_event_sli *)compl;
+       state = async_sli->event_data1 >>
+                (phba->fw_config.phys_port * 8) & 0xff;
+       le = async_sli->event_data2 >>
+                (phba->fw_config.phys_port * 8) & 0xff;
+
+       old_state = phba->optic_state;
+       phba->optic_state = state;
+
+       if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
+               /* fw is reporting a state we don't know, log and return */
+               __beiscsi_log(phba, KERN_ERR,
+                           "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
+                           phba->port_name, async_sli->event_data1);
+               return;
+       }
 
-       if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
-               iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
-       else
-               iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
+       if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
+               /* log link effect for unqualified-4, uncertified-5 optics */
+               if (state > 3)
+                       msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
+                               " Link is non-operational." :
+                               " Link is operational.";
+               /* 1 - info */
+               if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
+                       sev = KERN_INFO;
+               /* 2 - error */
+               if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
+                       sev = KERN_ERR;
+       }
 
-       iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+       if (old_state != phba->optic_state)
+               __beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
+                             phba->port_name,
+                             beiscsi_port_misconf_event_msg[state],
+                             !msg ? "" : msg);
 }
 
-void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
-               struct be_async_event_link_state *evt)
+void beiscsi_process_async_event(struct beiscsi_hba *phba,
+                               struct be_mcc_compl *compl)
 {
-       if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
-           ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
-            (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
-               phba->state = BE_ADAPTER_LINK_DOWN;
-
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Link Down on Port %d\n",
-                           evt->physical_port);
-
-               iscsi_host_for_each_session(phba->shost,
-                                           be2iscsi_fail_session);
-       } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
-                   ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
-                    (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
-               phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
+       char *sev = KERN_INFO;
+       u8 evt_code;
+
+       /* interpret flags as an async trailer */
+       evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
+       evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
+       switch (evt_code) {
+       case ASYNC_EVENT_CODE_LINK_STATE:
+               beiscsi_process_async_link(phba, compl);
+               break;
+       case ASYNC_EVENT_CODE_ISCSI:
+               phba->state |= BE_ADAPTER_CHECK_BOOT;
                phba->get_boot = BE_GET_BOOT_RETRIES;
-
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Link UP on Port %d\n",
-                           evt->physical_port);
+               sev = KERN_ERR;
+               break;
+       case ASYNC_EVENT_CODE_SLI:
+               beiscsi_process_async_sli(phba, compl);
+               break;
+       default:
+               /* event not registered */
+               sev = KERN_ERR;
        }
+
+       beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                   "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
+                   evt_code, compl->status, compl->flags);
 }
 
-int beiscsi_process_mcc(struct beiscsi_hba *phba)
+int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
+                             struct be_mcc_compl *compl)
 {
-       struct be_mcc_compl *compl;
-       int num = 0, status = 0;
-       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+       u16 compl_status, extd_status;
+       struct be_dma_mem *tag_mem;
+       unsigned int tag, wrb_idx;
 
-       spin_lock_bh(&phba->ctrl.mcc_cq_lock);
-       while ((compl = be_mcc_compl_get(phba))) {
-               if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
-                       /* Interpret flags as an async trailer */
-                       if (is_link_state_evt(compl->flags))
-                               /* Interpret compl as a async link evt */
-                               beiscsi_async_link_state_process(phba,
-                                  (struct be_async_event_link_state *) compl);
-                       else if (is_iscsi_evt(compl->flags)) {
-                               switch (iscsi_evt_type(compl->flags)) {
-                               case ASYNC_EVENT_NEW_ISCSI_TGT_DISC:
-                               case ASYNC_EVENT_NEW_ISCSI_CONN:
-                               case ASYNC_EVENT_NEW_TCP_CONN:
-                                       phba->state |= BE_ADAPTER_CHECK_BOOT;
-                                       phba->get_boot = BE_GET_BOOT_RETRIES;
-                                       beiscsi_log(phba, KERN_ERR,
-                                                   BEISCSI_LOG_CONFIG |
-                                                   BEISCSI_LOG_MBOX,
-                                                   "BC_%d : Async iscsi Event,"
-                                                   " flags handled = 0x%08x\n",
-                                                   compl->flags);
-                                       break;
-                               default:
-                                       phba->state |= BE_ADAPTER_CHECK_BOOT;
-                                       phba->get_boot = BE_GET_BOOT_RETRIES;
-                                       beiscsi_log(phba, KERN_ERR,
-                                                   BEISCSI_LOG_CONFIG |
-                                                   BEISCSI_LOG_MBOX,
-                                                   "BC_%d : Unsupported Async"
-                                                   " Event, flags = 0x%08x\n",
-                                                   compl->flags);
-                               }
-                       } else
-                               beiscsi_log(phba, KERN_ERR,
-                                           BEISCSI_LOG_CONFIG |
-                                           BEISCSI_LOG_MBOX,
-                                           "BC_%d : Unsupported Async Event, flags"
-                                           " = 0x%08x\n", compl->flags);
-
-               } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
-                               status = be_mcc_compl_process(ctrl, compl);
-                               atomic_dec(&phba->ctrl.mcc_obj.q.used);
-               }
-               be_mcc_compl_use(compl);
-               num++;
+       be_dws_le_to_cpu(compl, 4);
+       tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
+       wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;
+
+       if (!test_bit(MCC_TAG_STATE_RUNNING,
+                     &ctrl->ptag_state[tag].tag_state)) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
+                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                           "BC_%d : MBX cmd completed but not posted\n");
+               return 0;
        }
 
-       if (num)
-               hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);
+       if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
+               beiscsi_log(phba, KERN_WARNING,
+                           BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
+                           BEISCSI_LOG_CONFIG,
+                           "BC_%d : MBX Completion for timeout Command from FW\n");
+               /*
+                * Check the size before freeing: PCI consistent memory is
+                * allocated only for non-embedded commands.
+                */
+               tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+               if (tag_mem->size)
+                       pci_free_consistent(ctrl->pdev, tag_mem->size,
+                                       tag_mem->va, tag_mem->dma);
+               free_mcc_wrb(ctrl, tag);
+               return 0;
+       }
 
-       spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
-       return status;
+       compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+                      CQE_STATUS_COMPL_MASK;
+       extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+                     CQE_STATUS_EXTD_MASK;
+       /* The ctrl.mcc_tag_status[tag] is filled with
+        * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
+        * [7:0] = compl_status
+        */
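+       /* waiter can decode: compl_status = val & CQE_STATUS_MASK,
+        * extd_status = (val & CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT
+        */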
+       ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
+       ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
+       ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
+                                    CQE_STATUS_ADDL_MASK;
+       ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);
+
+       /* write ordering forced in wake_up_interruptible */
+       clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
+       wake_up_interruptible(&ctrl->mcc_wait[tag]);
+       return 0;
 }
 
 /*
- * be_mcc_wait_compl()- Wait for MBX completion
+ * be_mcc_compl_poll()- Wait for MBX completion
  * @phba: driver private structure
  *
  * Wait till no more pending mcc requests are present
@@ -532,50 +567,57 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
  * Failure: Non-Zero
  *
  **/
-static int be_mcc_wait_compl(struct beiscsi_hba *phba)
+int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag)
 {
-       int i, status;
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       int i;
+
+       if (!test_bit(MCC_TAG_STATE_RUNNING,
+                     &ctrl->ptag_state[tag].tag_state)) {
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BC_%d: tag %u state not running\n", tag);
+               return 0;
+       }
        for (i = 0; i < mcc_timeout; i++) {
                if (beiscsi_error(phba))
                        return -EIO;
 
-               status = beiscsi_process_mcc(phba);
-               if (status)
-                       return status;
-
-               if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
+               beiscsi_process_mcc_cq(phba);
+               /* after polling, wrb and tag need to be released */
+               if (!test_bit(MCC_TAG_STATE_RUNNING,
+                             &ctrl->ptag_state[tag].tag_state)) {
+                       free_mcc_wrb(ctrl, tag);
                        break;
+               }
                udelay(100);
        }
-       if (i == mcc_timeout) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                           "BC_%d : FW Timed Out\n");
-               phba->fw_timeout = true;
-               beiscsi_ue_detect(phba);
-               return -EBUSY;
-       }
-       return 0;
+
+       if (i < mcc_timeout)
+               return 0;
+
+       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                   "BC_%d : FW Timed Out\n");
+       phba->fw_timeout = true;
+       beiscsi_ue_detect(phba);
+       return -EBUSY;
 }
 
-/*
- * be_mcc_notify_wait()- Notify and wait for Compl
- * @phba: driver private structure
- *
- * Notify MCC requests and wait for completion
- *
- * return
- * Success: 0
- * Failure: Non-Zero
- **/
-int be_mcc_notify_wait(struct beiscsi_hba *phba)
+void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
 {
-       be_mcc_notify(phba);
-       return be_mcc_wait_compl(phba);
+       struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+       u32 val = 0;
+
+       set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
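+       /* mark tag RUNNING before posting so the completion path accepts it */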
+       val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+       val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+       /* make request available for DMA */
+       wmb();
+       iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
 }
 
 /*
- * be_mbox_db_ready_wait()- Check ready status
+ * be_mbox_db_ready_poll()- Check ready status
  * @ctrl: Function specific MBX data structure
  *
  * Check for the ready status of FW to send BMBX
@@ -585,49 +627,45 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba)
  * Success: 0
  * Failure: Non-Zero
  **/
-static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
+static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
 {
-#define BEISCSI_MBX_RDY_BIT_TIMEOUT    4000    /* 4sec */
+       /* wait 30s for generic non-flash MBOX operation */
+#define BEISCSI_MBX_RDY_BIT_TIMEOUT    30000
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        unsigned long timeout;
-       bool read_flag = false;
-       int ret = 0, i;
        u32 ready;
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
 
-       if (beiscsi_error(phba))
-               return -EIO;
+       /*
+        * This BMBX busy-wait path is used during init only.
+        * Commands executed during init normally complete well within
+        * the 30s timeout above.
+        */
+       timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
+       do {
+               if (beiscsi_error(phba))
+                       return -EIO;
 
-       timeout = jiffies + (HZ * 110);
+               ready = ioread32(db);
+               if (ready == 0xffffffff)
+                       return -EIO;
 
-       do {
-               for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
-                       ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
-                       if (ready) {
-                               read_flag = true;
-                               break;
-                       }
-                       mdelay(1);
-               }
+               ready &= MPU_MAILBOX_DB_RDY_MASK;
+               if (ready)
+                       return 0;
 
-               if (!read_flag) {
-                       wait_event_timeout(rdybit_check_q,
-                                         (read_flag != true),
-                                          HZ * 5);
-               }
-       } while ((time_before(jiffies, timeout)) && !read_flag);
+               if (time_after(jiffies, timeout))
+                       break;
+               msleep(20);
+       } while (!ready);
 
-       if (!read_flag) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                           "BC_%d : FW Timed Out\n");
-                       phba->fw_timeout = true;
-                       beiscsi_ue_detect(phba);
-                       ret = -EBUSY;
-       }
+       beiscsi_log(phba, KERN_ERR,
+                       BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                       "BC_%d : FW Timed Out\n");
+
+       phba->fw_timeout = true;
+       beiscsi_ue_detect(phba);
 
-       return ret;
+       return -EBUSY;
 }
 
 /*
@@ -648,10 +686,8 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
-       struct be_mcc_compl *compl = &mbox->compl;
-       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 
-       status = be_mbox_db_ready_wait(ctrl);
+       status = be_mbox_db_ready_poll(ctrl);
        if (status)
                return status;
 
@@ -660,7 +696,7 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);
 
-       status = be_mbox_db_ready_wait(ctrl);
+       status = be_mbox_db_ready_poll(ctrl);
        if (status)
                return status;
 
@@ -670,81 +706,15 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
        val |= (u32) (mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);
 
-       status = be_mbox_db_ready_wait(ctrl);
+       status = be_mbox_db_ready_poll(ctrl);
        if (status)
                return status;
 
-       if (be_mcc_compl_is_new(compl)) {
-               status = be_mcc_compl_process(ctrl, &mbox->compl);
-               be_mcc_compl_use(compl);
-               if (status) {
-                       beiscsi_log(phba, KERN_ERR,
-                                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                                   "BC_%d : After be_mcc_compl_process\n");
-
-                       return status;
-               }
-       } else {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                           "BC_%d : Invalid Mailbox Completion\n");
-
-               return -EBUSY;
-       }
-       return 0;
-}
-
-/*
- * Insert the mailbox address into the doorbell in two steps
- * Polls on the mbox doorbell till a command completion (or a timeout) occurs
- */
-static int be_mbox_notify_wait(struct beiscsi_hba *phba)
-{
-       int status;
-       u32 val = 0;
-       void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
-       struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
-       struct be_mcc_mailbox *mbox = mbox_mem->va;
-       struct be_mcc_compl *compl = &mbox->compl;
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-
-       status = be_mbox_db_ready_wait(ctrl);
-       if (status)
-               return status;
-
-       val |= MPU_MAILBOX_DB_HI_MASK;
-       /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
-       val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
-       iowrite32(val, db);
-
-       /* wait for ready to be set */
-       status = be_mbox_db_ready_wait(ctrl);
-       if (status != 0)
-               return status;
-
-       val = 0;
-       /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
-       val |= (u32)(mbox_mem->dma >> 4) << 2;
-       iowrite32(val, db);
+       /* RDY is set; small delay before CQE read. */
+       udelay(1);
 
-       status = be_mbox_db_ready_wait(ctrl);
-       if (status != 0)
-               return status;
-
-       /* A cq entry has been made now */
-       if (be_mcc_compl_is_new(compl)) {
-               status = be_mcc_compl_process(ctrl, &mbox->compl);
-               be_mcc_compl_use(compl);
-               if (status)
-                       return status;
-       } else {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                           "BC_%d : invalid mailbox completion\n");
-
-               return -EBUSY;
-       }
-       return 0;
+       status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
+       return status;
 }
 
 void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
@@ -809,21 +779,6 @@ struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
        return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
 }
 
-struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
-{
-       struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
-       struct be_mcc_wrb *wrb;
-
-       WARN_ON(atomic_read(&mccq->used) >= mccq->len);
-       wrb = queue_head_node(mccq);
-       memset(wrb, 0, sizeof(*wrb));
-       wrb->tag0 = (mccq->head & 0x000000FF) << 16;
-       queue_head_inc(mccq);
-       atomic_inc(&mccq->used);
-       return wrb;
-}
-
-
 int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *eq, int eq_delay)
 {
@@ -833,7 +788,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -860,7 +815,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -881,7 +836,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
        int status;
        u8 *endian_check;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        endian_check = (u8 *) wrb;
@@ -900,7 +855,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BC_%d : be_cmd_fw_initialize Failed\n");
 
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -921,7 +876,7 @@ int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
        int status;
        u8 *endian_check;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        endian_check = (u8 *) wrb;
@@ -941,7 +896,7 @@ int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BC_%d : be_cmd_fw_uninit Failed\n");
 
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -957,7 +912,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
        void *ctxt = &req->context;
        int status;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1007,7 +962,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                            "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
                            status);
 
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
 
        return status;
 }
@@ -1025,13 +980,13 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
                        struct be_queue_info *cq)
 {
        struct be_mcc_wrb *wrb;
-       struct be_cmd_req_mcc_create *req;
+       struct be_cmd_req_mcc_create_ext *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        struct be_ctrl_info *ctrl;
        void *ctxt;
        int status;
 
-       spin_lock(&phba->ctrl.mbox_lock);
+       mutex_lock(&phba->ctrl.mbox_lock);
        ctrl = &phba->ctrl;
        wrb = wrb_from_mbox(&ctrl->mbox_mem);
        memset(wrb, 0, sizeof(*wrb));
@@ -1041,9 +996,12 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+                       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
 
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
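+       /* subscribe this MCCQ to the async events handled by the driver */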
+       req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
+       req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
+       req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;
 
        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
                      PCI_FUNC(phba->pcidev->devfn));
@@ -1056,13 +1014,13 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
 
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
-       status = be_mbox_notify_wait(phba);
+       status = be_mbox_notify(ctrl);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
-       spin_unlock(&phba->ctrl.mbox_lock);
+       mutex_unlock(&phba->ctrl.mbox_lock);
 
        return status;
 }
@@ -1080,7 +1038,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
                    "BC_%d : In beiscsi_cmd_q_destroy "
                    "queue_type : %d\n", queue_type);
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -1110,7 +1068,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
                opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
                break;
        default:
-               spin_unlock(&ctrl->mbox_lock);
+               mutex_unlock(&ctrl->mbox_lock);
                BUG();
                return -ENXIO;
        }
@@ -1120,7 +1078,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 
        status = be_mbox_notify(ctrl);
 
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -1155,7 +1113,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
        void *ctxt = &req->context;
        int status;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1227,7 +1185,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                        defq_ring->doorbell_offset = resp->doorbell_offset;
                }
        }
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
 
        return status;
 }
@@ -1255,7 +1213,7 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        int status;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1286,7 +1244,7 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
                        pwrb_context->doorbell_offset = resp->doorbell_offset;
                }
        }
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -1297,7 +1255,7 @@ int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
        struct be_post_template_pages_req *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
 
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1310,7 +1268,7 @@ int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
        status = be_mbox_notify(ctrl);
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -1320,7 +1278,7 @@ int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
        struct be_remove_template_pages_req *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
 
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1331,7 +1289,7 @@ int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
        req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
 
        status = be_mbox_notify(ctrl);
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -1350,7 +1308,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
        if (num_pages == 0xff)
                num_pages = 1;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
        do {
                memset(wrb, 0, sizeof(*wrb));
                be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1379,7 +1337,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
                }
        } while (num_pages > 0);
 error:
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        if (status != 0)
                beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
        return status;
@@ -1392,15 +1350,15 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba  *phba)
        struct be_post_sgl_pages_req *req = embedded_payload(wrb);
        int status;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
 
        req = embedded_payload(wrb);
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
-       status = be_mbox_notify_wait(phba);
+       status = be_mbox_notify(ctrl);
 
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -1417,21 +1375,20 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba  *phba)
 int be_cmd_set_vlan(struct beiscsi_hba *phba,
                     uint16_t vlan_tag)
 {
-       unsigned int tag = 0;
+       unsigned int tag;
        struct be_mcc_wrb *wrb;
        struct be_cmd_set_vlan_req *req;
        struct be_ctrl_info *ctrl = &phba->ctrl;
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       if (mutex_lock_interruptible(&ctrl->mbox_lock))
+               return 0;
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
 
-       wrb = wrb_from_mccq(phba);
        req = embedded_payload(wrb);
-       wrb->tag0 |= tag;
        be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
@@ -1440,8 +1397,8 @@ int be_cmd_set_vlan(struct beiscsi_hba *phba,
        req->interface_hndl = phba->interface_handle;
        req->vlan_priority = vlan_tag;
 
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
 
        return tag;
 }
index 4bfca355fbe4b164cd0e73d570c3ed3e99156520..deeb951e6874c6d1da49c2443144e25b2c1cb7b8 100644 (file)
@@ -58,15 +58,16 @@ struct be_mcc_wrb {
 #define MCC_STATUS_ILLEGAL_FIELD 0x3
 #define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
 
-#define CQE_STATUS_COMPL_MASK 0xFFFF
-#define CQE_STATUS_COMPL_SHIFT 0       /* bits 0 - 15 */
-#define CQE_STATUS_EXTD_MASK 0xFFFF
-#define CQE_STATUS_EXTD_SHIFT 16               /* bits 0 - 15 */
+#define CQE_STATUS_COMPL_MASK  0xFFFF
+#define CQE_STATUS_COMPL_SHIFT 0               /* bits 0 - 15 */
+#define CQE_STATUS_EXTD_MASK   0xFFFF
+#define CQE_STATUS_EXTD_SHIFT  16              /* bits 31 - 16 */
 #define CQE_STATUS_ADDL_MASK   0xFF00
-#define CQE_STATUS_MASK        0xFF
-#define CQE_STATUS_ADDL_SHIFT  0x08
+#define CQE_STATUS_ADDL_SHIFT  8
+#define CQE_STATUS_MASK                0xFF
 #define CQE_STATUS_WRB_MASK    0xFF0000
 #define CQE_STATUS_WRB_SHIFT   16
+
 #define BEISCSI_HOST_MBX_TIMEOUT (110 * 1000)
 #define BEISCSI_FW_MBX_TIMEOUT 100
 
@@ -119,13 +120,22 @@ struct be_mcc_compl {
 #define ASYNC_TRAILER_EVENT_CODE_MASK  0xFF
 #define ASYNC_EVENT_CODE_LINK_STATE    0x1
 #define ASYNC_EVENT_CODE_ISCSI         0x4
+#define ASYNC_EVENT_CODE_SLI           0x11
 
 #define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16      /* bits 16 - 23 */
-#define ASYNC_TRAILER_EVENT_TYPE_MASK  0xF
+#define ASYNC_TRAILER_EVENT_TYPE_MASK  0xFF
+
+/* iSCSI events */
 #define ASYNC_EVENT_NEW_ISCSI_TGT_DISC 0x4
 #define ASYNC_EVENT_NEW_ISCSI_CONN     0x5
 #define ASYNC_EVENT_NEW_TCP_CONN       0x7
 
+/* SLI events */
+#define ASYNC_SLI_EVENT_TYPE_MISCONFIGURED     0x9
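+/* link effect byte: bit 7 - valid, bits 2:1 - severity, bit 0 - state */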
+#define ASYNC_SLI_LINK_EFFECT_VALID(le)                (le & 0x80)
+#define ASYNC_SLI_LINK_EFFECT_SEV(le)          ((le >> 1)  & 0x03)
+#define ASYNC_SLI_LINK_EFFECT_STATE(le)                (le & 0x01)
+
 struct be_async_event_trailer {
        u32 code;
 };
@@ -133,7 +143,6 @@ struct be_async_event_trailer {
 enum {
        ASYNC_EVENT_LINK_DOWN = 0x0,
        ASYNC_EVENT_LINK_UP = 0x1,
-       ASYNC_EVENT_LOGICAL = 0x2
 };
 
 /**
@@ -143,16 +152,39 @@ enum {
 struct be_async_event_link_state {
        u8 physical_port;
        u8 port_link_status;
+/**
+ * ASYNC_EVENT_LINK_DOWN               0x0
+ * ASYNC_EVENT_LINK_UP                 0x1
+ * ASYNC_EVENT_LINK_LOGICAL_DOWN       0x2
+ * ASYNC_EVENT_LINK_LOGICAL_UP         0x3
+ */
+#define BE_ASYNC_LINK_UP_MASK          0x01
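+/* set in port_link_status for both LINK_UP and LINK_LOGICAL_UP */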
        u8 port_duplex;
        u8 port_speed;
-#define BEISCSI_PHY_LINK_FAULT_NONE    0x00
-#define BEISCSI_PHY_LINK_FAULT_LOCAL   0x01
-#define BEISCSI_PHY_LINK_FAULT_REMOTE  0x02
+/* BE2ISCSI_LINK_SPEED_ZERO    0x00 - no link */
+#define BE2ISCSI_LINK_SPEED_10MBPS     0x01
+#define BE2ISCSI_LINK_SPEED_100MBPS    0x02
+#define BE2ISCSI_LINK_SPEED_1GBPS      0x03
+#define BE2ISCSI_LINK_SPEED_10GBPS     0x04
+#define BE2ISCSI_LINK_SPEED_25GBPS     0x06
+#define BE2ISCSI_LINK_SPEED_40GBPS     0x07
        u8 port_fault;
-       u8 rsvd0[7];
+       u8 event_reason;
+       u16 qos_link_speed;
+       u32 event_tag;
        struct be_async_event_trailer trailer;
 } __packed;
 
+/**
+ * When the async trailer carries an SLI event, the mcc_compl is interpreted as:
+ */
+struct be_async_event_sli {
+       u32 event_data1;
+       u32 event_data2;
+       u32 reserved;
+       u32 trailer;
+} __packed;
+
 struct be_mcc_mailbox {
        struct be_mcc_wrb wrb;
        struct be_mcc_compl compl;
@@ -172,6 +204,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_CQ_CREATE                                12
 #define OPCODE_COMMON_EQ_CREATE                                13
 #define OPCODE_COMMON_MCC_CREATE                       21
+#define OPCODE_COMMON_MCC_CREATE_EXT                   90
 #define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS      24
 #define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS   25
 #define OPCODE_COMMON_GET_CNTL_ATTRIBUTES              32
@@ -183,6 +216,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_EQ_DESTROY                       55
 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG            58
 #define OPCODE_COMMON_FUNCTION_RESET                   61
+#define OPCODE_COMMON_GET_PORT_NAME                    77
 
 /**
  * LIST of opcodes that are common between Initiator and Target
@@ -587,10 +621,11 @@ struct amap_mcc_context {
        u8 rsvd2[32];
 } __packed;
 
-struct be_cmd_req_mcc_create {
+struct be_cmd_req_mcc_create_ext {
        struct be_cmd_req_hdr hdr;
        u16 num_pages;
        u16 rsvd0;
+       u32 async_evt_bitmap;
        u8 context[sizeof(struct amap_mcc_context) / 8];
        struct phys_addr pages[8];
 } __packed;
@@ -653,20 +688,6 @@ struct be_cmd_req_modify_eq_delay {
 
 /******************** Get MAC ADDR *******************/
 
-#define ETH_ALEN       6
-
-struct be_cmd_get_nic_conf_req {
-       struct be_cmd_req_hdr hdr;
-       u32 nic_port_count;
-       u32 speed;
-       u32 max_speed;
-       u32 link_state;
-       u32 max_frame_size;
-       u16 size_of_structure;
-       u8 mac_address[ETH_ALEN];
-       u32 rsvd[23];
-};
-
 struct be_cmd_get_nic_conf_resp {
        struct be_cmd_resp_hdr hdr;
        u32 nic_port_count;
@@ -675,9 +696,8 @@ struct be_cmd_get_nic_conf_resp {
        u32 link_state;
        u32 max_frame_size;
        u16 size_of_structure;
-       u8 mac_address[6];
-       u32 rsvd[23];
-};
+       u8 mac_address[ETH_ALEN];
+} __packed;
 
 #define BEISCSI_ALIAS_LEN 32
 
@@ -689,29 +709,6 @@ struct be_cmd_hba_name {
        u8 initiator_alias[BEISCSI_ALIAS_LEN];
 } __packed;
 
-struct be_cmd_ntwk_link_status_req {
-       struct be_cmd_req_hdr hdr;
-       u32 rsvd0;
-} __packed;
-
-/*** Port Speed Values ***/
-#define BE2ISCSI_LINK_SPEED_ZERO       0x00
-#define BE2ISCSI_LINK_SPEED_10MBPS     0x01
-#define BE2ISCSI_LINK_SPEED_100MBPS    0x02
-#define BE2ISCSI_LINK_SPEED_1GBPS      0x03
-#define BE2ISCSI_LINK_SPEED_10GBPS     0x04
-struct be_cmd_ntwk_link_status_resp {
-       struct be_cmd_resp_hdr hdr;
-       u8 phys_port;
-       u8 mac_duplex;
-       u8 mac_speed;
-       u8 mac_fault;
-       u8 mgmt_mac_duplex;
-       u8 mgmt_mac_speed;
-       u16 qos_link_speed;
-       u32 logical_link_speed;
-} __packed;
-
 int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *eq, int eq_delay);
 
@@ -730,28 +727,28 @@ int be_poll_mcc(struct be_ctrl_info *ctrl);
 int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
                                      struct beiscsi_hba *phba);
 unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
-unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
 
-void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
+void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag);
 
 int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
                            int num);
-int beiscsi_mccq_compl(struct beiscsi_hba *phba,
-                       uint32_t tag, struct be_mcc_wrb **wrb,
-                       struct be_dma_mem *mbx_cmd_mem);
+int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
+                           uint32_t tag, struct be_mcc_wrb **wrb,
+                           struct be_dma_mem *mbx_cmd_mem);
 /* iSCSI Functions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
 int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
 
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
-struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
-int be_mcc_notify_wait(struct beiscsi_hba *phba);
-void be_mcc_notify(struct beiscsi_hba *phba);
-unsigned int alloc_mcc_tag(struct beiscsi_hba *phba);
-void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
-               struct be_async_event_link_state *evt);
-int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
-                                   struct be_mcc_compl *compl);
+int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag);
+void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag);
+struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
+                                unsigned int *ref_tag);
+void beiscsi_process_async_event(struct beiscsi_hba *phba,
+                               struct be_mcc_compl *compl);
+int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
+                             struct be_mcc_compl *compl);
+
 
 int be_mbox_notify(struct be_ctrl_info *ctrl);
 
@@ -777,8 +774,6 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
                       struct hwi_wrb_context *pwrb_context,
                       uint8_t ulp_num);
 
-bool is_link_state_evt(u32 trailer);
-
 /* Configuration Functions */
 int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
 
@@ -1137,6 +1132,21 @@ struct be_cmd_get_all_if_id_req {
        u32 if_hndl_list[1];
 } __packed;
 
+struct be_cmd_get_port_name {
+       union {
+               struct be_cmd_req_hdr req_hdr;
+               struct be_cmd_resp_hdr resp_hdr;
+       } h;
+       union {
+               struct {
+                       u32 reserved;
+               } req;
+               struct {
+                       u32 port_names;
+               } resp;
+       } p;
+} __packed;
+
 #define ISCSI_OPCODE_SCSI_DATA_OUT             5
 #define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
 #define OPCODE_COMMON_MODIFY_EQ_DELAY          41
@@ -1367,5 +1377,5 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
 void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                        u8 subsystem, u8 opcode, int cmd_len);
 
-void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);
+void beiscsi_fail_session(struct iscsi_cls_session *cls_session);
 #endif /* !BEISCSI_CMDS_H */
index 022e87b62e401a37e1d6578e065f389ce560a8b4..09f89a3eaa87605733d673741aedc64c309f60ce 100644 (file)
@@ -367,13 +367,14 @@ beiscsi_set_vlan_tag(struct Scsi_Host *shost,
                      struct iscsi_iface_param_info *iface_param)
 {
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
-       int ret = 0;
+       int ret;
 
        /* Get the Interface Handle */
-       if (mgmt_get_all_if_id(phba)) {
+       ret = mgmt_get_all_if_id(phba);
+       if (ret) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : Getting Interface Handle Failed\n");
-               return -EIO;
+               return ret;
        }
 
        switch (iface_param->param) {
@@ -465,6 +466,10 @@ beiscsi_set_ipv6(struct Scsi_Host *shost,
                ret = mgmt_set_ip(phba, iface_param, NULL,
                                  ISCSI_BOOTPROTO_STATIC);
                break;
+       case ISCSI_NET_PARAM_VLAN_ENABLED:
+       case ISCSI_NET_PARAM_VLAN_TAG:
+               ret = beiscsi_set_vlan_tag(shost, iface_param);
+               break;
        default:
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : Param %d not supported\n",
@@ -730,7 +735,7 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
                return -EBUSY;
        }
 
-       rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+       rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
        if (rc) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -753,7 +758,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
        struct iscsi_cls_host *ihost = shost->shost_data;
 
-       ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?
+       ihost->port_state = (phba->state & BE_ADAPTER_LINK_UP) ?
                ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
 }
 
@@ -761,34 +766,13 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
  * beiscsi_get_port_speed  - Get the Port Speed from Adapter
  * @shost : pointer to scsi_host structure
  *
- * returns Success/Failure
  */
-static int beiscsi_get_port_speed(struct Scsi_Host *shost)
+static void beiscsi_get_port_speed(struct Scsi_Host *shost)
 {
-       int rc;
-       unsigned int tag;
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_ntwk_link_status_resp *resp;
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
        struct iscsi_cls_host *ihost = shost->shost_data;
 
-       tag = be_cmd_get_port_speed(phba);
-       if (!tag) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Getting Port Speed Failed\n");
-
-                return -EBUSY;
-       }
-       rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
-       if (rc) {
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                           "BS_%d : Port Speed MBX Failed\n");
-               return rc;
-       }
-       resp = embedded_payload(wrb);
-
-       switch (resp->mac_speed) {
+       switch (phba->port_speed) {
        case BE2ISCSI_LINK_SPEED_10MBPS:
                ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
                break;
@@ -801,10 +785,15 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
        case BE2ISCSI_LINK_SPEED_10GBPS:
                ihost->port_speed = ISCSI_PORT_SPEED_10GBPS;
                break;
+       case BE2ISCSI_LINK_SPEED_25GBPS:
+               ihost->port_speed = ISCSI_PORT_SPEED_25GBPS;
+               break;
+       case BE2ISCSI_LINK_SPEED_40GBPS:
+               ihost->port_speed = ISCSI_PORT_SPEED_40GBPS;
+               break;
        default:
                ihost->port_speed = ISCSI_PORT_SPEED_UNKNOWN;
        }
-       return 0;
 }
 
 /**
@@ -854,12 +843,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
                status = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
                break;
        case ISCSI_HOST_PARAM_PORT_SPEED:
-               status = beiscsi_get_port_speed(shost);
-               if (status) {
-                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                                   "BS_%d : Retreiving Port Speed Failed\n");
-                       return status;
-               }
+               beiscsi_get_port_speed(shost);
                status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
                break;
        default:
@@ -1159,7 +1143,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                return -EAGAIN;
        }
 
-       ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+       ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
        if (ret) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -1293,7 +1277,7 @@ static void beiscsi_flush_cq(struct beiscsi_hba *phba)
        for (i = 0; i < phba->num_cpus; i++) {
                pbe_eq = &phwi_context->be_eq[i];
                irq_poll_disable(&pbe_eq->iopoll);
-               beiscsi_process_cq(pbe_eq);
+               beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
                irq_poll_enable(&pbe_eq->iopoll);
        }
 }
@@ -1318,7 +1302,7 @@ static int beiscsi_close_conn(struct  beiscsi_endpoint *beiscsi_ep, int flag)
                ret = -EAGAIN;
        }
 
-       ret = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+       ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
 
        /* Flush the CQ entries */
        beiscsi_flush_cq(phba);
@@ -1393,7 +1377,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
                            beiscsi_ep->ep_cid);
        }
 
-       beiscsi_mccq_compl(phba, tag, NULL, NULL);
+       beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
        beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
 free_ep:
        msleep(BEISCSI_LOGOUT_SYNC_DELAY);
index cb9072a841be19cbfde9ff655fec50c523edf8b3..0892ee28463fd9c1d425b37ef4ca55d6b7df1244 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/iscsi_boot_sysfs.h>
 #include <linux/module.h>
 #include <linux/bsg-lib.h>
+#include <linux/irq_poll.h>
 
 #include <scsi/libiscsi.h>
 #include <scsi/scsi_bsg_iscsi.h>
@@ -285,7 +286,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
                return FAILED;
        }
 
-       rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+       rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
        if (rc != -EBUSY)
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
@@ -366,7 +367,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
                return FAILED;
        }
 
-       rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+       rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
        if (rc != -EBUSY)
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
@@ -727,9 +728,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
-       spin_lock_init(&ctrl->mbox_lock);
+       mutex_init(&ctrl->mbox_lock);
        spin_lock_init(&phba->ctrl.mcc_lock);
-       spin_lock_init(&phba->ctrl.mcc_cq_lock);
 
        return status;
 }
@@ -895,31 +895,17 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
 static irqreturn_t be_isr_msix(int irq, void *dev_id)
 {
        struct beiscsi_hba *phba;
-       struct be_eq_entry *eqe = NULL;
        struct be_queue_info *eq;
-       struct be_queue_info *cq;
-       unsigned int num_eq_processed;
        struct be_eq_obj *pbe_eq;
 
        pbe_eq = dev_id;
        eq = &pbe_eq->q;
-       cq = pbe_eq->cq;
-       eqe = queue_tail_node(eq);
 
        phba = pbe_eq->phba;
-       num_eq_processed = 0;
-       while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-                               & EQE_VALID_MASK) {
-               irq_poll_sched(&pbe_eq->iopoll);
 
-               AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-               queue_tail_inc(eq);
-               eqe = queue_tail_node(eq);
-               num_eq_processed++;
-       }
-
-       if (num_eq_processed)
-               hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+       /* disable interrupt till iopoll completes */
+       hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
+       irq_poll_sched(&pbe_eq->iopoll);
 
        return IRQ_HANDLED;
 }
@@ -996,6 +982,7 @@ static irqreturn_t be_isr(int irq, void *dev_id)
                return IRQ_NONE;
 }
 
+
 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
 {
        struct pci_dev *pcidev = phba->pcidev;
@@ -1070,7 +1057,7 @@ free_msix_irqs:
 
 void hwi_ring_cq_db(struct beiscsi_hba *phba,
                           unsigned int id, unsigned int num_processed,
-                          unsigned char rearm, unsigned char event)
+                          unsigned char rearm)
 {
        u32 val = 0;
 
@@ -1145,6 +1132,7 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
 {
        struct sgl_handle *psgl_handle;
 
+       spin_lock_bh(&phba->io_sgl_lock);
        if (phba->io_sgl_hndl_avbl) {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
                            "BM_%d : In alloc_io_sgl_handle,"
@@ -1162,12 +1150,14 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
                        phba->io_sgl_alloc_index++;
        } else
                psgl_handle = NULL;
+       spin_unlock_bh(&phba->io_sgl_lock);
        return psgl_handle;
 }
 
 static void
 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
 {
+       spin_lock_bh(&phba->io_sgl_lock);
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
                    "BM_%d : In free_,io_sgl_free_index=%d\n",
                    phba->io_sgl_free_index);
@@ -1182,6 +1172,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
                             "value there=%p\n", phba->io_sgl_free_index,
                             phba->io_sgl_hndl_base
                             [phba->io_sgl_free_index]);
+               spin_unlock_bh(&phba->io_sgl_lock);
                return;
        }
        phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -1190,6 +1181,25 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
                phba->io_sgl_free_index = 0;
        else
                phba->io_sgl_free_index++;
+       spin_unlock_bh(&phba->io_sgl_lock);
+}
+
+static inline struct wrb_handle *
+beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
+                      unsigned int wrbs_per_cxn)
+{
+       struct wrb_handle *pwrb_handle;
+
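+       /* wrb_lock serialises handle alloc/free on this WRB context */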
+       spin_lock_bh(&pwrb_context->wrb_lock);
+       pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
+       pwrb_context->wrb_handles_available--;
+       if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
+               pwrb_context->alloc_index = 0;
+       else
+               pwrb_context->alloc_index++;
+       spin_unlock_bh(&pwrb_context->wrb_lock);
+
+       return pwrb_handle;
 }
 
 /**
@@ -1201,30 +1211,32 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
  * This happens under session_lock until submission to chip
  */
 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
-                                    struct hwi_wrb_context **pcontext)
+                                   struct hwi_wrb_context **pcontext)
 {
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
-       struct wrb_handle *pwrb_handle;
        uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
        pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
-       if (pwrb_context->wrb_handles_available >= 2) {
-               pwrb_handle = pwrb_context->pwrb_handle_base[
-                                           pwrb_context->alloc_index];
-               pwrb_context->wrb_handles_available--;
-               if (pwrb_context->alloc_index ==
-                                               (phba->params.wrbs_per_cxn - 1))
-                       pwrb_context->alloc_index = 0;
-               else
-                       pwrb_context->alloc_index++;
+       /* return the context address */
+       *pcontext = pwrb_context;
+       return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
+}
 
-               /* Return the context address */
-               *pcontext = pwrb_context;
-       } else
-               pwrb_handle = NULL;
-       return pwrb_handle;
+static inline void
+beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
+                      struct wrb_handle *pwrb_handle,
+                      unsigned int wrbs_per_cxn)
+{
+       spin_lock_bh(&pwrb_context->wrb_lock);
+       pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
+       pwrb_context->wrb_handles_available++;
+       if (pwrb_context->free_index == (wrbs_per_cxn - 1))
+               pwrb_context->free_index = 0;
+       else
+               pwrb_context->free_index++;
+       spin_unlock_bh(&pwrb_context->wrb_lock);
 }
 
 /**
@@ -1239,13 +1251,9 @@ static void
 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
                struct wrb_handle *pwrb_handle)
 {
-       pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
-       pwrb_context->wrb_handles_available++;
-       if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
-               pwrb_context->free_index = 0;
-       else
-               pwrb_context->free_index++;
-
+       beiscsi_put_wrb_handle(pwrb_context,
+                              pwrb_handle,
+                              phba->params.wrbs_per_cxn);
        beiscsi_log(phba, KERN_INFO,
                    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
                    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
@@ -1258,6 +1266,7 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
 {
        struct sgl_handle *psgl_handle;
 
+       spin_lock_bh(&phba->mgmt_sgl_lock);
        if (phba->eh_sgl_hndl_avbl) {
                psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
                phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
@@ -1275,13 +1284,14 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
                        phba->eh_sgl_alloc_index++;
        } else
                psgl_handle = NULL;
+       spin_unlock_bh(&phba->mgmt_sgl_lock);
        return psgl_handle;
 }
 
 void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
 {
-
+       spin_lock_bh(&phba->mgmt_sgl_lock);
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                    "BM_%d : In  free_mgmt_sgl_handle,"
                    "eh_sgl_free_index=%d\n",
@@ -1296,6 +1306,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
                            "BM_%d : Double Free in eh SGL ,"
                            "eh_sgl_free_index=%d\n",
                            phba->eh_sgl_free_index);
+               spin_unlock_bh(&phba->mgmt_sgl_lock);
                return;
        }
        phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1305,6 +1316,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
                phba->eh_sgl_free_index = 0;
        else
                phba->eh_sgl_free_index++;
+       spin_unlock_bh(&phba->mgmt_sgl_lock);
 }
 
 static void
@@ -2029,7 +2041,7 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
                               phwi_ctrlr, cri_index));
 }
 
-static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
+void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
 {
        struct be_queue_info *mcc_cq;
        struct  be_mcc_compl *mcc_compl;
@@ -2039,31 +2051,15 @@ static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
        mcc_compl = queue_tail_node(mcc_cq);
        mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
        while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
-
                if (num_processed >= 32) {
                        hwi_ring_cq_db(phba, mcc_cq->id,
-                                       num_processed, 0, 0);
+                                       num_processed, 0);
                        num_processed = 0;
                }
                if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
-                       /* Interpret flags as an async trailer */
-                       if (is_link_state_evt(mcc_compl->flags))
-                               /* Interpret compl as a async link evt */
-                               beiscsi_async_link_state_process(phba,
-                               (struct be_async_event_link_state *) mcc_compl);
-                       else {
-                               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
-                                           "BM_%d :  Unsupported Async Event, flags"
-                                           " = 0x%08x\n",
-                                           mcc_compl->flags);
-                               if (phba->state & BE_ADAPTER_LINK_UP) {
-                                       phba->state |= BE_ADAPTER_CHECK_BOOT;
-                                       phba->get_boot = BE_GET_BOOT_RETRIES;
-                               }
-                       }
+                       beiscsi_process_async_event(phba, mcc_compl);
                } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
-                       be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
-                       atomic_dec(&phba->ctrl.mcc_obj.q.used);
+                       beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
                }
 
                mcc_compl->flags = 0;
@@ -2074,24 +2070,24 @@ static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
        }
 
        if (num_processed > 0)
-               hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
-
+               hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
 }
 
 /**
  * beiscsi_process_cq()- Process the Completion Queue
  * @pbe_eq: Event Q on which the Completion has come
+ * @budget: Max number of events to be processed
  *
  * return
  *     Number of Completion Entries processed.
  **/
-unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
 {
        struct be_queue_info *cq;
        struct sol_cqe *sol;
        struct dmsg_cqe *dmsg;
+       unsigned int total = 0;
        unsigned int num_processed = 0;
-       unsigned int tot_nump = 0;
        unsigned short code = 0, cid = 0;
        uint16_t cri_index = 0;
        struct beiscsi_conn *beiscsi_conn;
@@ -2142,12 +2138,12 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                beiscsi_ep = ep->dd_data;
                beiscsi_conn = beiscsi_ep->conn;
 
-               if (num_processed >= 32) {
-                       hwi_ring_cq_db(phba, cq->id,
-                                       num_processed, 0, 0);
-                       tot_nump += num_processed;
+               /* replenish cq */
+               if (num_processed == 32) {
+                       hwi_ring_cq_db(phba, cq->id, 32, 0);
                        num_processed = 0;
                }
+               total++;
 
                switch (code) {
                case SOL_CMD_COMPLETE:
@@ -2192,7 +2188,13 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                                    "BM_%d : Ignoring %s[%d] on CID : %d\n",
                                    cqe_desc[code], code, cid);
                        break;
+               case CXN_KILLED_HDR_DIGEST_ERR:
                case SOL_CMD_KILLED_DATA_DIGEST_ERR:
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+                                   "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
+                                   cqe_desc[code], code,  cid);
+                       break;
                case CMD_KILLED_INVALID_STATSN_RCVD:
                case CMD_KILLED_INVALID_R2T_RCVD:
                case CMD_CXN_KILLED_LUN_INVALID:
@@ -2218,7 +2220,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
                case CXN_KILLED_BURST_LEN_MISMATCH:
                case CXN_KILLED_AHS_RCVD:
-               case CXN_KILLED_HDR_DIGEST_ERR:
                case CXN_KILLED_UNKNOWN_HDR:
                case CXN_KILLED_STALE_ITT_TTT_RCVD:
                case CXN_KILLED_INVALID_ITT_TTT_RCVD:
@@ -2253,13 +2254,12 @@ proc_next_cqe:
                queue_tail_inc(cq);
                sol = queue_tail_node(cq);
                num_processed++;
+               if (total == budget)
+                       break;
        }
 
-       if (num_processed > 0) {
-               tot_nump += num_processed;
-               hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
-       }
-       return tot_nump;
+       hwi_ring_cq_db(phba, cq->id, num_processed, 1);
+       return total;
 }
 
 void beiscsi_process_all_cqs(struct work_struct *work)
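
beiscsi_process_cq() now takes a budget, returns the number of entries it handled so the polling core can decide whether to keep going, hands CQ credits back (doorbell with rearm=0) after every 32 entries, and rearms the CQ once when the loop ends. Below is a minimal, self-contained sketch of that budget-limited drain pattern, assuming a generic ring and a ring_db() callback; the names are illustrative stand-ins, not be2iscsi's own API.

    /* Illustrative budgeted drain; only the pattern mirrors the driver. */
    struct cq_demo {
            unsigned int tail, len;
            int (*entry_valid)(struct cq_demo *cq, unsigned int idx);
    };

    static int drain_cq(struct cq_demo *cq,
                        void (*ring_db)(unsigned int n, int rearm),
                        int budget)
    {
            int seen = 0;
            unsigned int since_db = 0;

            while (seen < budget && cq->entry_valid(cq, cq->tail)) {
                    /* handle the entry at cq->tail here */
                    cq->tail = (cq->tail + 1) % cq->len;
                    seen++;
                    /* return CQ credits every 32 entries, no rearm yet */
                    if (++since_db == 32) {
                            ring_db(since_db, 0);
                            since_db = 0;
                    }
            }
            /* final doorbell returns the remaining credits and rearms the CQ */
            ring_db(since_db, 1);
            return seen;
    }
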
@@ -2279,14 +2279,14 @@ void beiscsi_process_all_cqs(struct work_struct *work)
                spin_lock_irqsave(&phba->isr_lock, flags);
                pbe_eq->todo_mcc_cq = false;
                spin_unlock_irqrestore(&phba->isr_lock, flags);
-               beiscsi_process_mcc_isr(phba);
+               beiscsi_process_mcc_cq(phba);
        }
 
        if (pbe_eq->todo_cq) {
                spin_lock_irqsave(&phba->isr_lock, flags);
                pbe_eq->todo_cq = false;
                spin_unlock_irqrestore(&phba->isr_lock, flags);
-               beiscsi_process_cq(pbe_eq);
+               beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
        }
 
        /* rearm EQ for further interrupts */
@@ -2295,20 +2295,36 @@ void beiscsi_process_all_cqs(struct work_struct *work)
 
 static int be_iopoll(struct irq_poll *iop, int budget)
 {
-       unsigned int ret;
+       unsigned int ret, num_eq_processed;
        struct beiscsi_hba *phba;
        struct be_eq_obj *pbe_eq;
+       struct be_eq_entry *eqe = NULL;
+       struct be_queue_info *eq;
 
+       num_eq_processed = 0;
        pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
-       ret = beiscsi_process_cq(pbe_eq);
+       phba = pbe_eq->phba;
+       eq = &pbe_eq->q;
+       eqe = queue_tail_node(eq);
+
+       while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
+                       EQE_VALID_MASK) {
+               AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+               queue_tail_inc(eq);
+               eqe = queue_tail_node(eq);
+               num_eq_processed++;
+       }
+
+       hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+
+       ret = beiscsi_process_cq(pbe_eq, budget);
        pbe_eq->cq_count += ret;
        if (ret < budget) {
-               phba = pbe_eq->phba;
                irq_poll_complete(iop);
                beiscsi_log(phba, KERN_INFO,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
-                           "BM_%d : rearm pbe_eq->q.id =%d\n",
-                           pbe_eq->q.id);
+                           "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
+                           pbe_eq->q.id, ret);
                hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
        }
        return ret;
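
be_iopoll() is the driver's irq_poll handler: the hard-IRQ path schedules the poller, the poller now consumes and acknowledges the EQ entries itself before draining the CQ up to the budget, and it rearms the EQ only when it handled less work than the budget. A minimal sketch of that irq_poll pattern with the <linux/irq_poll.h> API follows; struct my_eq, MY_POLL_WEIGHT and the consume/rearm stubs are illustrative, only the irq_poll_* calls are the real interface.

    #include <linux/kernel.h>
    #include <linux/interrupt.h>
    #include <linux/irq_poll.h>

    #define MY_POLL_WEIGHT  64              /* illustrative per-round budget */

    struct my_eq {                          /* stand-in for be_eq_obj */
            struct irq_poll iopoll;
    };

    static int my_consume(struct my_eq *eq, int budget) { return 0; }  /* stub */
    static void my_rearm(struct my_eq *eq) { }                         /* stub */

    static int my_poll(struct irq_poll *iop, int budget)
    {
            struct my_eq *eq = container_of(iop, struct my_eq, iopoll);
            int done = my_consume(eq, budget);      /* handles at most budget CQEs */

            if (done < budget) {
                    irq_poll_complete(iop);         /* leave polling mode ... */
                    my_rearm(eq);                   /* ... and rearm the EQ interrupt */
            }
            return done;
    }

    static irqreturn_t my_isr(int irq, void *data)
    {
            struct my_eq *eq = data;

            irq_poll_sched(&eq->iopoll);            /* defer CQ work to softirq */
            return IRQ_HANDLED;
    }

    static void my_eq_init(struct my_eq *eq)
    {
            irq_poll_init(&eq->iopoll, MY_POLL_WEIGHT, my_poll);
    }
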
@@ -2502,7 +2518,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
  * @pwrb: ptr to the WRB entry
  * @task: iscsi task which is to be executed
  **/
-static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
+static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
 {
        struct iscsi_sge *psgl;
        struct beiscsi_io_task *io_task = task->dd_data;
@@ -2534,6 +2550,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
                                                             task->data,
                                                             task->data_count,
                                                             PCI_DMA_TODEVICE);
+                       if (pci_dma_mapping_error(phba->pcidev,
+                                                 io_task->mtask_addr))
+                               return -ENOMEM;
                        io_task->mtask_data_count = task->data_count;
                } else
                        io_task->mtask_addr = 0;
@@ -2578,6 +2597,7 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
                AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
        }
        AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+       return 0;
 }
 
 /**
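
hwi_write_buffer() can now fail: the pci_map_single() result is checked with pci_dma_mapping_error() and -ENOMEM is propagated instead of handing the hardware an unusable address. A minimal sketch of the check-after-map pattern with the legacy pci_* DMA API the driver uses; the helper name is illustrative, and the mapping must still be released later with pci_unmap_single().

    #include <linux/pci.h>

    /* Illustrative helper; only the pci_* calls are the real API. */
    static int map_for_device(struct pci_dev *pdev, void *buf, size_t len,
                              dma_addr_t *out)
    {
            dma_addr_t addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

            /* streaming mappings can fail; never use the address unchecked */
            if (pci_dma_mapping_error(pdev, addr))
                    return -ENOMEM;

            *out = addr;
            return 0;
    }
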
@@ -2904,6 +2924,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
                        }
                        num_cxn_wrbh--;
                }
+               spin_lock_init(&pwrb_context->wrb_lock);
        }
        idx = 0;
        for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
@@ -3866,6 +3887,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
        phwi_context->min_eqd = 0;
        phwi_context->cur_eqd = 0;
        be_cmd_fw_initialize(&phba->ctrl);
+       /* set optic state to unknown */
+       phba->optic_state = 0xff;
 
        status = beiscsi_create_eqs(phba, phwi_context);
        if (status != 0) {
@@ -4384,7 +4407,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
                goto boot_freemem;
        }
 
-       ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+       ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
        if (ret) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
@@ -4607,11 +4630,9 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
        }
 
        if (io_task->psgl_handle) {
-               spin_lock_bh(&phba->mgmt_sgl_lock);
                free_mgmt_sgl_handle(phba,
                                     io_task->psgl_handle);
                io_task->psgl_handle = NULL;
-               spin_unlock_bh(&phba->mgmt_sgl_lock);
        }
 
        if (io_task->mtask_addr) {
@@ -4657,9 +4678,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
                }
 
                if (io_task->psgl_handle) {
-                       spin_lock(&phba->io_sgl_lock);
                        free_io_sgl_handle(phba, io_task->psgl_handle);
-                       spin_unlock(&phba->io_sgl_lock);
                        io_task->psgl_handle = NULL;
                }
 
@@ -4714,6 +4733,20 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
        doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
        iowrite32(doorbell, phba->db_va +
                  beiscsi_conn->doorbell_offset);
+
+       /*
+        * There is no completion for CONTEXT_UPDATE. The completion of next
+        * WRB posted guarantees FW's processing and DMA'ing of it.
+        * Use beiscsi_put_wrb_handle to put it back in the pool which makes
+        * sure zero'ing or reuse of the WRB only after wrbs_per_cxn.
+        */
+       beiscsi_put_wrb_handle(pwrb_context, pwrb_handle,
+                              phba->params.wrbs_per_cxn);
+       beiscsi_log(phba, KERN_INFO,
+                   BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                   "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n",
+                   pwrb_handle, pwrb_context->free_index,
+                   pwrb_context->wrb_handles_available);
 }
 
 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
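
The CONTEXT_UPDATE WRB never gets a completion of its own, so its handle is returned to the per-connection pool right after posting. Correctness relies on the pool being a ring of wrbs_per_cxn entries: the slot is not handed out again until every other handle has been allocated, and by then the completion of some later WRB guarantees the firmware has consumed this one. The new wrb_lock serializes the alloc/free index updates. A rough, self-contained sketch of such a spinlock-protected handle ring, with illustrative names:

    #include <linux/spinlock.h>

    struct handle_ring {
            spinlock_t lock;
            void **slot;                    /* slot[0..size-1] of handle pointers */
            unsigned int size;              /* mirrors wrbs_per_cxn */
            unsigned int alloc_index;
            unsigned int free_index;
            unsigned int available;
    };

    static void *ring_get(struct handle_ring *r)
    {
            void *h = NULL;

            spin_lock_bh(&r->lock);
            if (r->available) {
                    h = r->slot[r->alloc_index];
                    r->alloc_index = (r->alloc_index + 1) % r->size;
                    r->available--;
            }
            spin_unlock_bh(&r->lock);
            return h;
    }

    static void ring_put(struct handle_ring *r, void *h)
    {
            spin_lock_bh(&r->lock);
            r->slot[r->free_index] = h;
            r->free_index = (r->free_index + 1) % r->size;
            r->available++;
            spin_unlock_bh(&r->lock);
    }
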
@@ -4761,9 +4794,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        io_task->pwrb_handle = NULL;
 
        if (task->sc) {
-               spin_lock(&phba->io_sgl_lock);
                io_task->psgl_handle = alloc_io_sgl_handle(phba);
-               spin_unlock(&phba->io_sgl_lock);
                if (!io_task->psgl_handle) {
                        beiscsi_log(phba, KERN_ERR,
                                    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4788,10 +4819,8 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
                        beiscsi_conn->task = task;
                        if (!beiscsi_conn->login_in_progress) {
-                               spin_lock(&phba->mgmt_sgl_lock);
                                io_task->psgl_handle = (struct sgl_handle *)
                                                alloc_mgmt_sgl_handle(phba);
-                               spin_unlock(&phba->mgmt_sgl_lock);
                                if (!io_task->psgl_handle) {
                                        beiscsi_log(phba, KERN_ERR,
                                                    BEISCSI_LOG_IO |
@@ -4830,9 +4859,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                                                beiscsi_conn->plogin_wrb_handle;
                        }
                } else {
-                       spin_lock(&phba->mgmt_sgl_lock);
                        io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
-                       spin_unlock(&phba->mgmt_sgl_lock);
                        if (!io_task->psgl_handle) {
                                beiscsi_log(phba, KERN_ERR,
                                            BEISCSI_LOG_IO |
@@ -4867,15 +4894,11 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        return 0;
 
 free_io_hndls:
-       spin_lock(&phba->io_sgl_lock);
        free_io_sgl_handle(phba, io_task->psgl_handle);
-       spin_unlock(&phba->io_sgl_lock);
        goto free_hndls;
 free_mgmt_hndls:
-       spin_lock(&phba->mgmt_sgl_lock);
        free_mgmt_sgl_handle(phba, io_task->psgl_handle);
        io_task->psgl_handle = NULL;
-       spin_unlock(&phba->mgmt_sgl_lock);
 free_hndls:
        phwi_ctrlr = phba->phwi_ctrlr;
        cri_index = BE_GET_CRI_FROM_CID(
@@ -4903,7 +4926,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
 
        pwrb = io_task->pwrb_handle->pwrb;
 
-       io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
        io_task->bhs_len = sizeof(struct be_cmd_bhs);
 
        if (writedir) {
@@ -4964,7 +4986,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
        unsigned int doorbell = 0;
 
        pwrb = io_task->pwrb_handle->pwrb;
-       io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
        io_task->bhs_len = sizeof(struct be_cmd_bhs);
 
        if (writedir) {
@@ -5023,6 +5044,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
        unsigned int doorbell = 0;
        unsigned int cid;
        unsigned int pwrb_typeoffset = 0;
+       int ret = 0;
 
        cid = beiscsi_conn->beiscsi_conn_cid;
        pwrb = io_task->pwrb_handle->pwrb;
@@ -5071,7 +5093,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
        case ISCSI_OP_LOGIN:
                AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
                ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
-               hwi_write_buffer(pwrb, task);
+               ret = hwi_write_buffer(pwrb, task);
                break;
        case ISCSI_OP_NOOP_OUT:
                if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
@@ -5091,19 +5113,19 @@ static int beiscsi_mtask(struct iscsi_task *task)
                                AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
                                              dmsg, pwrb, 0);
                }
-               hwi_write_buffer(pwrb, task);
+               ret = hwi_write_buffer(pwrb, task);
                break;
        case ISCSI_OP_TEXT:
                ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
-               hwi_write_buffer(pwrb, task);
+               ret = hwi_write_buffer(pwrb, task);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
-               hwi_write_buffer(pwrb, task);
+               ret = hwi_write_buffer(pwrb, task);
                break;
        case ISCSI_OP_LOGOUT:
                ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
-               hwi_write_buffer(pwrb, task);
+               ret = hwi_write_buffer(pwrb, task);
                break;
 
        default:
@@ -5114,6 +5136,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
                return -EINVAL;
        }
 
+       if (ret)
+               return ret;
+
        /* Set the task type */
        io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
                AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
@@ -5132,23 +5157,21 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
 {
        struct beiscsi_io_task *io_task = task->dd_data;
        struct scsi_cmnd *sc = task->sc;
-       struct beiscsi_hba *phba = NULL;
+       struct beiscsi_hba *phba;
        struct scatterlist *sg;
        int num_sg;
        unsigned int  writedir = 0, xferlen = 0;
 
-       phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
+       if (!io_task->conn->login_in_progress)
+               task->hdr->exp_statsn = 0;
 
        if (!sc)
                return beiscsi_mtask(task);
 
        io_task->scsi_cmnd = sc;
        num_sg = scsi_dma_map(sc);
+       phba = io_task->conn->phba;
        if (num_sg < 0) {
-               struct iscsi_conn *conn = task->conn;
-               struct beiscsi_hba *phba = NULL;
-
-               phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
                            "BM_%d : scsi_dma_map Failed "
@@ -5211,12 +5234,13 @@ static int beiscsi_bsg_request(struct bsg_job *job)
 
                rc = wait_event_interruptible_timeout(
                                        phba->ctrl.mcc_wait[tag],
-                                       phba->ctrl.mcc_numtag[tag],
+                                       phba->ctrl.mcc_tag_status[tag],
                                        msecs_to_jiffies(
                                        BEISCSI_HOST_MBX_TIMEOUT));
-               extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
-               status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
-               free_mcc_tag(&phba->ctrl, tag);
+               extd_status = (phba->ctrl.mcc_tag_status[tag] &
+                              CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
+               status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
+               free_mcc_wrb(&phba->ctrl, tag);
                resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
                sg_copy_from_buffer(job->reply_payload.sg_list,
                                    job->reply_payload.sg_cnt,
@@ -5313,7 +5337,6 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
 
 static void beiscsi_remove(struct pci_dev *pcidev)
 {
-
        struct beiscsi_hba *phba = NULL;
 
        phba = pci_get_drvdata(pcidev);
@@ -5323,9 +5346,9 @@ static void beiscsi_remove(struct pci_dev *pcidev)
        }
 
        beiscsi_destroy_def_ifaces(phba);
-       beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
        iscsi_boot_destroy_kset(phba->boot_kset);
        iscsi_host_remove(phba->shost);
+       beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
        pci_dev_put(phba->pcidev);
        iscsi_host_free(phba->shost);
        pci_disable_pcie_error_reporting(pcidev);
@@ -5334,23 +5357,6 @@ static void beiscsi_remove(struct pci_dev *pcidev)
        pci_disable_device(pcidev);
 }
 
-static void beiscsi_shutdown(struct pci_dev *pcidev)
-{
-
-       struct beiscsi_hba *phba = NULL;
-
-       phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
-       if (!phba) {
-               dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
-               return;
-       }
-
-       phba->state = BE_ADAPTER_STATE_SHUTDOWN;
-       iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
-       beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
-       pci_disable_device(pcidev);
-}
-
 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
 {
        int i, status;
@@ -5413,7 +5419,7 @@ static void be_eqd_update(struct beiscsi_hba *phba)
        if (num) {
                tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
                if (tag)
-                       beiscsi_mccq_compl(phba, tag, NULL, NULL);
+                       beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
        }
 }
 
@@ -5568,7 +5574,7 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
        for (i = 0; i < MAX_MCC_CMD; i++) {
                init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
                phba->ctrl.mcc_tag[i] = i + 1;
-               phba->ctrl.mcc_numtag[i + 1] = 0;
+               phba->ctrl.mcc_tag_status[i + 1] = 0;
                phba->ctrl.mcc_tag_available++;
        }
 
@@ -5670,6 +5676,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
                goto hba_free;
        }
 
+       /*
+        * FUNCTION_RESET should clean up any stale info in FW for this fn
+        */
        ret = beiscsi_cmd_reset_function(phba);
        if (ret) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5693,6 +5702,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
                            "BM_%d : Error getting fw config\n");
                goto free_port;
        }
+       mgmt_get_port_name(&phba->ctrl, phba);
+       beiscsi_get_params(phba);
 
        if (enable_msix)
                find_num_cpus(phba);
@@ -5710,7 +5721,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        }
 
        phba->shost->max_id = phba->params.cxns_per_ctrl;
-       beiscsi_get_params(phba);
        phba->shost->can_queue = phba->params.ios_per_ctrl;
        ret = beiscsi_init_port(phba);
        if (ret < 0) {
@@ -5723,7 +5733,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        for (i = 0; i < MAX_MCC_CMD; i++) {
                init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
                phba->ctrl.mcc_tag[i] = i + 1;
-               phba->ctrl.mcc_numtag[i + 1] = 0;
+               phba->ctrl.mcc_tag_status[i + 1] = 0;
                phba->ctrl.mcc_tag_available++;
                memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
                       sizeof(struct be_dma_mem));
@@ -5857,7 +5867,6 @@ static struct pci_driver beiscsi_pci_driver = {
        .name = DRV_NAME,
        .probe = beiscsi_dev_probe,
        .remove = beiscsi_remove,
-       .shutdown = beiscsi_shutdown,
        .id_table = beiscsi_pci_id_table,
        .err_handler = &beiscsi_eeh_handlers
 };
index 5c67c0732241b507e199ff756ad1c60e17c0b703..30a4606d9a3b9fa0abff5d5c027817c0642874b8 100644 (file)
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME               "be2iscsi"
-#define BUILD_STR              "10.6.0.1"
+#define BUILD_STR              "11.0.0.0"
 #define BE_NAME                        "Emulex OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
@@ -63,6 +63,7 @@
 #define BE2_SGE                        32
 #define BE2_DEFPDU_HDR_SZ      64
 #define BE2_DEFPDU_DATA_SZ     8192
+#define BE2_MAX_NUM_CQ_PROC    512
 
 #define MAX_CPUS               64
 #define BEISCSI_MAX_NUM_CPUS   7
 #define BE_ADAPTER_LINK_UP     0x001
 #define BE_ADAPTER_LINK_DOWN   0x002
 #define BE_ADAPTER_PCI_ERR     0x004
-#define BE_ADAPTER_STATE_SHUTDOWN      0x008
-#define BE_ADAPTER_CHECK_BOOT  0x010
+#define BE_ADAPTER_CHECK_BOOT  0x008
 
 
 #define BEISCSI_CLEAN_UNLOAD   0x01
@@ -304,6 +304,7 @@ struct invalidate_command_table {
 #define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
        (phwi_ctrlr->wrb_context[cri].ulp_num)
 struct hwi_wrb_context {
+       spinlock_t wrb_lock;
        struct list_head wrb_handle_list;
        struct list_head wrb_handle_drvr_list;
        struct wrb_handle **pwrb_handle_base;
@@ -398,7 +399,9 @@ struct beiscsi_hba {
                 * group together since they are used most frequently
                 * for cid to cri conversion
                 */
+#define BEISCSI_PHYS_PORT_MAX  4
                unsigned int phys_port;
+               /* valid values of phys_port id are 0, 1, 2, 3 */
                unsigned int eqid_count;
                unsigned int cqid_count;
                unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT];
@@ -416,6 +419,7 @@ struct beiscsi_hba {
        } fw_config;
 
        unsigned int state;
+       u8 optic_state;
        int get_boot;
        bool fw_timeout;
        bool ue_detected;
@@ -423,6 +427,8 @@ struct beiscsi_hba {
 
        bool mac_addr_set;
        u8 mac_address[ETH_ALEN];
+       u8 port_name;
+       u8 port_speed;
        char fw_ver_str[BEISCSI_VER_STRLEN];
        char wq_name[20];
        struct workqueue_struct *wq;    /* The actual work queue */
@@ -845,9 +851,10 @@ void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
 
 void hwi_ring_cq_db(struct beiscsi_hba *phba,
                     unsigned int id, unsigned int num_processed,
-                    unsigned char rearm, unsigned char event);
+                    unsigned char rearm);
 
-unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq);
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget);
+void beiscsi_process_mcc_cq(struct beiscsi_hba *phba);
 
 static inline bool beiscsi_error(struct beiscsi_hba *phba)
 {
@@ -1074,12 +1081,14 @@ struct hwi_context_memory {
 #define BEISCSI_LOG_CONFIG     0x0020  /* CONFIG Code Path */
 #define BEISCSI_LOG_ISCSI      0x0040  /* SCSI/iSCSI Protocol related Logs */
 
+#define __beiscsi_log(phba, level, fmt, arg...) \
+       shost_printk(level, phba->shost, fmt, __LINE__, ##arg)
+
 #define beiscsi_log(phba, level, mask, fmt, arg...) \
 do { \
        uint32_t log_value = phba->attr_log_enable; \
                if (((mask) & log_value) || (level[1] <= '3')) \
-                       shost_printk(level, phba->shost, \
-                                    fmt, __LINE__, ##arg); \
-} while (0)
+                       __beiscsi_log(phba, level, fmt, ##arg); \
+} while (0);
 
 #endif
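
The logging change only factors the shost_printk() call out into __beiscsi_log(); beiscsi_log() still prints when the message's subsystem bit is set in attr_log_enable, or unconditionally when the level is KERN_ERR or more severe (level[1] <= '3'), and it still appends __LINE__ as the first argument, which is what the leading "BM_%d"/"BG_%d" in every format string consumes. A typical call, assuming a valid phba, looks like this:

    /* "BM_%d" is filled with __LINE__, supplied by the macro itself */
    beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                "BM_%d : boot retries left %d\n", phba->get_boot);
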
index aea3e6b9477dcc73df18b263365609a93abd3758..83926e221f1ecd3b55f05a0770d107d8c170c40b 100644 (file)
@@ -161,20 +161,17 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_modify_eq_delay *req;
-       unsigned int tag = 0;
+       unsigned int tag;
        int i;
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
 
-       wrb = wrb_from_mccq(phba);
        req = embedded_payload(wrb);
-
-       wrb->tag0 |= tag;
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
@@ -187,8 +184,8 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
                                cpu_to_le32(set_eqd[i].delay_multiplier);
        }
 
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
 
@@ -209,22 +206,20 @@ unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb;
        struct be_cmd_reopen_session_req *req;
-       unsigned int tag = 0;
+       unsigned int tag;
 
        beiscsi_log(phba, KERN_INFO,
                    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                    "BG_%d : In bescsi_get_boot_target\n");
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
 
-       wrb = wrb_from_mccq(phba);
        req = embedded_payload(wrb);
-       wrb->tag0 |= tag;
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
                           OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
@@ -234,8 +229,8 @@ unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
        req->reopen_type = reopen_type;
        req->session_handle = sess_handle;
 
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
 
@@ -244,29 +239,27 @@ unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb;
        struct be_cmd_get_boot_target_req *req;
-       unsigned int tag = 0;
+       unsigned int tag;
 
        beiscsi_log(phba, KERN_INFO,
                    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                    "BG_%d : In bescsi_get_boot_target\n");
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
 
-       wrb = wrb_from_mccq(phba);
        req = embedded_payload(wrb);
-       wrb->tag0 |= tag;
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
                           OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
                           sizeof(struct be_cmd_get_boot_target_resp));
 
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
 
@@ -276,7 +269,7 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
 {
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb;
-       unsigned int tag = 0;
+       unsigned int tag;
        struct  be_cmd_get_session_req *req;
        struct be_cmd_get_session_resp *resp;
        struct be_sge *sge;
@@ -285,22 +278,17 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
                    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                    "BG_%d : In beiscsi_get_session_info\n");
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
 
        nonemb_cmd->size = sizeof(*resp);
        req = nonemb_cmd->va;
        memset(req, 0, sizeof(*req));
-       wrb = wrb_from_mccq(phba);
        sge = nonembedded_sgl(wrb);
-       wrb->tag0 |= tag;
-
-
-       wrb->tag0 |= tag;
        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
                           OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
@@ -310,11 +298,53 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);
 
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
 
+/**
+ * mgmt_get_port_name()- Get port name for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the alphanumeric character for port
+ *
+ **/
+int mgmt_get_port_name(struct be_ctrl_info *ctrl,
+                      struct beiscsi_hba *phba)
+{
+       int ret = 0;
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_get_port_name *ioctl;
+
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       memset(wrb, 0, sizeof(*wrb));
+       ioctl = embedded_payload(wrb);
+
+       be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+       be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+                          OPCODE_COMMON_GET_PORT_NAME,
+                          EMBED_MBX_MAX_PAYLOAD_SIZE);
+       ret = be_mbox_notify(ctrl);
+       phba->port_name = 0;
+       if (!ret) {
+               phba->port_name = ioctl->p.resp.port_names >>
+                                 (phba->fw_config.phys_port * 8) & 0xff;
+       } else {
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
+                           ret, ioctl->h.resp_hdr.status);
+       }
+
+       if (phba->port_name == 0)
+               phba->port_name = '?';
+
+       mutex_unlock(&ctrl->mbox_lock);
+       return ret;
+}
+
 /**
  * mgmt_get_fw_config()- Get the FW config for the function
  * @ctrl: ptr to Ctrl Info
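
GET_PORT_NAME reports the names of all physical ports packed one byte per port in port_names; mgmt_get_port_name() picks out its own byte using fw_config.phys_port (validated to be 0-3 elsewhere in this patch) and falls back to '?' on failure. Note that the shift binds tighter than the mask, so the expression above reads as (port_names >> (phys_port * 8)) & 0xff. A tiny illustrative example, with a made-up sample value:

    #include <linux/types.h>

    /* Illustrative only; mirrors the arithmetic in mgmt_get_port_name(). */
    static u8 port_name_from_resp(u32 port_names, u8 phys_port)
    {
            return (port_names >> (phys_port * 8)) & 0xff;
    }

    /* port_name_from_resp(0x34333231, 2) == 0x33, i.e. '3' */
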
@@ -331,91 +361,147 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
                                struct beiscsi_hba *phba)
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       struct be_fw_cfg *req = embedded_payload(wrb);
-       int status = 0;
+       struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
+       uint32_t cid_count, icd_count;
+       int status = -EINVAL;
+       uint8_t ulp_num = 0;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
+       be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
 
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+       be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
                           EMBED_MBX_MAX_PAYLOAD_SIZE);
-       status = be_mbox_notify(ctrl);
-       if (!status) {
-               uint8_t ulp_num = 0;
-               struct be_fw_cfg *pfw_cfg;
-               pfw_cfg = req;
 
-               if (!is_chip_be2_be3r(phba)) {
-                       phba->fw_config.eqid_count = pfw_cfg->eqid_count;
-                       phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+       if (be_mbox_notify(ctrl)) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d : Failed in mgmt_get_fw_config\n");
+               goto fail_init;
+       }
 
-                       beiscsi_log(phba, KERN_INFO,
-                                   BEISCSI_LOG_INIT,
-                                   "BG_%d : EQ_Count : %d CQ_Count : %d\n",
-                                   phba->fw_config.eqid_count,
+       /* FW response formats depend on port id */
+       phba->fw_config.phys_port = pfw_cfg->phys_port;
+       if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d : invalid physical port id %d\n",
+                           phba->fw_config.phys_port);
+               goto fail_init;
+       }
+
+       /* populate and check FW config against min and max values */
+       if (!is_chip_be2_be3r(phba)) {
+               phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+               phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+               if (phba->fw_config.eqid_count == 0 ||
+                   phba->fw_config.eqid_count > 2048) {
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BG_%d : invalid EQ count %d\n",
+                                   phba->fw_config.eqid_count);
+                       goto fail_init;
+               }
+               if (phba->fw_config.cqid_count == 0 ||
+                   phba->fw_config.cqid_count > 4096) {
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BG_%d : invalid CQ count %d\n",
                                    phba->fw_config.cqid_count);
+                       goto fail_init;
                }
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+                           phba->fw_config.eqid_count,
+                           phba->fw_config.cqid_count);
+       }
 
-               for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
-                       if (pfw_cfg->ulp[ulp_num].ulp_mode &
-                           BEISCSI_ULP_ISCSI_INI_MODE)
-                               set_bit(ulp_num,
-                               &phba->fw_config.ulp_supported);
-
-               phba->fw_config.phys_port = pfw_cfg->phys_port;
-               for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
-                       if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
-                               phba->fw_config.iscsi_cid_start[ulp_num] =
-                                       pfw_cfg->ulp[ulp_num].sq_base;
-                               phba->fw_config.iscsi_cid_count[ulp_num] =
-                                       pfw_cfg->ulp[ulp_num].sq_count;
-
-                               phba->fw_config.iscsi_icd_start[ulp_num] =
-                                       pfw_cfg->ulp[ulp_num].icd_base;
-                               phba->fw_config.iscsi_icd_count[ulp_num] =
-                                       pfw_cfg->ulp[ulp_num].icd_count;
-
-                               phba->fw_config.iscsi_chain_start[ulp_num] =
-                                       pfw_cfg->chain_icd[ulp_num].chain_base;
-                               phba->fw_config.iscsi_chain_count[ulp_num] =
-                                       pfw_cfg->chain_icd[ulp_num].chain_count;
-
-                               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                                           "BG_%d : Function loaded on ULP : %d\n"
-                                           "\tiscsi_cid_count : %d\n"
-                                           "\tiscsi_cid_start : %d\n"
-                                           "\t iscsi_icd_count : %d\n"
-                                           "\t iscsi_icd_start : %d\n",
-                                           ulp_num,
-                                           phba->fw_config.
-                                           iscsi_cid_count[ulp_num],
-                                           phba->fw_config.
-                                           iscsi_cid_start[ulp_num],
-                                           phba->fw_config.
-                                           iscsi_icd_count[ulp_num],
-                                           phba->fw_config.
-                                           iscsi_icd_start[ulp_num]);
-                       }
+       /**
+        * Check on which all ULP iSCSI Protocol is loaded.
+        * Set the Bit for those ULP. This set flag is used
+        * at all places in the code to check on which ULP
+        * iSCSi Protocol is loaded
+        **/
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (pfw_cfg->ulp[ulp_num].ulp_mode &
+                   BEISCSI_ULP_ISCSI_INI_MODE) {
+                       set_bit(ulp_num, &phba->fw_config.ulp_supported);
+
+                       /* Get the CID, ICD and Chain count for each ULP */
+                       phba->fw_config.iscsi_cid_start[ulp_num] =
+                               pfw_cfg->ulp[ulp_num].sq_base;
+                       phba->fw_config.iscsi_cid_count[ulp_num] =
+                               pfw_cfg->ulp[ulp_num].sq_count;
+
+                       phba->fw_config.iscsi_icd_start[ulp_num] =
+                               pfw_cfg->ulp[ulp_num].icd_base;
+                       phba->fw_config.iscsi_icd_count[ulp_num] =
+                               pfw_cfg->ulp[ulp_num].icd_count;
+
+                       phba->fw_config.iscsi_chain_start[ulp_num] =
+                               pfw_cfg->chain_icd[ulp_num].chain_base;
+                       phba->fw_config.iscsi_chain_count[ulp_num] =
+                               pfw_cfg->chain_icd[ulp_num].chain_count;
+
+                       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                   "BG_%d : Function loaded on ULP : %d\n"
+                                   "\tiscsi_cid_count : %d\n"
+                                   "\tiscsi_cid_start : %d\n"
+                                   "\t iscsi_icd_count : %d\n"
+                                   "\t iscsi_icd_start : %d\n",
+                                   ulp_num,
+                                   phba->fw_config.
+                                   iscsi_cid_count[ulp_num],
+                                   phba->fw_config.
+                                   iscsi_cid_start[ulp_num],
+                                   phba->fw_config.
+                                   iscsi_icd_count[ulp_num],
+                                   phba->fw_config.
+                                   iscsi_icd_start[ulp_num]);
                }
+       }
 
-               phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
-                                                 BEISCSI_FUNC_DUA_MODE);
+       if (phba->fw_config.ulp_supported == 0) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
+                           pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
+                           pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
+               goto fail_init;
+       }
 
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                           "BG_%d : DUA Mode : 0x%x\n",
-                           phba->fw_config.dual_ulp_aware);
+       /**
+        * ICD is shared among ULPs. Use icd_count of any one loaded ULP
+        **/
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+                       break;
+       icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+       if (icd_count == 0 || icd_count > 65536) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d: invalid ICD count %d\n", icd_count);
+               goto fail_init;
+       }
 
-       } else {
+       cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+                   BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+       if (cid_count == 0 || cid_count > 4096) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BG_%d : Failed in mgmt_get_fw_config\n");
-               status = -EINVAL;
+                           "BG_%d: invalid CID count %d\n", cid_count);
+               goto fail_init;
        }
 
-       spin_unlock(&ctrl->mbox_lock);
+       /**
+        * Check FW is dual ULP aware i.e. can handle either
+        * of the protocols.
+        */
+       phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+                                         BEISCSI_FUNC_DUA_MODE);
+
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BG_%d : DUA Mode : 0x%x\n",
+                   phba->fw_config.dual_ulp_aware);
+
+       /* all set, continue using this FW config */
+       status = 0;
+fail_init:
+       mutex_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -440,7 +526,7 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
        nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
        req = nonemb_cmd.va;
        memset(req, 0, sizeof(*req));
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
@@ -470,7 +556,7 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
        } else
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BG_%d :  Failed in mgmt_check_supported_fw\n");
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        if (nonemb_cmd.va)
                pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
@@ -501,8 +587,9 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
        req->region = region;
        req->sector = sector;
        req->offset = offset;
-       spin_lock(&ctrl->mbox_lock);
 
+       if (mutex_lock_interruptible(&ctrl->mbox_lock))
+               return 0;
        switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
        case BEISCSI_WRITE_FLASH:
                offset = sector * sector_size + offset;
@@ -521,28 +608,26 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
                            "BG_%d : Unsupported cmd = 0x%x\n\n",
                            bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
 
-               spin_unlock(&ctrl->mbox_lock);
+               mutex_unlock(&ctrl->mbox_lock);
                return -ENOSYS;
        }
 
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
 
-       wrb = wrb_from_mccq(phba);
        mcc_sge = nonembedded_sgl(wrb);
        be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
                           job->request_payload.sg_cnt);
        mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        mcc_sge->len = cpu_to_le32(nonemb_cmd->size);
-       wrb->tag0 |= tag;
 
-       be_mcc_notify(phba);
+       be_mcc_notify(phba, tag);
 
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
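
Along with converting mbox_lock from a spinlock to a mutex, the bsg/vendor path now takes it with mutex_lock_interruptible(), so a signal delivered to the waiting user process aborts the attempt (here by returning tag 0) instead of sleeping uninterruptibly. A generic sketch of that pattern, with an illustrative do_command() stand-in:

    #include <linux/mutex.h>

    static int do_command(void) { return 0; }       /* stand-in for the real work */

    static int issue_user_cmd(struct mutex *lock)
    {
            int ret;

            if (mutex_lock_interruptible(lock))
                    return -EINTR;  /* interrupted while waiting for the lock */

            ret = do_command();
            mutex_unlock(lock);
            return ret;
    }
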
 
@@ -558,12 +643,19 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
 int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
 {
        struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-       struct iscsi_cleanup_req *req = embedded_payload(wrb);
-       int status = 0;
+       struct be_mcc_wrb *wrb;
+       struct iscsi_cleanup_req *req;
+       unsigned int tag;
+       int status;
 
-       spin_lock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return -EBUSY;
+       }
 
+       req = embedded_payload(wrb);
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
@@ -572,11 +664,12 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
        req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
        req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
 
-       status =  be_mcc_notify_wait(phba);
+       be_mcc_notify(phba, tag);
+       status = be_mcc_compl_poll(phba, tag);
        if (status)
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
                            "BG_%d : mgmt_epfw_cleanup , FAILED\n");
-       spin_unlock(&ctrl->mbox_lock);
+       mutex_unlock(&ctrl->mbox_lock);
        return status;
 }
 
@@ -590,20 +683,18 @@ unsigned int  mgmt_invalidate_icds(struct beiscsi_hba *phba,
        struct be_mcc_wrb *wrb;
        struct be_sge *sge;
        struct invalidate_commands_params_in *req;
-       unsigned int i, tag = 0;
+       unsigned int i, tag;
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
 
        req = nonemb_cmd->va;
        memset(req, 0, sizeof(*req));
-       wrb = wrb_from_mccq(phba);
        sge = nonembedded_sgl(wrb);
-       wrb->tag0 |= tag;
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -621,8 +712,8 @@ unsigned int  mgmt_invalidate_icds(struct beiscsi_hba *phba,
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);
 
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
 
@@ -637,16 +728,14 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
        struct iscsi_invalidate_connection_params_in *req;
        unsigned int tag = 0;
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
-       wrb = wrb_from_mccq(phba);
-       wrb->tag0 |= tag;
-       req = embedded_payload(wrb);
 
+       req = embedded_payload(wrb);
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
                           OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
@@ -658,8 +747,8 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
        else
                req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
        req->save_cfg = savecfg_flag;
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
 
@@ -669,25 +758,23 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb;
        struct tcp_upload_params_in *req;
-       unsigned int tag = 0;
+       unsigned int tag;
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
-       wrb = wrb_from_mccq(phba);
-       req = embedded_payload(wrb);
-       wrb->tag0 |= tag;
 
+       req = embedded_payload(wrb);
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
                           OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
        req->id = (unsigned short)cid;
        req->upload_type = (unsigned char)upload_flag;
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
 
@@ -722,6 +809,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
        unsigned short cid = beiscsi_ep->ep_cid;
        struct be_sge *sge;
 
+       if (dst_addr->sa_family != PF_INET && dst_addr->sa_family != PF_INET6) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BG_%d : unknown addr family %d\n",
+                           dst_addr->sa_family);
+               return -EINVAL;
+       }
+
        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
 
@@ -732,18 +826,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 
        ptemplate_address = &template_address;
        ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       if (mutex_lock_interruptible(&ctrl->mbox_lock))
+               return 0;
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
-       wrb = wrb_from_mccq(phba);
-       sge = nonembedded_sgl(wrb);
 
+       sge = nonembedded_sgl(wrb);
        req = nonemb_cmd->va;
        memset(req, 0, sizeof(*req));
-       wrb->tag0 |= tag;
 
        be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -760,7 +853,8 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
                beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
                beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
                beiscsi_ep->ip_type = BE2_IPV4;
-       } else if (dst_addr->sa_family == PF_INET6) {
+       } else {
+               /* else its PF_INET6 family */
                req->ip_address.ip_type = BE2_IPV6;
                memcpy(&req->ip_address.addr,
                       &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
@@ -769,14 +863,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
                memcpy(&beiscsi_ep->dst6_addr,
                       &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
                beiscsi_ep->ip_type = BE2_IPV6;
-       } else{
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BG_%d : unknown addr family %d\n",
-                           dst_addr->sa_family);
-               spin_unlock(&ctrl->mbox_lock);
-               free_mcc_tag(&phba->ctrl, tag);
-               return -EINVAL;
-
        }
        req->cid = cid;
        i = phba->nxt_cqid++;
@@ -801,35 +887,45 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
                req->tcp_window_scale_count = 2;
        }
 
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
 
 unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
 {
        struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       struct be_cmd_get_all_if_id_req *req = embedded_payload(wrb);
-       struct be_cmd_get_all_if_id_req *pbe_allid = req;
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_get_all_if_id_req *req;
+       struct be_cmd_get_all_if_id_req *pbe_allid;
+       unsigned int tag;
        int status = 0;
 
-       memset(wrb, 0, sizeof(*wrb));
-
-       spin_lock(&ctrl->mbox_lock);
+       if (mutex_lock_interruptible(&ctrl->mbox_lock))
+               return -EINTR;
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return -ENOMEM;
+       }
 
+       req = embedded_payload(wrb);
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
                           sizeof(*req));
-       status = be_mbox_notify(ctrl);
-       if (!status)
-               phba->interface_handle = pbe_allid->if_hndl_list[0];
-       else {
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
+
+       status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
+       if (status) {
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
                            "BG_%d : Failed in mgmt_get_all_if_id\n");
+               return -EBUSY;
        }
-       spin_unlock(&ctrl->mbox_lock);
+
+       pbe_allid = embedded_payload(wrb);
+       phba->interface_handle = pbe_allid->if_hndl_list[0];
 
        return status;
 }
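
mgmt_get_all_if_id() used to issue its request on the bootstrap mailbox via be_mbox_notify(); it now goes through the MCC queue like the other runtime commands. The recurring shape in this series: take mbox_lock (now a mutex), let alloc_mcc_wrb() hand back both a WRB and a tag, post with be_mcc_notify(), drop the lock, then sleep in beiscsi_mccq_compl_wait() until the tagged completion arrives, optionally getting the WRB back to read an embedded response. A condensed sketch of a caller, assuming the driver's internal headers and the same helpers used above; the request-specific fill-in is elided:

    /* Condensed sketch; the helpers are the ones shown in this patch. */
    static int issue_mcc_cmd(struct beiscsi_hba *phba)
    {
            struct be_ctrl_info *ctrl = &phba->ctrl;
            struct be_mcc_wrb *wrb;
            unsigned int tag;

            if (mutex_lock_interruptible(&ctrl->mbox_lock))
                    return -EINTR;
            wrb = alloc_mcc_wrb(phba, &tag);        /* reserves a WRB and a tag */
            if (!wrb) {
                    mutex_unlock(&ctrl->mbox_lock);
                    return -ENOMEM;
            }

            /* ... prepare the embedded or non-embedded payload here ... */

            be_mcc_notify(phba, tag);               /* post; the tag identifies the completion */
            mutex_unlock(&ctrl->mbox_lock);

            /* sleeps until the tagged completion is seen; wrb points at the response */
            return beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
    }
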
@@ -852,27 +948,24 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
        unsigned int tag;
        int rc = 0;
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
                rc = -ENOMEM;
                goto free_cmd;
        }
 
-       wrb = wrb_from_mccq(phba);
-       wrb->tag0 |= tag;
        sge = nonembedded_sgl(wrb);
-
        be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma));
        sge->len = cpu_to_le32(nonemb_cmd->size);
 
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
 
-       rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd);
+       rc = beiscsi_mccq_compl_wait(phba, tag, NULL, nonemb_cmd);
 
        if (resp_buf)
                memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
@@ -1003,8 +1096,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
        uint32_t ip_type;
        int rc;
 
-       if (mgmt_get_all_if_id(phba))
-               return -EIO;
+       rc = mgmt_get_all_if_id(phba);
+       if (rc)
+               return rc;
 
        ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
                BE2_IPV6 : BE2_IPV4 ;
@@ -1173,8 +1267,9 @@ int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
        uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
        int rc;
 
-       if (mgmt_get_all_if_id(phba))
-               return -EIO;
+       rc = mgmt_get_all_if_id(phba);
+       if (rc)
+               return rc;
 
        do {
                rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
@@ -1245,55 +1340,27 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
 
 unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
 {
-       unsigned int tag = 0;
+       unsigned int tag;
        struct be_mcc_wrb *wrb;
        struct be_cmd_hba_name *req;
        struct be_ctrl_info *ctrl = &phba->ctrl;
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
+       if (mutex_lock_interruptible(&ctrl->mbox_lock))
+               return 0;
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               return 0;
        }
 
-       wrb = wrb_from_mccq(phba);
        req = embedded_payload(wrb);
-       wrb->tag0 |= tag;
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
                        OPCODE_ISCSI_INI_CFG_GET_HBA_NAME,
                        sizeof(*req));
 
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
-       return tag;
-}
-
-unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
-{
-       unsigned int tag = 0;
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_ntwk_link_status_req *req;
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
-               return tag;
-       }
-
-       wrb = wrb_from_mccq(phba);
-       req = embedded_payload(wrb);
-       wrb->tag0 |= tag;
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
-                       sizeof(*req));
-
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
        return tag;
 }
 
@@ -1330,7 +1397,7 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
                        return -EAGAIN;
                }
 
-               rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+               rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
                if (rc) {
                        beiscsi_log(phba, KERN_ERR,
                                    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
@@ -1364,7 +1431,7 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
                        return -EAGAIN;
                }
 
-               rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+               rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
                if (rc) {
                        beiscsi_log(phba, KERN_ERR,
                                    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
@@ -1406,7 +1473,7 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
                return -EBUSY;
        }
 
-       rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+       rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
        if (rc) {
                beiscsi_log(phba, KERN_ERR,
                            (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
@@ -1749,19 +1816,17 @@ int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
                    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                    "BG_%d : In bescsi_logout_fwboot_sess\n");
 
-       spin_lock(&ctrl->mbox_lock);
-       tag = alloc_mcc_tag(phba);
-       if (!tag) {
-               spin_unlock(&ctrl->mbox_lock);
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
                beiscsi_log(phba, KERN_INFO,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                            "BG_%d : MBX Tag Failure\n");
                return -EINVAL;
        }
 
-       wrb = wrb_from_mccq(phba);
        req = embedded_payload(wrb);
-       wrb->tag0 |= tag;
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
                           OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
@@ -1769,10 +1834,10 @@ int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
 
        /* Set the session handle */
        req->session_handle = fw_sess_handle;
-       be_mcc_notify(phba);
-       spin_unlock(&ctrl->mbox_lock);
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
 
-       rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+       rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
        if (rc) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
index c1dbb690ee279f669fffb2ddab5919189923b8c8..f3a48a04b2ca6d3254e61777f7939410cac0fa53 100644 (file)
@@ -268,6 +268,8 @@ struct beiscsi_endpoint {
 
 int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
                                 struct beiscsi_hba *phba);
+int mgmt_get_port_name(struct be_ctrl_info *ctrl,
+                      struct beiscsi_hba *phba);
 
 unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
                                         struct beiscsi_endpoint *beiscsi_ep,
index 251e2ff8ff5f797848b9603fdfff2d4fdc5eea71..a1ada4a31c9717f32d4660d3e50f6a7dfc1776c9 100644 (file)
@@ -2803,7 +2803,7 @@ void
 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
 {
        memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
-       memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+       strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
 }
 
 void
index 67405c628864875bab6e56beb2a67846fa690ead..d7029ea5d3193ec63e3a71caec5cb1e42ef1e82f 100644 (file)
@@ -97,6 +97,15 @@ static void __exit bnx2fc_mod_exit(void);
 
 unsigned int bnx2fc_debug_level;
 module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging,
+               "Option to enable extended logging,\n"
+               "\t\tDefault is 0 - no logging.\n"
+               "\t\t0x01 - SCSI cmd error, cleanup.\n"
+               "\t\t0x02 - Session setup, cleanup, etc.\n"
+               "\t\t0x04 - lport events, link, mtu, etc.\n"
+               "\t\t0x08 - ELS logs.\n"
+               "\t\t0x10 - fcoe L2 frame related logs.\n"
+               "\t\t0xff - LOG all messages.");
 
 static int bnx2fc_cpu_callback(struct notifier_block *nfb,
                             unsigned long action, void *hcpu);
index 0002caf687dd027f959e76e3e64cac52b7ded11d..2230dab67ca550549658c4d7b49f2e8bbf134e93 100644 (file)
@@ -1104,8 +1104,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
        struct bnx2fc_cmd *io_req;
        struct fc_lport *lport;
        struct bnx2fc_rport *tgt;
-       int rc = FAILED;
-
+       int rc;
 
        rc = fc_block_scsi_eh(sc_cmd);
        if (rc)
@@ -1114,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
        lport = shost_priv(sc_cmd->device->host);
        if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
                printk(KERN_ERR PFX "eh_abort: link not ready\n");
-               return rc;
+               return FAILED;
        }
 
        tgt = (struct bnx2fc_rport *)&rp[1];
index 3613581343159dc67e8980424b2346d301dec01f..93880ed6291cfbda8a88ad6130a433c0c461c727 100644 (file)
@@ -562,7 +562,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
                        /*
                         * Command Lock contention
                         */
-                       err = SCSI_DH_RETRY;
+                       err = SCSI_DH_IMM_RETRY;
                break;
        default:
                break;
@@ -612,6 +612,8 @@ retry:
                err = mode_select_handle_sense(sdev, h->sense);
                if (err == SCSI_DH_RETRY && retry_cnt--)
                        goto retry;
+               if (err == SCSI_DH_IMM_RETRY)
+                       goto retry;
        }
        if (err == SCSI_DH_OK) {
                h->state = RDAC_STATE_ACTIVE;
index b67661836c9fa26fccd1e23a82508c1d567a5916..d1dd1616f983bb7a567d4eb465e09a5c9cd611c5 100644 (file)
@@ -1,6 +1,6 @@
 config SCSI_HISI_SAS
        tristate "HiSilicon SAS"
-       depends on HAS_DMA
+       depends on HAS_DMA && HAS_IOMEM
        depends on ARM64 || COMPILE_TEST
        select SCSI_SAS_LIBSAS
        select BLK_DEV_INTEGRITY
index 3e70eae81343a873bab1cbe39160c5ae2e27ffba..c6d3a1b5fcb907298d6c1a1d2f597ca2d19d7695 100644 (file)
@@ -1,2 +1,2 @@
 obj-$(CONFIG_SCSI_HISI_SAS)            += hisi_sas_main.o
-obj-$(CONFIG_SCSI_HISI_SAS)            += hisi_sas_v1_hw.o
+obj-$(CONFIG_SCSI_HISI_SAS)            += hisi_sas_v1_hw.o hisi_sas_v2_hw.o
index 5af2e4187f01ee9dccb6995ab169bd5d75fffd99..02da7e4f9eb6975172203737479b9cc4b2d7e3bc 100644 (file)
 #ifndef _HISI_SAS_H_
 #define _HISI_SAS_H_
 
+#include <linux/acpi.h>
 #include <linux/dmapool.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
+#include <scsi/sas_ata.h>
 #include <scsi/libsas.h>
 
-#define DRV_VERSION "v1.0"
+#define DRV_VERSION "v1.2"
 
 #define HISI_SAS_MAX_PHYS      9
 #define HISI_SAS_MAX_QUEUES    32
 #define HISI_SAS_QUEUE_SLOTS 512
-#define HISI_SAS_MAX_ITCT_ENTRIES 4096
+#define HISI_SAS_MAX_ITCT_ENTRIES 2048
 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
-#define HISI_SAS_COMMAND_ENTRIES 8192
 
 #define HISI_SAS_STATUS_BUF_SZ \
                (sizeof(struct hisi_sas_err_record) + 1024)
 
 #define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024)
 #define HISI_SAS_MAX_SMP_RESP_SZ 1028
+#define HISI_SAS_MAX_STP_RESP_SZ 28
+
+#define DEV_IS_EXPANDER(type) \
+       ((type == SAS_EDGE_EXPANDER_DEVICE) || \
+       (type == SAS_FANOUT_EXPANDER_DEVICE))
 
 struct hisi_hba;
 
@@ -132,6 +139,8 @@ struct hisi_sas_hw {
                        struct hisi_sas_tmf_task *tmf);
        int (*prep_smp)(struct hisi_hba *hisi_hba,
                        struct hisi_sas_slot *slot);
+       int (*prep_stp)(struct hisi_hba *hisi_hba,
+                       struct hisi_sas_slot *slot);
        int (*slot_complete)(struct hisi_hba *hisi_hba,
                             struct hisi_sas_slot *slot, int abort);
        void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -140,6 +149,7 @@ struct hisi_sas_hw {
        void (*free_device)(struct hisi_hba *hisi_hba,
                            struct hisi_sas_device *dev);
        int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
+       int max_command_entries;
        int complete_hdr_size;
 };
 
@@ -244,18 +254,7 @@ struct hisi_sas_itct {
        __le64 sas_addr;
        __le64 qw2;
        __le64 qw3;
-       __le64 qw4;
-       __le64 qw_sata_ncq0_3;
-       __le64 qw_sata_ncq7_4;
-       __le64 qw_sata_ncq11_8;
-       __le64 qw_sata_ncq15_12;
-       __le64 qw_sata_ncq19_16;
-       __le64 qw_sata_ncq23_20;
-       __le64 qw_sata_ncq27_24;
-       __le64 qw_sata_ncq31_28;
-       __le64 qw_non_ncq_iptt;
-       __le64 qw_rsvd0;
-       __le64 qw_rsvd1;
+       __le64 qw4_15[12];
 };
 
 struct hisi_sas_iost {
@@ -266,17 +265,7 @@ struct hisi_sas_iost {
 };
 
 struct hisi_sas_err_record {
-       /* dw0 */
-       __le32 dma_err_type;
-
-       /* dw1 */
-       __le32 trans_tx_fail_type;
-
-       /* dw2 */
-       __le32 trans_rx_fail_type;
-
-       /* dw3 */
-       u32 rsvd;
+       u32     data[4];
 };
 
 struct hisi_sas_initial_fis {
index 99b1950d751c2a7e1325ae26ebb3ce6f9bcb2260..2194917bd84de14ddc446766ba69490362a7d099 100644 (file)
 #include "hisi_sas.h"
 #define DRV_NAME "hisi_sas"
 
-#define DEV_IS_EXPANDER(type) \
-       ((type == SAS_EDGE_EXPANDER_DEVICE) || \
-       (type == SAS_FANOUT_EXPANDER_DEVICE))
-
 #define DEV_IS_GONE(dev) \
        ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
 
@@ -111,6 +107,12 @@ static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
        return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
 }
 
+static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
+                                 struct hisi_sas_slot *slot)
+{
+       return hisi_hba->hw->prep_stp(hisi_hba, slot);
+}
+
 static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
                              int is_tmf, struct hisi_sas_tmf_task *tmf,
                              int *pass)
@@ -234,6 +236,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
        case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+               rc = hisi_sas_task_prep_ata(hisi_hba, slot);
+               break;
        default:
                dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
                        task->task_proto);
@@ -977,9 +981,9 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
 
 static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
 {
-       int i, s;
        struct platform_device *pdev = hisi_hba->pdev;
        struct device *dev = &pdev->dev;
+       int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
 
        spin_lock_init(&hisi_hba->lock);
        for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -1039,13 +1043,13 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
 
        memset(hisi_hba->itct, 0, s);
 
-       hisi_hba->slot_info = devm_kcalloc(dev, HISI_SAS_COMMAND_ENTRIES,
+       hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
                                           sizeof(struct hisi_sas_slot),
                                           GFP_KERNEL);
        if (!hisi_hba->slot_info)
                goto err_out;
 
-       s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_iost);
+       s = max_command_entries * sizeof(struct hisi_sas_iost);
        hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
                                            GFP_KERNEL);
        if (!hisi_hba->iost)
@@ -1053,7 +1057,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
 
        memset(hisi_hba->iost, 0, s);
 
-       s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint);
+       s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
                                &hisi_hba->breakpoint_dma, GFP_KERNEL);
        if (!hisi_hba->breakpoint)
@@ -1061,7 +1065,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
 
        memset(hisi_hba->breakpoint, 0, s);
 
-       hisi_hba->slot_index_count = HISI_SAS_COMMAND_ENTRIES;
+       hisi_hba->slot_index_count = max_command_entries;
        s = hisi_hba->slot_index_count / sizeof(unsigned long);
        hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
        if (!hisi_hba->slot_index_tags)
@@ -1079,7 +1083,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
                goto err_out;
        memset(hisi_hba->initial_fis, 0, s);
 
-       s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint) * 2;
+       s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
        hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
                                &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
        if (!hisi_hba->sata_breakpoint)
@@ -1102,7 +1106,7 @@ err_out:
 static void hisi_sas_free(struct hisi_hba *hisi_hba)
 {
        struct device *dev = &hisi_hba->pdev->dev;
-       int i, s;
+       int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
 
        for (i = 0; i < hisi_hba->queue_count; i++) {
                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
@@ -1127,12 +1131,12 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
                dma_free_coherent(dev, s,
                                  hisi_hba->itct, hisi_hba->itct_dma);
 
-       s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_iost);
+       s = max_command_entries * sizeof(struct hisi_sas_iost);
        if (hisi_hba->iost)
                dma_free_coherent(dev, s,
                                  hisi_hba->iost, hisi_hba->iost_dma);
 
-       s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint);
+       s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        if (hisi_hba->breakpoint)
                dma_free_coherent(dev, s,
                                  hisi_hba->breakpoint,
@@ -1145,7 +1149,7 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
                                  hisi_hba->initial_fis,
                                  hisi_hba->initial_fis_dma);
 
-       s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint) * 2;
+       s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
        if (hisi_hba->sata_breakpoint)
                dma_free_coherent(dev, s,
                                  hisi_hba->sata_breakpoint,
@@ -1163,7 +1167,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
        struct hisi_hba *hisi_hba;
        struct device *dev = &pdev->dev;
        struct device_node *np = pdev->dev.of_node;
-       struct property *sas_addr_prop;
 
        shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
        if (!shost)
@@ -1177,27 +1180,34 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 
        init_timer(&hisi_hba->timer);
 
-       sas_addr_prop = of_find_property(np, "sas-addr", NULL);
-       if (!sas_addr_prop || (sas_addr_prop->length != SAS_ADDR_SIZE))
+       if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
+                                         SAS_ADDR_SIZE))
                goto err_out;
-       memcpy(hisi_hba->sas_addr, sas_addr_prop->value, SAS_ADDR_SIZE);
 
-       if (of_property_read_u32(np, "ctrl-reset-reg",
-                                &hisi_hba->ctrl_reset_reg))
-               goto err_out;
+       if (np) {
+               hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
+                                       "hisilicon,sas-syscon");
+               if (IS_ERR(hisi_hba->ctrl))
+                       goto err_out;
 
-       if (of_property_read_u32(np, "ctrl-reset-sts-reg",
-                                &hisi_hba->ctrl_reset_sts_reg))
-               goto err_out;
+               if (device_property_read_u32(dev, "ctrl-reset-reg",
+                                            &hisi_hba->ctrl_reset_reg))
+                       goto err_out;
 
-       if (of_property_read_u32(np, "ctrl-clock-ena-reg",
-                                &hisi_hba->ctrl_clock_ena_reg))
-               goto err_out;
+               if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
+                                            &hisi_hba->ctrl_reset_sts_reg))
+                       goto err_out;
+
+               if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
+                                            &hisi_hba->ctrl_clock_ena_reg))
+                       goto err_out;
+       }
 
-       if (of_property_read_u32(np, "phy-count", &hisi_hba->n_phy))
+       if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
                goto err_out;
 
-       if (of_property_read_u32(np, "queue-count", &hisi_hba->queue_count))
+       if (device_property_read_u32(dev, "queue-count",
+                                    &hisi_hba->queue_count))
                goto err_out;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1205,11 +1215,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
        if (IS_ERR(hisi_hba->regs))
                goto err_out;
 
-       hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(
-                               np, "hisilicon,sas-syscon");
-       if (IS_ERR(hisi_hba->ctrl))
-               goto err_out;
-
        if (hisi_sas_alloc(hisi_hba, shost)) {
                hisi_sas_free(hisi_hba);
                goto err_out;
@@ -1277,8 +1282,8 @@ int hisi_sas_probe(struct platform_device *pdev,
        shost->max_channel = 1;
        shost->max_cmd_len = 16;
        shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
-       shost->can_queue = HISI_SAS_COMMAND_ENTRIES;
-       shost->cmd_per_lun = HISI_SAS_COMMAND_ENTRIES;
+       shost->can_queue = hisi_hba->hw->max_command_entries;
+       shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
 
        sha->sas_ha_name = DRV_NAME;
        sha->dev = &hisi_hba->pdev->dev;
index 057fdeb720acec997f4a4a5952318c58ab2f0b3e..ce5f65d7fff8861354ab53957dd76f95ed31c788 100644 (file)
@@ -288,6 +288,20 @@ struct hisi_sas_complete_v1_hdr {
        __le32 data;
 };
 
+struct hisi_sas_err_record_v1 {
+       /* dw0 */
+       __le32 dma_err_type;
+
+       /* dw1 */
+       __le32 trans_tx_fail_type;
+
+       /* dw2 */
+       __le32 trans_rx_fail_type;
+
+       /* dw3 */
+       u32 rsvd;
+};
+
 enum {
        HISI_SAS_PHY_BCAST_ACK = 0,
        HISI_SAS_PHY_SL_PHY_ENABLED,
@@ -392,6 +406,8 @@ enum {
        TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */
 };
 
+#define HISI_SAS_COMMAND_ENTRIES_V1_HW 8192
+
 #define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS)
 #define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES)
 #define HISI_SAS_FATAL_INT_NR (2)
@@ -607,31 +623,42 @@ static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
                        return -EIO;
        }
 
-       /* Apply reset and disable clock */
-       /* clk disable reg is offset by +4 bytes from clk enable reg */
-       regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
-                    RESET_VALUE);
-       regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
-                    RESET_VALUE);
-       msleep(1);
-       regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
-       if (RESET_VALUE != (val & RESET_VALUE)) {
-               dev_err(dev, "Reset failed\n");
-               return -EIO;
-       }
+       if (ACPI_HANDLE(dev)) {
+               acpi_status s;
 
-       /* De-reset and enable clock */
-       /* deassert rst reg is offset by +4 bytes from assert reg */
-       regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
-                    RESET_VALUE);
-       regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
-                    RESET_VALUE);
-       msleep(1);
-       regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
-       if (val & RESET_VALUE) {
-               dev_err(dev, "De-reset failed\n");
-               return -EIO;
-       }
+               s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
+               if (ACPI_FAILURE(s)) {
+                       dev_err(dev, "Reset failed\n");
+                       return -EIO;
+               }
+       } else if (hisi_hba->ctrl) {
+               /* Apply reset and disable clock */
+               /* clk disable reg is offset by +4 bytes from clk enable reg */
+               regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
+                            RESET_VALUE);
+               regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
+                            RESET_VALUE);
+               msleep(1);
+               regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
+               if (RESET_VALUE != (val & RESET_VALUE)) {
+                       dev_err(dev, "Reset failed\n");
+                       return -EIO;
+               }
+
+               /* De-reset and enable clock */
+               /* deassert rst reg is offset by +4 bytes from assert reg */
+               regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
+                            RESET_VALUE);
+               regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
+                            RESET_VALUE);
+               msleep(1);
+               regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
+               if (val & RESET_VALUE) {
+                       dev_err(dev, "De-reset failed\n");
+                       return -EIO;
+               }
+       } else
+               dev_warn(dev, "no reset method\n");
 
        return 0;
 }
@@ -1096,7 +1123,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
                           struct hisi_sas_slot *slot)
 {
        struct task_status_struct *ts = &task->task_status;
-       struct hisi_sas_err_record *err_record = slot->status_buffer;
+       struct hisi_sas_err_record_v1 *err_record = slot->status_buffer;
        struct device *dev = &hisi_hba->pdev->dev;
 
        switch (task->task_proto) {
@@ -1220,7 +1247,6 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
        struct domain_device *device;
        enum exec_status sts;
        struct hisi_sas_complete_v1_hdr *complete_queue =
-                       (struct hisi_sas_complete_v1_hdr *)
                        hisi_hba->complete_hdr[slot->cmplt_queue];
        struct hisi_sas_complete_v1_hdr *complete_hdr;
        u32 cmplt_hdr_data;
@@ -1289,13 +1315,10 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
                goto out;
        }
 
-       if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK) {
-               if (!(cmplt_hdr_data & CMPLT_HDR_CMD_CMPLT_MSK) ||
-                   !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK))
-                       ts->stat = SAS_DATA_OVERRUN;
-               else
-                       slot_err_v1_hw(hisi_hba, task, slot);
+       if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK &&
+               !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
 
+               slot_err_v1_hw(hisi_hba, task, slot);
                goto out;
        }
 
@@ -1799,6 +1822,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
        .phy_disable = disable_phy_v1_hw,
        .phy_hard_reset = phy_hard_reset_v1_hw,
        .get_wideport_bitmap = get_wideport_bitmap_v1_hw,
+       .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
        .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
 };
 
@@ -1818,12 +1842,20 @@ static const struct of_device_id sas_v1_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, sas_v1_of_match);
 
+static const struct acpi_device_id sas_v1_acpi_match[] = {
+       { "HISI0161", 0 },
+       { }
+};
+
+MODULE_DEVICE_TABLE(acpi, sas_v1_acpi_match);
+
 static struct platform_driver hisi_sas_v1_driver = {
        .probe = hisi_sas_v1_probe,
        .remove = hisi_sas_v1_remove,
        .driver = {
                .name = DRV_NAME,
                .of_match_table = sas_v1_of_match,
+               .acpi_match_table = ACPI_PTR(sas_v1_acpi_match),
        },
 };
 
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
new file mode 100644 (file)
index 0000000..58e1956
--- /dev/null
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -0,0 +1,2205 @@
+/*
+ * Copyright (c) 2016 Linaro Ltd.
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include "hisi_sas.h"
+#define DRV_NAME "hisi_sas_v2_hw"
+
+/* global registers need init*/
+#define DLVRY_QUEUE_ENABLE             0x0
+#define IOST_BASE_ADDR_LO              0x8
+#define IOST_BASE_ADDR_HI              0xc
+#define ITCT_BASE_ADDR_LO              0x10
+#define ITCT_BASE_ADDR_HI              0x14
+#define IO_BROKEN_MSG_ADDR_LO          0x18
+#define IO_BROKEN_MSG_ADDR_HI          0x1c
+#define PHY_CONTEXT                    0x20
+#define PHY_STATE                      0x24
+#define PHY_PORT_NUM_MA                        0x28
+#define PORT_STATE                     0x2c
+#define PORT_STATE_PHY8_PORT_NUM_OFF   16
+#define PORT_STATE_PHY8_PORT_NUM_MSK   (0xf << PORT_STATE_PHY8_PORT_NUM_OFF)
+#define PORT_STATE_PHY8_CONN_RATE_OFF  20
+#define PORT_STATE_PHY8_CONN_RATE_MSK  (0xf << PORT_STATE_PHY8_CONN_RATE_OFF)
+#define PHY_CONN_RATE                  0x30
+#define HGC_TRANS_TASK_CNT_LIMIT       0x38
+#define AXI_AHB_CLK_CFG                        0x3c
+#define ITCT_CLR                       0x44
+#define ITCT_CLR_EN_OFF                        16
+#define ITCT_CLR_EN_MSK                        (0x1 << ITCT_CLR_EN_OFF)
+#define ITCT_DEV_OFF                   0
+#define ITCT_DEV_MSK                   (0x7ff << ITCT_DEV_OFF)
+#define AXI_USER1                      0x48
+#define AXI_USER2                      0x4c
+#define IO_SATA_BROKEN_MSG_ADDR_LO     0x58
+#define IO_SATA_BROKEN_MSG_ADDR_HI     0x5c
+#define SATA_INITI_D2H_STORE_ADDR_LO   0x60
+#define SATA_INITI_D2H_STORE_ADDR_HI   0x64
+#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL        0x84
+#define HGC_SAS_TXFAIL_RETRY_CTRL      0x88
+#define HGC_GET_ITV_TIME               0x90
+#define DEVICE_MSG_WORK_MODE           0x94
+#define OPENA_WT_CONTI_TIME            0x9c
+#define I_T_NEXUS_LOSS_TIME            0xa0
+#define MAX_CON_TIME_LIMIT_TIME                0xa4
+#define BUS_INACTIVE_LIMIT_TIME                0xa8
+#define REJECT_TO_OPEN_LIMIT_TIME      0xac
+#define CFG_AGING_TIME                 0xbc
+#define HGC_DFX_CFG2                   0xc0
+#define HGC_IOMB_PROC1_STATUS  0x104
+#define CFG_1US_TIMER_TRSH             0xcc
+#define HGC_INVLD_DQE_INFO             0x148
+#define HGC_INVLD_DQE_INFO_FB_CH0_OFF  9
+#define HGC_INVLD_DQE_INFO_FB_CH0_MSK  (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
+#define HGC_INVLD_DQE_INFO_FB_CH3_OFF  18
+#define INT_COAL_EN                    0x19c
+#define OQ_INT_COAL_TIME               0x1a0
+#define OQ_INT_COAL_CNT                        0x1a4
+#define ENT_INT_COAL_TIME              0x1a8
+#define ENT_INT_COAL_CNT               0x1ac
+#define OQ_INT_SRC                     0x1b0
+#define OQ_INT_SRC_MSK                 0x1b4
+#define ENT_INT_SRC1                   0x1b8
+#define ENT_INT_SRC1_D2H_FIS_CH0_OFF   0
+#define ENT_INT_SRC1_D2H_FIS_CH0_MSK   (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
+#define ENT_INT_SRC1_D2H_FIS_CH1_OFF   8
+#define ENT_INT_SRC1_D2H_FIS_CH1_MSK   (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
+#define ENT_INT_SRC2                   0x1bc
+#define ENT_INT_SRC3                   0x1c0
+#define ENT_INT_SRC3_ITC_INT_OFF       15
+#define ENT_INT_SRC3_ITC_INT_MSK       (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
+#define ENT_INT_SRC_MSK1               0x1c4
+#define ENT_INT_SRC_MSK2               0x1c8
+#define ENT_INT_SRC_MSK3               0x1cc
+#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
+#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
+#define SAS_ECC_INTR_MSK               0x1ec
+#define HGC_ERR_STAT_EN                        0x238
+#define DLVRY_Q_0_BASE_ADDR_LO         0x260
+#define DLVRY_Q_0_BASE_ADDR_HI         0x264
+#define DLVRY_Q_0_DEPTH                        0x268
+#define DLVRY_Q_0_WR_PTR               0x26c
+#define DLVRY_Q_0_RD_PTR               0x270
+#define HYPER_STREAM_ID_EN_CFG         0xc80
+#define OQ0_INT_SRC_MSK                        0xc90
+#define COMPL_Q_0_BASE_ADDR_LO         0x4e0
+#define COMPL_Q_0_BASE_ADDR_HI         0x4e4
+#define COMPL_Q_0_DEPTH                        0x4e8
+#define COMPL_Q_0_WR_PTR               0x4ec
+#define COMPL_Q_0_RD_PTR               0x4f0
+
+/* phy registers need init */
+#define PORT_BASE                      (0x2000)
+
+#define PHY_CFG                                (PORT_BASE + 0x0)
+#define HARD_PHY_LINKRATE              (PORT_BASE + 0x4)
+#define PHY_CFG_ENA_OFF                        0
+#define PHY_CFG_ENA_MSK                        (0x1 << PHY_CFG_ENA_OFF)
+#define PHY_CFG_DC_OPT_OFF             2
+#define PHY_CFG_DC_OPT_MSK             (0x1 << PHY_CFG_DC_OPT_OFF)
+#define PROG_PHY_LINK_RATE             (PORT_BASE + 0x8)
+#define PROG_PHY_LINK_RATE_MAX_OFF     0
+#define PROG_PHY_LINK_RATE_MAX_MSK     (0xff << PROG_PHY_LINK_RATE_MAX_OFF)
+#define PHY_CTRL                       (PORT_BASE + 0x14)
+#define PHY_CTRL_RESET_OFF             0
+#define PHY_CTRL_RESET_MSK             (0x1 << PHY_CTRL_RESET_OFF)
+#define SAS_PHY_CTRL                   (PORT_BASE + 0x20)
+#define SL_CFG                         (PORT_BASE + 0x84)
+#define PHY_PCN                                (PORT_BASE + 0x44)
+#define SL_TOUT_CFG                    (PORT_BASE + 0x8c)
+#define SL_CONTROL                     (PORT_BASE + 0x94)
+#define SL_CONTROL_NOTIFY_EN_OFF       0
+#define SL_CONTROL_NOTIFY_EN_MSK       (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
+#define TX_ID_DWORD0                   (PORT_BASE + 0x9c)
+#define TX_ID_DWORD1                   (PORT_BASE + 0xa0)
+#define TX_ID_DWORD2                   (PORT_BASE + 0xa4)
+#define TX_ID_DWORD3                   (PORT_BASE + 0xa8)
+#define TX_ID_DWORD4                   (PORT_BASE + 0xaC)
+#define TX_ID_DWORD5                   (PORT_BASE + 0xb0)
+#define TX_ID_DWORD6                   (PORT_BASE + 0xb4)
+#define RX_IDAF_DWORD0                 (PORT_BASE + 0xc4)
+#define RX_IDAF_DWORD1                 (PORT_BASE + 0xc8)
+#define RX_IDAF_DWORD2                 (PORT_BASE + 0xcc)
+#define RX_IDAF_DWORD3                 (PORT_BASE + 0xd0)
+#define RX_IDAF_DWORD4                 (PORT_BASE + 0xd4)
+#define RX_IDAF_DWORD5                 (PORT_BASE + 0xd8)
+#define RX_IDAF_DWORD6                 (PORT_BASE + 0xdc)
+#define RXOP_CHECK_CFG_H               (PORT_BASE + 0xfc)
+#define DONE_RECEIVED_TIME             (PORT_BASE + 0x11c)
+#define CHL_INT0                       (PORT_BASE + 0x1b4)
+#define CHL_INT0_HOTPLUG_TOUT_OFF      0
+#define CHL_INT0_HOTPLUG_TOUT_MSK      (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
+#define CHL_INT0_SL_RX_BCST_ACK_OFF    1
+#define CHL_INT0_SL_RX_BCST_ACK_MSK    (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
+#define CHL_INT0_SL_PHY_ENABLE_OFF     2
+#define CHL_INT0_SL_PHY_ENABLE_MSK     (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
+#define CHL_INT0_NOT_RDY_OFF           4
+#define CHL_INT0_NOT_RDY_MSK           (0x1 << CHL_INT0_NOT_RDY_OFF)
+#define CHL_INT0_PHY_RDY_OFF           5
+#define CHL_INT0_PHY_RDY_MSK           (0x1 << CHL_INT0_PHY_RDY_OFF)
+#define CHL_INT1                       (PORT_BASE + 0x1b8)
+#define CHL_INT1_DMAC_TX_ECC_ERR_OFF   15
+#define CHL_INT1_DMAC_TX_ECC_ERR_MSK   (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_RX_ECC_ERR_OFF   17
+#define CHL_INT1_DMAC_RX_ECC_ERR_MSK   (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT2                       (PORT_BASE + 0x1bc)
+#define CHL_INT0_MSK                   (PORT_BASE + 0x1c0)
+#define CHL_INT1_MSK                   (PORT_BASE + 0x1c4)
+#define CHL_INT2_MSK                   (PORT_BASE + 0x1c8)
+#define CHL_INT_COAL_EN                        (PORT_BASE + 0x1d0)
+#define PHY_CTRL_RDY_MSK               (PORT_BASE + 0x2b0)
+#define PHYCTRL_NOT_RDY_MSK            (PORT_BASE + 0x2b4)
+#define PHYCTRL_DWS_RESET_MSK          (PORT_BASE + 0x2b8)
+#define PHYCTRL_PHY_ENA_MSK            (PORT_BASE + 0x2bc)
+#define SL_RX_BCAST_CHK_MSK            (PORT_BASE + 0x2c0)
+#define PHYCTRL_OOB_RESTART_MSK                (PORT_BASE + 0x2c4)
+#define DMA_TX_STATUS                  (PORT_BASE + 0x2d0)
+#define DMA_TX_STATUS_BUSY_OFF         0
+#define DMA_TX_STATUS_BUSY_MSK         (0x1 << DMA_TX_STATUS_BUSY_OFF)
+#define DMA_RX_STATUS                  (PORT_BASE + 0x2e8)
+#define DMA_RX_STATUS_BUSY_OFF         0
+#define DMA_RX_STATUS_BUSY_MSK         (0x1 << DMA_RX_STATUS_BUSY_OFF)
+
+#define AXI_CFG                                (0x5100)
+#define AM_CFG_MAX_TRANS               (0x5010)
+#define AM_CFG_SINGLE_PORT_MAX_TRANS   (0x5014)
+
+/* HW dma structures */
+/* Delivery queue header */
+/* dw0 */
+#define CMD_HDR_RESP_REPORT_OFF                5
+#define CMD_HDR_RESP_REPORT_MSK                (0x1 << CMD_HDR_RESP_REPORT_OFF)
+#define CMD_HDR_TLR_CTRL_OFF           6
+#define CMD_HDR_TLR_CTRL_MSK           (0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PORT_OFF               18
+#define CMD_HDR_PORT_MSK               (0xf << CMD_HDR_PORT_OFF)
+#define CMD_HDR_PRIORITY_OFF           27
+#define CMD_HDR_PRIORITY_MSK           (0x1 << CMD_HDR_PRIORITY_OFF)
+#define CMD_HDR_CMD_OFF                        29
+#define CMD_HDR_CMD_MSK                        (0x7 << CMD_HDR_CMD_OFF)
+/* dw1 */
+#define CMD_HDR_DIR_OFF                        5
+#define CMD_HDR_DIR_MSK                        (0x3 << CMD_HDR_DIR_OFF)
+#define CMD_HDR_RESET_OFF              7
+#define CMD_HDR_RESET_MSK              (0x1 << CMD_HDR_RESET_OFF)
+#define CMD_HDR_VDTL_OFF               10
+#define CMD_HDR_VDTL_MSK               (0x1 << CMD_HDR_VDTL_OFF)
+#define CMD_HDR_FRAME_TYPE_OFF         11
+#define CMD_HDR_FRAME_TYPE_MSK         (0x1f << CMD_HDR_FRAME_TYPE_OFF)
+#define CMD_HDR_DEV_ID_OFF             16
+#define CMD_HDR_DEV_ID_MSK             (0xffff << CMD_HDR_DEV_ID_OFF)
+/* dw2 */
+#define CMD_HDR_CFL_OFF                        0
+#define CMD_HDR_CFL_MSK                        (0x1ff << CMD_HDR_CFL_OFF)
+#define CMD_HDR_NCQ_TAG_OFF            10
+#define CMD_HDR_NCQ_TAG_MSK            (0x1f << CMD_HDR_NCQ_TAG_OFF)
+#define CMD_HDR_MRFL_OFF               15
+#define CMD_HDR_MRFL_MSK               (0x1ff << CMD_HDR_MRFL_OFF)
+#define CMD_HDR_SG_MOD_OFF             24
+#define CMD_HDR_SG_MOD_MSK             (0x3 << CMD_HDR_SG_MOD_OFF)
+#define CMD_HDR_FIRST_BURST_OFF                26
+#define CMD_HDR_FIRST_BURST_MSK                (0x1 << CMD_HDR_SG_MOD_OFF)
+/* dw3 */
+#define CMD_HDR_IPTT_OFF               0
+#define CMD_HDR_IPTT_MSK               (0xffff << CMD_HDR_IPTT_OFF)
+/* dw6 */
+#define CMD_HDR_DIF_SGL_LEN_OFF                0
+#define CMD_HDR_DIF_SGL_LEN_MSK                (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
+#define CMD_HDR_DATA_SGL_LEN_OFF       16
+#define CMD_HDR_DATA_SGL_LEN_MSK       (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
+
+/* Completion header */
+/* dw0 */
+#define CMPLT_HDR_RSPNS_XFRD_OFF       10
+#define CMPLT_HDR_RSPNS_XFRD_MSK       (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
+#define CMPLT_HDR_ERX_OFF              12
+#define CMPLT_HDR_ERX_MSK              (0x1 << CMPLT_HDR_ERX_OFF)
+/* dw1 */
+#define CMPLT_HDR_IPTT_OFF             0
+#define CMPLT_HDR_IPTT_MSK             (0xffff << CMPLT_HDR_IPTT_OFF)
+#define CMPLT_HDR_DEV_ID_OFF           16
+#define CMPLT_HDR_DEV_ID_MSK           (0xffff << CMPLT_HDR_DEV_ID_OFF)
+
+/* ITCT header */
+/* qw0 */
+#define ITCT_HDR_DEV_TYPE_OFF          0
+#define ITCT_HDR_DEV_TYPE_MSK          (0x3 << ITCT_HDR_DEV_TYPE_OFF)
+#define ITCT_HDR_VALID_OFF             2
+#define ITCT_HDR_VALID_MSK             (0x1 << ITCT_HDR_VALID_OFF)
+#define ITCT_HDR_MCR_OFF               5
+#define ITCT_HDR_MCR_MSK               (0xf << ITCT_HDR_MCR_OFF)
+#define ITCT_HDR_VLN_OFF               9
+#define ITCT_HDR_VLN_MSK               (0xf << ITCT_HDR_VLN_OFF)
+#define ITCT_HDR_PORT_ID_OFF           28
+#define ITCT_HDR_PORT_ID_MSK           (0xf << ITCT_HDR_PORT_ID_OFF)
+/* qw2 */
+#define ITCT_HDR_INLT_OFF              0
+#define ITCT_HDR_INLT_MSK              (0xffffULL << ITCT_HDR_INLT_OFF)
+#define ITCT_HDR_BITLT_OFF             16
+#define ITCT_HDR_BITLT_MSK             (0xffffULL << ITCT_HDR_BITLT_OFF)
+#define ITCT_HDR_MCTLT_OFF             32
+#define ITCT_HDR_MCTLT_MSK             (0xffffULL << ITCT_HDR_MCTLT_OFF)
+#define ITCT_HDR_RTOLT_OFF             48
+#define ITCT_HDR_RTOLT_MSK             (0xffffULL << ITCT_HDR_RTOLT_OFF)
+
+struct hisi_sas_complete_v2_hdr {
+       __le32 dw0;
+       __le32 dw1;
+       __le32 act;
+       __le32 dw3;
+};
+
+struct hisi_sas_err_record_v2 {
+       /* dw0 */
+       __le32 trans_tx_fail_type;
+
+       /* dw1 */
+       __le32 trans_rx_fail_type;
+
+       /* dw2 */
+       __le16 dma_tx_err_type;
+       __le16 sipc_rx_err_type;
+
+       /* dw3 */
+       __le32 dma_rx_err_type;
+};
+
+enum {
+       HISI_SAS_PHY_PHY_UPDOWN,
+       HISI_SAS_PHY_CHNL_INT,
+       HISI_SAS_PHY_INT_NR
+};
+
+enum {
+       TRANS_TX_FAIL_BASE = 0x0, /* dw0 */
+       TRANS_RX_FAIL_BASE = 0x100, /* dw1 */
+       DMA_TX_ERR_BASE = 0x200, /* dw2 bit 15-0 */
+       SIPC_RX_ERR_BASE = 0x300, /* dw2 bit 31-16*/
+       DMA_RX_ERR_BASE = 0x400, /* dw3 */
+
+       /* trans tx*/
+       TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */
+       TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */
+       TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */
+       TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */
+       TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */
+       RESERVED0, /* 0x5 */
+       TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */
+       TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */
+       TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */
+       TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */
+       TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */
+       TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */
+       TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */
+       TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */
+       TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */
+       TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */
+       TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */
+       TRANS_TX_ERR_FRAME_TXED, /* 0x11 */
+       TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */
+       TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */
+       TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */
+       TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */
+       TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp*/
+       TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */
+       TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */
+       TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */
+       TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp*/
+       TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp*/
+       /*IO_TX_ERR_WITH_R_ERR_RECEVIED, [> 0x1b for sata/stp<] */
+       TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */
+       /*IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */
+       TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */
+       TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */
+       /*IO_TX_ERR_WITH_SYNC_RXD, [> 0x1e <] for sata/stp */
+       TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */
+
+       /* trans rx */
+       TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x100 */
+       TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x101 for sata/stp */
+       TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x102 for ssp/smp */
+       /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x102 <] for sata/stp */
+       TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x103 for sata/stp */
+       TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x104 for sata/stp */
+       TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x105 for smp */
+       /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x105 <] for sata/stp */
+       TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x106 for sata/stp*/
+       TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x107 */
+       TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x108 */
+       TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x109 */
+       TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x10a */
+       RESERVED1, /* 0x10b */
+       TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x10c */
+       TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x10d */
+       TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x10e */
+       TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x10f */
+       TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x110 for ssp/smp */
+       TRANS_RX_ERR_WITH_BAD_HASH, /* 0x111 for ssp */
+       /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x111 <] for sata/stp */
+       TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x112 for ssp*/
+       /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x112 <] for sata/stp */
+       TRANS_RX_SSP_FRM_LEN_ERR, /* 0x113 for ssp */
+       /*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x113 <] for sata */
+       RESERVED2, /* 0x114 */
+       RESERVED3, /* 0x115 */
+       RESERVED4, /* 0x116 */
+       RESERVED5, /* 0x117 */
+       TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x118 */
+       TRANS_RX_SMP_FRM_LEN_ERR, /* 0x119 */
+       TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x11a */
+       RESERVED6, /* 0x11b */
+       RESERVED7, /* 0x11c */
+       RESERVED8, /* 0x11d */
+       RESERVED9, /* 0x11e */
+       TRANS_RX_R_ERR, /* 0x11f */
+
+       /* dma tx */
+       DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x200 */
+       DMA_TX_DIF_APP_ERR, /* 0x201 */
+       DMA_TX_DIF_RPP_ERR, /* 0x202 */
+       DMA_TX_DATA_SGL_OVERFLOW, /* 0x203 */
+       DMA_TX_DIF_SGL_OVERFLOW, /* 0x204 */
+       DMA_TX_UNEXP_XFER_ERR, /* 0x205 */
+       DMA_TX_UNEXP_RETRANS_ERR, /* 0x206 */
+       DMA_TX_XFER_LEN_OVERFLOW, /* 0x207 */
+       DMA_TX_XFER_OFFSET_ERR, /* 0x208 */
+       DMA_TX_RAM_ECC_ERR, /* 0x209 */
+       DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x20a */
+
+       /* sipc rx */
+       SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x300 */
+       SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x301 */
+       SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x302 */
+       SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x303 */
+       SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x304 */
+       SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x305 */
+       SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x306 */
+       SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x307 */
+       SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x308 */
+       SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x309 */
+       SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x30a */
+
+       /* dma rx */
+       DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x400 */
+       DMA_RX_DIF_APP_ERR, /* 0x401 */
+       DMA_RX_DIF_RPP_ERR, /* 0x402 */
+       DMA_RX_DATA_SGL_OVERFLOW, /* 0x403 */
+       DMA_RX_DIF_SGL_OVERFLOW, /* 0x404 */
+       DMA_RX_DATA_LEN_OVERFLOW, /* 0x405 */
+       DMA_RX_DATA_LEN_UNDERFLOW, /* 0x406 */
+       DMA_RX_DATA_OFFSET_ERR, /* 0x407 */
+       RESERVED10, /* 0x408 */
+       DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x409 */
+       DMA_RX_RESP_BUF_OVERFLOW, /* 0x40a */
+       DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x40b */
+       DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x40c */
+       DMA_RX_UNEXP_RDFRAME_ERR, /* 0x40d */
+       DMA_RX_PIO_DATA_LEN_ERR, /* 0x40e */
+       DMA_RX_RDSETUP_STATUS_ERR, /* 0x40f */
+       DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x410 */
+       DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x411 */
+       DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x412 */
+       DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x413 */
+       DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x414 */
+       DMA_RX_RDSETUP_OFFSET_ERR, /* 0x415 */
+       DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x416 */
+       DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x417 */
+       DMA_RX_RAM_ECC_ERR, /* 0x418 */
+       DMA_RX_UNKNOWN_FRM_ERR, /* 0x419 */
+};
+
+#define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096
+
+#define DIR_NO_DATA 0
+#define DIR_TO_INI 1
+#define DIR_TO_DEVICE 2
+#define DIR_RESERVED 3
+
+#define SATA_PROTOCOL_NONDATA          0x1
+#define SATA_PROTOCOL_PIO              0x2
+#define SATA_PROTOCOL_DMA              0x4
+#define SATA_PROTOCOL_FPDMA            0x8
+#define SATA_PROTOCOL_ATAPI            0x10
+
+static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
+{
+       void __iomem *regs = hisi_hba->regs + off;
+
+       return readl(regs);
+}
+
+static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
+{
+       void __iomem *regs = hisi_hba->regs + off;
+
+       return readl_relaxed(regs);
+}
+
+static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
+{
+       void __iomem *regs = hisi_hba->regs + off;
+
+       writel(val, regs);
+}
+
+static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
+                                u32 off, u32 val)
+{
+       void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
+
+       writel(val, regs);
+}
+
+static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
+                                     int phy_no, u32 off)
+{
+       void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
+
+       return readl(regs);
+}
+
+static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+       u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+       cfg &= ~PHY_CFG_DC_OPT_MSK;
+       cfg |= 1 << PHY_CFG_DC_OPT_OFF;
+       hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+       struct sas_identify_frame identify_frame;
+       u32 *identify_buffer;
+
+       memset(&identify_frame, 0, sizeof(identify_frame));
+       identify_frame.dev_type = SAS_END_DEVICE;
+       identify_frame.frame_type = 0;
+       identify_frame._un1 = 1;
+       identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
+       identify_frame.target_bits = SAS_PROTOCOL_NONE;
+       memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
+       memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
+       identify_frame.phy_id = phy_no;
+       identify_buffer = (u32 *)(&identify_frame);
+
+       hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
+                       __swab32(identify_buffer[0]));
+       hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
+                       identify_buffer[2]);
+       hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
+                       identify_buffer[1]);
+       hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
+                       identify_buffer[4]);
+       hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
+                       identify_buffer[3]);
+       hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
+                       __swab32(identify_buffer[5]));
+}
+
+static void init_id_frame_v2_hw(struct hisi_hba *hisi_hba)
+{
+       int i;
+
+       for (i = 0; i < hisi_hba->n_phy; i++)
+               config_id_frame_v2_hw(hisi_hba, i);
+}
+
+static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
+                            struct hisi_sas_device *sas_dev)
+{
+       struct domain_device *device = sas_dev->sas_device;
+       struct device *dev = &hisi_hba->pdev->dev;
+       u64 qw0, device_id = sas_dev->device_id;
+       struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
+       struct domain_device *parent_dev = device->parent;
+       struct hisi_sas_port *port = device->port->lldd_port;
+
+       memset(itct, 0, sizeof(*itct));
+
+       /* qw0 */
+       qw0 = 0;
+       switch (sas_dev->dev_type) {
+       case SAS_END_DEVICE:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
+               qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
+               break;
+       case SAS_SATA_DEV:
+               if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+                       qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
+               else
+                       qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
+               break;
+       default:
+               dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
+                        sas_dev->dev_type);
+       }
+
+       qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
+               (device->max_linkrate << ITCT_HDR_MCR_OFF) |
+               (1 << ITCT_HDR_VLN_OFF) |
+               (port->id << ITCT_HDR_PORT_ID_OFF));
+       itct->qw0 = cpu_to_le64(qw0);
+
+       /* qw1 */
+       memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+       itct->sas_addr = __swab64(itct->sas_addr);
+
+       /* qw2 */
+       itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
+                               (0xff00ULL << ITCT_HDR_BITLT_OFF) |
+                               (0xff00ULL << ITCT_HDR_MCTLT_OFF) |
+                               (0xff00ULL << ITCT_HDR_RTOLT_OFF));
+}
+
+static void free_device_v2_hw(struct hisi_hba *hisi_hba,
+                             struct hisi_sas_device *sas_dev)
+{
+       u64 qw0, dev_id = sas_dev->device_id;
+       struct device *dev = &hisi_hba->pdev->dev;
+       struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
+       u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+       int i;
+
+       /* clear the itct interrupt state */
+       if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
+               hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+                                ENT_INT_SRC3_ITC_INT_MSK);
+
+       /* clear the itct int*/
+       for (i = 0; i < 2; i++) {
+               /* clear the itct table*/
+               reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+               reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
+               hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
+
+               udelay(10);
+               reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+               if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
+                       dev_dbg(dev, "got clear ITCT done interrupt\n");
+
+                       /* invalid the itct state*/
+                       qw0 = cpu_to_le64(itct->qw0);
+                       qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
+                       hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+                                        ENT_INT_SRC3_ITC_INT_MSK);
+                       hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
+                       hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
+
+                       /* clear the itct */
+                       hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+                       dev_dbg(dev, "clear ITCT ok\n");
+                       break;
+               }
+       }
+}
+
+static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
+{
+       int i, reset_val;
+       u32 val;
+       unsigned long end_time;
+       struct device *dev = &hisi_hba->pdev->dev;
+
+       /* The mask needs to be set depending on the number of phys */
+       if (hisi_hba->n_phy == 9)
+               reset_val = 0x1fffff;
+       else
+               reset_val = 0x7ffff;
+
+       /* Disable all of the DQ */
+       for (i = 0; i < HISI_SAS_MAX_QUEUES; i++)
+               hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+
+       /* Disable all of the PHYs */
+       for (i = 0; i < hisi_hba->n_phy; i++) {
+               u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG);
+
+               phy_cfg &= ~PHY_CTRL_RESET_MSK;
+               hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg);
+       }
+       udelay(50);
+
+       /* Ensure DMA tx & rx idle */
+       for (i = 0; i < hisi_hba->n_phy; i++) {
+               u32 dma_tx_status, dma_rx_status;
+
+               end_time = jiffies + msecs_to_jiffies(1000);
+
+               while (1) {
+                       dma_tx_status = hisi_sas_phy_read32(hisi_hba, i,
+                                                           DMA_TX_STATUS);
+                       dma_rx_status = hisi_sas_phy_read32(hisi_hba, i,
+                                                           DMA_RX_STATUS);
+
+                       if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) &&
+                               !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK))
+                               break;
+
+                       msleep(20);
+                       if (time_after(jiffies, end_time))
+                               return -EIO;
+               }
+       }
+
+       /* Ensure axi bus idle */
+       end_time = jiffies + msecs_to_jiffies(1000);
+       while (1) {
+               u32 axi_status =
+                       hisi_sas_read32(hisi_hba, AXI_CFG);
+
+               if (axi_status == 0)
+                       break;
+
+               msleep(20);
+               if (time_after(jiffies, end_time))
+                       return -EIO;
+       }
+
+       /* reset and disable clock*/
+       regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
+                       reset_val);
+       regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
+                       reset_val);
+       msleep(1);
+       regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
+       if (reset_val != (val & reset_val)) {
+               dev_err(dev, "SAS reset fail.\n");
+               return -EIO;
+       }
+
+       /* De-reset and enable clock*/
+       regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
+                       reset_val);
+       regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
+                       reset_val);
+       msleep(1);
+       regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg,
+                       &val);
+       if (val & reset_val) {
+               dev_err(dev, "SAS de-reset fail.\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
+{
+       struct device *dev = &hisi_hba->pdev->dev;
+       struct device_node *np = dev->of_node;
+       int i;
+
+       /* Global registers init */
+
+       /* Deal with am-max-transmissions quirk */
+       if (of_get_property(np, "hip06-sas-v2-quirk-amt", NULL)) {
+               hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020);
+               hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS,
+                                0x2020);
+       } /* Else, use defaults -> do nothing */
+
+       hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
+                        (u32)((1ULL << hisi_hba->queue_count) - 1));
+       hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000);
+       hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000);
+       hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+       hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF);
+       hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1);
+       hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4);
+       hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x4E20);
+       hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1);
+       hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
+       hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1);
+       hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1);
+       hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
+       hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
+       hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+       hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1);
+       hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1);
+       hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0);
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
+       hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0);
+       for (i = 0; i < hisi_hba->queue_count; i++)
+               hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
+
+       hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
+       hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
+
+       for (i = 0; i < hisi_hba->n_phy; i++) {
+               hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
+               hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908);
+               hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
+               hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
+               hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
+               hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
+               hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
+               hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
+               hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+               hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+               hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x23f801fc);
+               hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
+               hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
+               hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
+               hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
+               hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
+               hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
+               hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
+               hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+       }
+
+       for (i = 0; i < hisi_hba->queue_count; i++) {
+               /* Delivery queue */
+               hisi_sas_write32(hisi_hba,
+                                DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
+                                upper_32_bits(hisi_hba->cmd_hdr_dma[i]));
+
+               hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
+                                lower_32_bits(hisi_hba->cmd_hdr_dma[i]));
+
+               hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
+                                HISI_SAS_QUEUE_SLOTS);
+
+               /* Completion queue */
+               hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
+                                upper_32_bits(hisi_hba->complete_hdr_dma[i]));
+
+               hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
+                                lower_32_bits(hisi_hba->complete_hdr_dma[i]));
+
+               hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
+                                HISI_SAS_QUEUE_SLOTS);
+       }
+
+       /* itct */
+       hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
+                        lower_32_bits(hisi_hba->itct_dma));
+
+       hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
+                        upper_32_bits(hisi_hba->itct_dma));
+
+       /* iost */
+       hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
+                        lower_32_bits(hisi_hba->iost_dma));
+
+       hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
+                        upper_32_bits(hisi_hba->iost_dma));
+
+       /* breakpoint */
+       hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
+                        lower_32_bits(hisi_hba->breakpoint_dma));
+
+       hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
+                        upper_32_bits(hisi_hba->breakpoint_dma));
+
+       /* SATA broken msg */
+       hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
+                        lower_32_bits(hisi_hba->sata_breakpoint_dma));
+
+       hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
+                        upper_32_bits(hisi_hba->sata_breakpoint_dma));
+
+       /* SATA initial fis */
+       hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
+                        lower_32_bits(hisi_hba->initial_fis_dma));
+
+       hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
+                        upper_32_bits(hisi_hba->initial_fis_dma));
+}
+
+static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
+{
+       struct device *dev = &hisi_hba->pdev->dev;
+       int rc;
+
+       rc = reset_hw_v2_hw(hisi_hba);
+       if (rc) {
+               dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+               return rc;
+       }
+
+       msleep(100);
+       init_reg_v2_hw(hisi_hba);
+
+       init_id_frame_v2_hw(hisi_hba);
+
+       return 0;
+}
+
+static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+       u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+       cfg |= PHY_CFG_ENA_MSK;
+       hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+       u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+       cfg &= ~PHY_CFG_ENA_MSK;
+       hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+       config_id_frame_v2_hw(hisi_hba, phy_no);
+       config_phy_opt_mode_v2_hw(hisi_hba, phy_no);
+       enable_phy_v2_hw(hisi_hba, phy_no);
+}
+
+static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+       disable_phy_v2_hw(hisi_hba, phy_no);
+}
+
+static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+       stop_phy_v2_hw(hisi_hba, phy_no);
+       msleep(100);
+       start_phy_v2_hw(hisi_hba, phy_no);
+}
+
+static void start_phys_v2_hw(unsigned long data)
+{
+       struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+       int i;
+
+       for (i = 0; i < hisi_hba->n_phy; i++)
+               start_phy_v2_hw(hisi_hba, i);
+}
+
+static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
+{
+       int i;
+       struct timer_list *timer = &hisi_hba->timer;
+
+       for (i = 0; i < hisi_hba->n_phy; i++) {
+               hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a);
+               hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK);
+       }
+
+       setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
+       mod_timer(timer, jiffies + HZ);
+}
+
+static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+       u32 sl_control;
+
+       sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+       sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
+       hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+       msleep(1);
+       sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+       sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
+       hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+}
+
+static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
+{
+       int i, bitmap = 0;
+       u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+       u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+
+       for (i = 0; i < (hisi_hba->n_phy < 9 ? hisi_hba->n_phy : 8); i++)
+               if (phy_state & 1 << i)
+                       if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+                               bitmap |= 1 << i;
+
+       if (hisi_hba->n_phy == 9) {
+               u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+
+               if (phy_state & 1 << 8)
+                       if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
+                            PORT_STATE_PHY8_PORT_NUM_OFF) == port_id)
+                               bitmap |= 1 << 9;
+       }
+
+       return bitmap;
+}
+
+/**
+ * This function allocates across all queues to load balance.
+ * Slots are allocated from queues in a round-robin fashion.
+ *
+ * The call path from this function up to writing the delivery
+ * queue write pointer should be safe from interruption.
+ */
+static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+{
+       struct device *dev = &hisi_hba->pdev->dev;
+       u32 r, w;
+       int queue = hisi_hba->queue;
+
+       while (1) {
+               w = hisi_sas_read32_relaxed(hisi_hba,
+                                           DLVRY_Q_0_WR_PTR + (queue * 0x14));
+               r = hisi_sas_read32_relaxed(hisi_hba,
+                                           DLVRY_Q_0_RD_PTR + (queue * 0x14));
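+
+               /*
+                * The queue is full when the read pointer sits one slot
+                * behind the write pointer (modulo queue depth); move on
+                * to the next delivery queue in that case.
+                */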
+               if (r == (w + 1) % HISI_SAS_QUEUE_SLOTS) {
+                       queue = (queue + 1) % hisi_hba->queue_count;
+                       if (queue == hisi_hba->queue) {
+                               dev_warn(dev, "could not find free slot\n");
+                               return -EAGAIN;
+                       }
+                       continue;
+               }
+               break;
+       }
+       hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+       *q = queue;
+       *s = w;
+       return 0;
+}
+
+static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
+{
+       int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
+       int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
+
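+       /*
+        * Advance the delivery queue write pointer past the slot that was
+        * just prepared so the controller starts processing it.
+        */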
+       hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
+                        ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS);
+}
+
+static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
+                             struct hisi_sas_slot *slot,
+                             struct hisi_sas_cmd_hdr *hdr,
+                             struct scatterlist *scatter,
+                             int n_elem)
+{
+       struct device *dev = &hisi_hba->pdev->dev;
+       struct scatterlist *sg;
+       int i;
+
+       if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
+               dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+                       n_elem);
+               return -EINVAL;
+       }
+
+       slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
+                                       &slot->sge_page_dma);
+       if (!slot->sge_page)
+               return -ENOMEM;
+
+       for_each_sg(scatter, sg, n_elem, i) {
+               struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
+
+               entry->addr = cpu_to_le64(sg_dma_address(sg));
+               entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
+               entry->data_len = cpu_to_le32(sg_dma_len(sg));
+               entry->data_off = 0;
+       }
+
+       hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
+
+       hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+
+       return 0;
+}
+
+static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
+                         struct hisi_sas_slot *slot)
+{
+       struct sas_task *task = slot->task;
+       struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+       struct domain_device *device = task->dev;
+       struct device *dev = &hisi_hba->pdev->dev;
+       struct hisi_sas_port *port = slot->port;
+       struct scatterlist *sg_req, *sg_resp;
+       struct hisi_sas_device *sas_dev = device->lldd_dev;
+       dma_addr_t req_dma_addr;
+       unsigned int req_len, resp_len;
+       int elem, rc;
+
+       /*
+        * DMA-map SMP request, response buffers
+        */
+       /* req */
+       sg_req = &task->smp_task.smp_req;
+       elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
+       if (!elem)
+               return -ENOMEM;
+       req_len = sg_dma_len(sg_req);
+       req_dma_addr = sg_dma_address(sg_req);
+
+       /* resp */
+       sg_resp = &task->smp_task.smp_resp;
+       elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
+       if (!elem) {
+               rc = -ENOMEM;
+               goto err_out_req;
+       }
+       resp_len = sg_dma_len(sg_resp);
+       if ((req_len & 0x3) || (resp_len & 0x3)) {
+               rc = -EINVAL;
+               goto err_out_resp;
+       }
+
+       /* create header */
+       /* dw0 */
+       hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
+                              (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
+                              (2 << CMD_HDR_CMD_OFF)); /* smp */
+
+       /* map itct entry */
+       hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
+                              (1 << CMD_HDR_FRAME_TYPE_OFF) |
+                              (DIR_NO_DATA << CMD_HDR_DIR_OFF));
+
+       /* dw2 */
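+       /*
+        * Command frame length is given in dwords; the 4 bytes subtracted
+        * from req_len are presumably the trailing CRC.
+        */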
+       hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
+                              (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
+                              CMD_HDR_MRFL_OFF));
+
+       hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
+
+       hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
+       hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+
+       return 0;
+
+err_out_resp:
+       dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
+                    DMA_FROM_DEVICE);
+err_out_req:
+       dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
+                    DMA_TO_DEVICE);
+       return rc;
+}
+
+static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
+                         struct hisi_sas_slot *slot, int is_tmf,
+                         struct hisi_sas_tmf_task *tmf)
+{
+       struct sas_task *task = slot->task;
+       struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+       struct domain_device *device = task->dev;
+       struct hisi_sas_device *sas_dev = device->lldd_dev;
+       struct hisi_sas_port *port = slot->port;
+       struct sas_ssp_task *ssp_task = &task->ssp_task;
+       struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+       int has_data = 0, rc, priority = is_tmf;
+       u8 *buf_cmd;
+       u32 dw1 = 0, dw2 = 0;
+
+       hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
+                              (2 << CMD_HDR_TLR_CTRL_OFF) |
+                              (port->id << CMD_HDR_PORT_OFF) |
+                              (priority << CMD_HDR_PRIORITY_OFF) |
+                              (1 << CMD_HDR_CMD_OFF)); /* ssp */
+
+       dw1 = 1 << CMD_HDR_VDTL_OFF;
+       if (is_tmf) {
+               dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
+               dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
+       } else {
+               dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
+               switch (scsi_cmnd->sc_data_direction) {
+               case DMA_TO_DEVICE:
+                       has_data = 1;
+                       dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+                       break;
+               case DMA_FROM_DEVICE:
+                       has_data = 1;
+                       dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+                       break;
+               default:
+                       dw1 &= ~CMD_HDR_DIR_MSK;
+               }
+       }
+
+       /* map itct entry */
+       dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+       hdr->dw1 = cpu_to_le32(dw1);
+
+       dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
+             + 3) / 4) << CMD_HDR_CFL_OFF) |
+             ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
+             (2 << CMD_HDR_SG_MOD_OFF);
+       hdr->dw2 = cpu_to_le32(dw2);
+
+       hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+       if (has_data) {
+               rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
+                                       slot->n_elem);
+               if (rc)
+                       return rc;
+       }
+
+       hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+       hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
+       hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+
+       buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
+
+       memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+       if (!is_tmf) {
+               buf_cmd[9] = task->ssp_task.task_attr |
+                               (task->ssp_task.task_prio << 3);
+               memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
+                               task->ssp_task.cmd->cmd_len);
+       } else {
+               buf_cmd[10] = tmf->tmf;
+               switch (tmf->tmf) {
+               case TMF_ABORT_TASK:
+               case TMF_QUERY_TASK:
+                       buf_cmd[12] =
+                               (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+                       buf_cmd[13] =
+                               tmf->tag_of_task_to_be_managed & 0xff;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+                           struct hisi_sas_slot *slot)
+{
+       struct task_status_struct *ts = &task->task_status;
+       struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
+       struct dev_to_host_fis *d2h = slot->status_buffer +
+                                     sizeof(struct hisi_sas_err_record);
+
+       resp->frame_len = sizeof(struct dev_to_host_fis);
+       memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
+
+       ts->buf_valid_size = sizeof(*resp);
+}
+
+/* by default, task resp is complete */
+static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
+                          struct sas_task *task,
+                          struct hisi_sas_slot *slot)
+{
+       struct task_status_struct *ts = &task->task_status;
+       struct hisi_sas_err_record_v2 *err_record = slot->status_buffer;
+       u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
+       u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
+       u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
+       u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type);
+       u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type);
+       int error = -1;
+
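+       /*
+        * Pick the first reported error, checking the error-record fields
+        * in priority order; ffs() finds the lowest set bit, and the
+        * group's base offset turns it into an error enum value.
+        */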
+       if (dma_rx_err_type) {
+               error = ffs(dma_rx_err_type)
+                       - 1 + DMA_RX_ERR_BASE;
+       } else if (sipc_rx_err_type) {
+               error = ffs(sipc_rx_err_type)
+                       - 1 + SIPC_RX_ERR_BASE;
+       } else if (dma_tx_err_type) {
+               error = ffs(dma_tx_err_type)
+                       - 1 + DMA_TX_ERR_BASE;
+       } else if (trans_rx_fail_type) {
+               error = ffs(trans_rx_fail_type)
+                       - 1 + TRANS_RX_FAIL_BASE;
+       } else if (trans_tx_fail_type) {
+               error = ffs(trans_tx_fail_type)
+                       - 1 + TRANS_TX_FAIL_BASE;
+       }
+
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SSP:
+       {
+               switch (error) {
+               case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       ts->open_rej_reason = SAS_OREJ_NO_DEST;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       ts->open_rej_reason = SAS_OREJ_PATH_BLOCKED;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       ts->open_rej_reason = SAS_OREJ_EPROTO;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
+               {
+                       /* not sure */
+                       ts->stat = SAS_DEV_NO_RESPONSE;
+                       break;
+               }
+               case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
+               {
+                       ts->stat = SAS_PHY_DOWN;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
+               {
+                       ts->stat = SAS_OPEN_TO;
+                       break;
+               }
+               case DMA_RX_DATA_LEN_OVERFLOW:
+               {
+                       ts->stat = SAS_DATA_OVERRUN;
+                       ts->residual = 0;
+                       break;
+               }
+               case DMA_RX_DATA_LEN_UNDERFLOW:
+               case SIPC_RX_DATA_UNDERFLOW_ERR:
+               {
+                       ts->residual = trans_tx_fail_type;
+                       ts->stat = SAS_DATA_UNDERRUN;
+                       break;
+               }
+               case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
+               case TRANS_TX_ERR_PHY_NOT_ENABLE:
+               case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
+               case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
+               case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
+               case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
+               case TRANS_TX_ERR_WITH_BREAK_REQUEST:
+               case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
+               case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
+               case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
+               case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+               case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
+               case TRANS_TX_ERR_WITH_NAK_RECEVIED:
+               case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
+               case TRANS_TX_ERR_WITH_IPTT_CONFLICT:
+               case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
+               case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR:
+               case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
+               case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
+               case TRANS_RX_ERR_WITH_BREAK_TIMEOUT:
+               case TRANS_RX_ERR_WITH_BREAK_REQUEST:
+               case TRANS_RX_ERR_WITH_BREAK_RECEVIED:
+               case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
+               case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+               case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
+               case TRANS_RX_ERR_WITH_DATA_LEN0:
+               case TRANS_RX_ERR_WITH_BAD_HASH:
+               case TRANS_RX_XRDY_WLEN_ZERO_ERR:
+               case TRANS_RX_SSP_FRM_LEN_ERR:
+               case TRANS_RX_ERR_WITH_BAD_FRM_TYPE:
+               case DMA_TX_UNEXP_XFER_ERR:
+               case DMA_TX_UNEXP_RETRANS_ERR:
+               case DMA_TX_XFER_LEN_OVERFLOW:
+               case DMA_TX_XFER_OFFSET_ERR:
+               case DMA_RX_DATA_OFFSET_ERR:
+               case DMA_RX_UNEXP_NORM_RESP_ERR:
+               case DMA_RX_UNEXP_RDFRAME_ERR:
+               case DMA_RX_UNKNOWN_FRM_ERR:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+                       break;
+               }
+               default:
+                       break;
+               }
+       }
+               break;
+       case SAS_PROTOCOL_SMP:
+               ts->stat = SAM_STAT_CHECK_CONDITION;
+               break;
+
+       case SAS_PROTOCOL_SATA:
+       case SAS_PROTOCOL_STP:
+       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+       {
+               switch (error) {
+               case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
+               case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
+               case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
+               {
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_DEV_NO_RESPONSE;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
+               case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
+               case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
+               case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
+               case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
+               case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
+               case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       break;
+               }
+               case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
+               {
+                       ts->stat = SAS_OPEN_TO;
+                       break;
+               }
+               case DMA_RX_DATA_LEN_OVERFLOW:
+               {
+                       ts->stat = SAS_DATA_OVERRUN;
+                       break;
+               }
+               case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
+               case TRANS_TX_ERR_PHY_NOT_ENABLE:
+               case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
+               case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
+               case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
+               case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
+               case TRANS_TX_ERR_WITH_BREAK_REQUEST:
+               case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
+               case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
+               case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
+               case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+               case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
+               case TRANS_TX_ERR_WITH_NAK_RECEVIED:
+               case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
+               case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
+               case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT:
+               case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
+               case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
+               case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR:
+               case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR:
+               case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN:
+               case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP:
+               case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
+               case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
+               case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+               case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
+               case TRANS_RX_ERR_WITH_DATA_LEN0:
+               case TRANS_RX_ERR_WITH_BAD_HASH:
+               case TRANS_RX_XRDY_WLEN_ZERO_ERR:
+               case TRANS_RX_SSP_FRM_LEN_ERR:
+               case SIPC_RX_FIS_STATUS_ERR_BIT_VLD:
+               case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR:
+               case SIPC_RX_FIS_STATUS_BSY_BIT_ERR:
+               case SIPC_RX_WRSETUP_LEN_ODD_ERR:
+               case SIPC_RX_WRSETUP_LEN_ZERO_ERR:
+               case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR:
+               case SIPC_RX_SATA_UNEXP_FIS_ERR:
+               case DMA_RX_SATA_FRAME_TYPE_ERR:
+               case DMA_RX_UNEXP_RDFRAME_ERR:
+               case DMA_RX_PIO_DATA_LEN_ERR:
+               case DMA_RX_RDSETUP_STATUS_ERR:
+               case DMA_RX_RDSETUP_STATUS_DRQ_ERR:
+               case DMA_RX_RDSETUP_STATUS_BSY_ERR:
+               case DMA_RX_RDSETUP_LEN_ODD_ERR:
+               case DMA_RX_RDSETUP_LEN_ZERO_ERR:
+               case DMA_RX_RDSETUP_LEN_OVER_ERR:
+               case DMA_RX_RDSETUP_OFFSET_ERR:
+               case DMA_RX_RDSETUP_ACTIVE_ERR:
+               case DMA_RX_RDSETUP_ESTATUS_ERR:
+               case DMA_RX_UNKNOWN_FRM_ERR:
+               {
+                       ts->stat = SAS_OPEN_REJECT;
+                       break;
+               }
+               default:
+               {
+                       ts->stat = SAS_PROTO_RESPONSE;
+                       break;
+               }
+               }
+               sata_done_v2_hw(hisi_hba, task, slot);
+       }
+               break;
+       default:
+               break;
+       }
+}
+
+static int
+slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
+                   int abort)
+{
+       struct sas_task *task = slot->task;
+       struct hisi_sas_device *sas_dev;
+       struct device *dev = &hisi_hba->pdev->dev;
+       struct task_status_struct *ts;
+       struct domain_device *device;
+       enum exec_status sts;
+       struct hisi_sas_complete_v2_hdr *complete_queue =
+                       hisi_hba->complete_hdr[slot->cmplt_queue];
+       struct hisi_sas_complete_v2_hdr *complete_hdr =
+                       &complete_queue[slot->cmplt_queue_slot];
+
+       if (unlikely(!task || !task->lldd_task || !task->dev))
+               return -EINVAL;
+
+       ts = &task->task_status;
+       device = task->dev;
+       sas_dev = device->lldd_dev;
+
+       task->task_state_flags &=
+               ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+       task->task_state_flags |= SAS_TASK_STATE_DONE;
+
+       memset(ts, 0, sizeof(*ts));
+       ts->resp = SAS_TASK_COMPLETE;
+
+       if (unlikely(!sas_dev || abort)) {
+               if (!sas_dev)
+                       dev_dbg(dev, "slot complete: port has not device\n");
+               ts->stat = SAS_PHY_DOWN;
+               goto out;
+       }
+
+       if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
+               (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
+               dev_dbg(dev, "%s slot %d has error info 0x%x\n",
+                       __func__, slot->cmplt_queue_slot,
+                       complete_hdr->dw0 & CMPLT_HDR_ERX_MSK);
+
+               slot_err_v2_hw(hisi_hba, task, slot);
+               goto out;
+       }
+
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SSP:
+       {
+               struct ssp_response_iu *iu = slot->status_buffer +
+                       sizeof(struct hisi_sas_err_record);
+
+               sas_ssp_task_response(dev, task, iu);
+               break;
+       }
+       case SAS_PROTOCOL_SMP:
+       {
+               struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+               void *to;
+
+               ts->stat = SAM_STAT_GOOD;
+               to = kmap_atomic(sg_page(sg_resp));
+
+               dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
+                            DMA_FROM_DEVICE);
+               dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
+                            DMA_TO_DEVICE);
+               memcpy(to + sg_resp->offset,
+                      slot->status_buffer +
+                      sizeof(struct hisi_sas_err_record),
+                      sg_dma_len(sg_resp));
+               kunmap_atomic(to);
+               break;
+       }
+       case SAS_PROTOCOL_SATA:
+       case SAS_PROTOCOL_STP:
+       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+       {
+               ts->stat = SAM_STAT_GOOD;
+               sata_done_v2_hw(hisi_hba, task, slot);
+               break;
+       }
+       default:
+               ts->stat = SAM_STAT_CHECK_CONDITION;
+               break;
+       }
+
+       if (!slot->port->port_attached) {
+               dev_err(dev, "slot complete: port %d has removed\n",
+                       slot->port->sas_port.id);
+               ts->stat = SAS_PHY_DOWN;
+       }
+
+out:
+       if (sas_dev && sas_dev->running_req)
+               sas_dev->running_req--;
+
+       hisi_sas_slot_task_free(hisi_hba, task, slot);
+       sts = ts->stat;
+
+       if (task->task_done)
+               task->task_done(task);
+
+       return sts;
+}
+
+static u8 get_ata_protocol(u8 cmd, int direction)
+{
+       switch (cmd) {
+       case ATA_CMD_FPDMA_WRITE:
+       case ATA_CMD_FPDMA_READ:
+               return SATA_PROTOCOL_FPDMA;
+
+       case ATA_CMD_ID_ATA:
+       case ATA_CMD_PMP_READ:
+       case ATA_CMD_READ_LOG_EXT:
+       case ATA_CMD_PIO_READ:
+       case ATA_CMD_PIO_READ_EXT:
+       case ATA_CMD_PMP_WRITE:
+       case ATA_CMD_WRITE_LOG_EXT:
+       case ATA_CMD_PIO_WRITE:
+       case ATA_CMD_PIO_WRITE_EXT:
+               return SATA_PROTOCOL_PIO;
+
+       case ATA_CMD_READ:
+       case ATA_CMD_READ_EXT:
+       case ATA_CMD_READ_LOG_DMA_EXT:
+       case ATA_CMD_WRITE:
+       case ATA_CMD_WRITE_EXT:
+       case ATA_CMD_WRITE_QUEUED:
+       case ATA_CMD_WRITE_LOG_DMA_EXT:
+               return SATA_PROTOCOL_DMA;
+
+       case ATA_CMD_DOWNLOAD_MICRO:
+       case ATA_CMD_DEV_RESET:
+       case ATA_CMD_CHK_POWER:
+       case ATA_CMD_FLUSH:
+       case ATA_CMD_FLUSH_EXT:
+       case ATA_CMD_VERIFY:
+       case ATA_CMD_VERIFY_EXT:
+       case ATA_CMD_SET_FEATURES:
+       case ATA_CMD_STANDBY:
+       case ATA_CMD_STANDBYNOW1:
+               return SATA_PROTOCOL_NONDATA;
+       default:
+               if (direction == DMA_NONE)
+                       return SATA_PROTOCOL_NONDATA;
+               return SATA_PROTOCOL_PIO;
+       }
+}
+
+static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag)
+{
+       struct ata_queued_cmd *qc = task->uldd_task;
+
+       if (qc) {
+               if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+                       qc->tf.command == ATA_CMD_FPDMA_READ) {
+                       *tag = qc->tag;
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
+                         struct hisi_sas_slot *slot)
+{
+       struct sas_task *task = slot->task;
+       struct domain_device *device = task->dev;
+       struct domain_device *parent_dev = device->parent;
+       struct hisi_sas_device *sas_dev = device->lldd_dev;
+       struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+       struct hisi_sas_port *port = device->port->lldd_port;
+       u8 *buf_cmd;
+       int has_data = 0, rc = 0, hdr_tag = 0;
+       u32 dw1 = 0, dw2 = 0;
+
+       /* create header */
+       /* dw0 */
+       hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
+       if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+               hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
+       else
+               hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
+
+       /* dw1 */
+       switch (task->data_dir) {
+       case DMA_TO_DEVICE:
+               has_data = 1;
+               dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+               break;
+       case DMA_FROM_DEVICE:
+               has_data = 1;
+               dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+               break;
+       default:
+               dw1 &= ~CMD_HDR_DIR_MSK;
+       }
+
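+       /*
+        * A zero ATA command in the FIS is treated here as a (soft) reset
+        * request, so flag it in the command header.
+        */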
+       if (task->ata_task.fis.command == 0)
+               dw1 |= 1 << CMD_HDR_RESET_OFF;
+
+       dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir))
+               << CMD_HDR_FRAME_TYPE_OFF;
+       dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+       hdr->dw1 = cpu_to_le32(dw1);
+
+       /* dw2 */
+       if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) {
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+               dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
+       }
+
+       dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
+                       2 << CMD_HDR_SG_MOD_OFF;
+       hdr->dw2 = cpu_to_le32(dw2);
+
+       /* dw3 */
+       hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+       if (has_data) {
+               rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
+                                       slot->n_elem);
+               if (rc)
+                       return rc;
+       }
+
+       hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+       hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
+       hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+
+       buf_cmd = slot->command_table;
+
+       if (likely(!task->ata_task.device_control_reg_update))
+               task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+       /* fill in command FIS */
+       memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+
+       return 0;
+}
+
+static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+       int i, res = 0;
+       u32 context, port_id, link_rate, hard_phy_linkrate;
+       struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+       struct device *dev = &hisi_hba->pdev->dev;
+       u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
+       struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
+
+       hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
+
+       /* Check for SATA dev */
+       context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
+       if (context & (1 << phy_no))
+               goto end;
+
+       if (phy_no == 8) {
+               u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+
+               port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
+                         PORT_STATE_PHY8_PORT_NUM_OFF;
+               link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
+                           PORT_STATE_PHY8_CONN_RATE_OFF;
+       } else {
+               port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+               port_id = (port_id >> (4 * phy_no)) & 0xf;
+               link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+               link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+       }
+
+       if (port_id == 0xf) {
+               dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
+               res = IRQ_NONE;
+               goto end;
+       }
+
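+       /*
+        * Copy the received identify address frame, byte-swapping each
+        * dword read from the RX_IDAF registers into the buffer handed
+        * up to libsas.
+        */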
+       for (i = 0; i < 6; i++) {
+               u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
+                                              RX_IDAF_DWORD0 + (i * 4));
+               frame_rcvd[i] = __swab32(idaf);
+       }
+
+       /* Get the linkrates */
+       link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+       link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+       sas_phy->linkrate = link_rate;
+       hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
+                                               HARD_PHY_LINKRATE);
+       phy->maximum_linkrate = hard_phy_linkrate & 0xf;
+       phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
+
+       sas_phy->oob_mode = SAS_OOB_MODE;
+       memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE);
+       dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+       phy->port_id = port_id;
+       phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+       phy->phy_type |= PORT_TYPE_SAS;
+       phy->phy_attached = 1;
+       phy->identify.device_type = id->dev_type;
+       phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
+       if (phy->identify.device_type == SAS_END_DEVICE)
+               phy->identify.target_port_protocols =
+                       SAS_PROTOCOL_SSP;
+       else if (phy->identify.device_type != SAS_PHY_UNUSED)
+               phy->identify.target_port_protocols =
+                       SAS_PROTOCOL_SMP;
+       queue_work(hisi_hba->wq, &phy->phyup_ws);
+
+end:
+       hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+                            CHL_INT0_SL_PHY_ENABLE_MSK);
+       hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
+
+       return res;
+}
+
+static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+       int res = 0;
+       u32 phy_cfg, phy_state;
+
+       hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
+
+       phy_cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+       phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+
+       hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
+
+       hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
+       hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
+
+       return res;
+}
+
+static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
+{
+       struct hisi_hba *hisi_hba = p;
+       u32 irq_msk;
+       int phy_no = 0;
+       irqreturn_t res = IRQ_HANDLED;
+
+       irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
+                  >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
+       while (irq_msk) {
+               if (irq_msk & 1) {
+                       u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
+                                                           CHL_INT0);
+
+                       if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
+                               /* phy up */
+                               if (phy_up_v2_hw(phy_no, hisi_hba)) {
+                                       res = IRQ_NONE;
+                                       goto end;
+                               }
+
+                       if (irq_value & CHL_INT0_NOT_RDY_MSK)
+                               /* phy down */
+                               if (phy_down_v2_hw(phy_no, hisi_hba)) {
+                                       res = IRQ_NONE;
+                                       goto end;
+                               }
+               }
+               irq_msk >>= 1;
+               phy_no++;
+       }
+
+end:
+       return res;
+}
+
+static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+       struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+       struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+       unsigned long flags;
+
+       hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+
+       spin_lock_irqsave(&hisi_hba->lock, flags);
+       sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+       spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+       hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+                            CHL_INT0_SL_RX_BCST_ACK_MSK);
+       hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
+}
+
+static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
+{
+       struct hisi_hba *hisi_hba = p;
+       struct device *dev = &hisi_hba->pdev->dev;
+       u32 ent_msk, ent_tmp, irq_msk;
+       int phy_no = 0;
+
+       ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+       ent_tmp = ent_msk;
+       ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);
+
+       irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >>
+                       HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
+
+       while (irq_msk) {
+               if (irq_msk & (1 << phy_no)) {
+                       u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
+                                                            CHL_INT0);
+                       u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+                                                            CHL_INT1);
+                       u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+                                                            CHL_INT2);
+
+                       if (irq_value1) {
+                               if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
+                                                 CHL_INT1_DMAC_TX_ECC_ERR_MSK))
+                                       panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
+                                               dev_name(dev), irq_value1);
+
+                               hisi_sas_phy_write32(hisi_hba, phy_no,
+                                                    CHL_INT1, irq_value1);
+                       }
+
+                       if (irq_value2)
+                               hisi_sas_phy_write32(hisi_hba, phy_no,
+                                                    CHL_INT2, irq_value2);
+
+                       if (irq_value0) {
+                               if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
+                                       phy_bcast_v2_hw(phy_no, hisi_hba);
+
+                               hisi_sas_phy_write32(hisi_hba, phy_no,
+                                               CHL_INT0, irq_value0
+                                               & (~CHL_INT0_HOTPLUG_TOUT_MSK)
+                                               & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+                                               & (~CHL_INT0_NOT_RDY_MSK));
+                       }
+               }
+               irq_msk &= ~(1 << phy_no);
+               phy_no++;
+       }
+
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
+{
+       struct hisi_sas_cq *cq = p;
+       struct hisi_hba *hisi_hba = cq->hisi_hba;
+       struct hisi_sas_slot *slot;
+       struct hisi_sas_itct *itct;
+       struct hisi_sas_complete_v2_hdr *complete_queue;
+       u32 irq_value, rd_point, wr_point, dev_id;
+       int queue = cq->id;
+
+       complete_queue = hisi_hba->complete_hdr[queue];
+       irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
+
+       hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
+
+       rd_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_RD_PTR +
+                                  (0x14 * queue));
+       wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
+                                  (0x14 * queue));
+
+       while (rd_point != wr_point) {
+               struct hisi_sas_complete_v2_hdr *complete_hdr;
+               int iptt;
+
+               complete_hdr = &complete_queue[rd_point];
+
+               /* Check for NCQ completion */
+               if (complete_hdr->act) {
+                       u32 act_tmp = complete_hdr->act;
+                       int ncq_tag_count = ffs(act_tmp);
+
+                       dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
+                                CMPLT_HDR_DEV_ID_OFF;
+                       itct = &hisi_hba->itct[dev_id];
+
+                       /* The NCQ tags are held in the itct header */
+                       while (ncq_tag_count) {
+                               __le64 *ncq_tag = &itct->qw4_15[0];
+
+                               ncq_tag_count -= 1;
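+                               /*
+                                * Each 64-bit qword of qw4_15 packs five
+                                * 12-bit IPTT values, so tag n lives in
+                                * qword n/5 at bit offset (n%5)*12.
+                                */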
+                               iptt = (ncq_tag[ncq_tag_count / 5]
+                                       >> (ncq_tag_count % 5) * 12) & 0xfff;
+
+                               slot = &hisi_hba->slot_info[iptt];
+                               slot->cmplt_queue_slot = rd_point;
+                               slot->cmplt_queue = queue;
+                               slot_complete_v2_hw(hisi_hba, slot, 0);
+
+                               act_tmp &= ~(1 << ncq_tag_count);
+                               ncq_tag_count = ffs(act_tmp);
+                       }
+               } else {
+                       iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+                       slot = &hisi_hba->slot_info[iptt];
+                       slot->cmplt_queue_slot = rd_point;
+                       slot->cmplt_queue = queue;
+                       slot_complete_v2_hw(hisi_hba, slot, 0);
+               }
+
+               if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
+                       rd_point = 0;
+       }
+
+       /* update rd_point */
+       hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
+{
+       struct hisi_sas_phy *phy = p;
+       struct hisi_hba *hisi_hba = phy->hisi_hba;
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+       struct device *dev = &hisi_hba->pdev->dev;
+       struct hisi_sas_initial_fis *initial_fis;
+       struct dev_to_host_fis *fis;
+       u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
+       irqreturn_t res = IRQ_HANDLED;
+       u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
+       int phy_no;
+
+       phy_no = sas_phy->id;
+       initial_fis = &hisi_hba->initial_fis[phy_no];
+       fis = &initial_fis->fis;
+
+       ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1);
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk | 1 << phy_no);
+
+       ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1);
+       ent_tmp = ent_int;
+       ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4);
+       if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) {
+               dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no);
+               hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
+               hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
+               res = IRQ_NONE;
+               goto end;
+       }
+
+       if (unlikely(phy_no == 8)) {
+               u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+
+               port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
+                         PORT_STATE_PHY8_PORT_NUM_OFF;
+               link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
+                           PORT_STATE_PHY8_CONN_RATE_OFF;
+       } else {
+               port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+               port_id = (port_id >> (4 * phy_no)) & 0xf;
+               link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+               link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+       }
+
+       if (port_id == 0xf) {
+               dev_err(dev, "sata int: phy%d invalid portid\n", phy_no);
+               res = IRQ_NONE;
+               goto end;
+       }
+
+       sas_phy->linkrate = link_rate;
+       hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
+                                               HARD_PHY_LINKRATE);
+       phy->maximum_linkrate = hard_phy_linkrate & 0xf;
+       phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
+
+       sas_phy->oob_mode = SATA_OOB_MODE;
+       /* Make up some unique SAS address */
+       attached_sas_addr[0] = 0x50;
+       attached_sas_addr[7] = phy_no;
+       memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
+       memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
+       dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+       phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+       phy->port_id = port_id;
+       phy->phy_type |= PORT_TYPE_SATA;
+       phy->phy_attached = 1;
+       phy->identify.device_type = SAS_SATA_DEV;
+       phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+       phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+       queue_work(hisi_hba->wq, &phy->phyup_ws);
+
+end:
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
+
+       return res;
+}
+
+static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
+       int_phy_updown_v2_hw,
+       int_chnl_int_v2_hw,
+};
+
+/**
+ * There is a limitation in the hip06 chipset that we need
+ * to map in all mbigen interrupts, even if they are not used.
+ */
+static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
+{
+       struct platform_device *pdev = hisi_hba->pdev;
+       struct device *dev = &pdev->dev;
+       int i, irq, rc, irq_map[128];
+
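+       /*
+        * Map all mbigen interrupts up front; the fixed per-group offsets
+        * used below (phy events at index 1, SATA at 72, completion queues
+        * at 96) index into this table.
+        */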
+       for (i = 0; i < 128; i++)
+               irq_map[i] = platform_get_irq(pdev, i);
+
+       for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
+               int idx = i;
+
+               irq = irq_map[idx + 1]; /* Phy up/down is irq1 */
+               if (!irq) {
+                       dev_err(dev, "irq init: fail map phy interrupt %d\n",
+                               idx);
+                       return -ENOENT;
+               }
+
+               rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
+                                     DRV_NAME " phy", hisi_hba);
+               if (rc) {
+                       dev_err(dev, "irq init: could not request "
+                               "phy interrupt %d, rc=%d\n",
+                               irq, rc);
+                       return -ENOENT;
+               }
+       }
+
+       for (i = 0; i < hisi_hba->n_phy; i++) {
+               struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+               int idx = i + 72; /* First SATA interrupt is irq72 */
+
+               irq = irq_map[idx];
+               if (!irq) {
+                       dev_err(dev, "irq init: fail map phy interrupt %d\n",
+                               idx);
+                       return -ENOENT;
+               }
+
+               rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
+                                     DRV_NAME " sata", phy);
+               if (rc) {
+                       dev_err(dev, "irq init: could not request "
+                               "sata interrupt %d, rc=%d\n",
+                               irq, rc);
+                       return -ENOENT;
+               }
+       }
+
+       for (i = 0; i < hisi_hba->queue_count; i++) {
+               int idx = i + 96; /* First cq interrupt is irq96 */
+
+               irq = irq_map[idx];
+               if (!irq) {
+                       dev_err(dev,
+                               "irq init: could not map cq interrupt %d\n",
+                               idx);
+                       return -ENOENT;
+               }
+               rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
+                                     DRV_NAME " cq", &hisi_hba->cq[i]);
+               if (rc) {
+                       dev_err(dev,
+                               "irq init: could not request cq interrupt %d, rc=%d\n",
+                               irq, rc);
+                       return -ENOENT;
+               }
+       }
+
+       return 0;
+}
+
+static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
+{
+       int rc;
+
+       rc = hw_init_v2_hw(hisi_hba);
+       if (rc)
+               return rc;
+
+       rc = interrupt_init_v2_hw(hisi_hba);
+       if (rc)
+               return rc;
+
+       phys_init_v2_hw(hisi_hba);
+
+       return 0;
+}
+
+static const struct hisi_sas_hw hisi_sas_v2_hw = {
+       .hw_init = hisi_sas_v2_init,
+       .setup_itct = setup_itct_v2_hw,
+       .sl_notify = sl_notify_v2_hw,
+       .get_wideport_bitmap = get_wideport_bitmap_v2_hw,
+       .free_device = free_device_v2_hw,
+       .prep_smp = prep_smp_v2_hw,
+       .prep_ssp = prep_ssp_v2_hw,
+       .prep_stp = prep_ata_v2_hw,
+       .get_free_slot = get_free_slot_v2_hw,
+       .start_delivery = start_delivery_v2_hw,
+       .slot_complete = slot_complete_v2_hw,
+       .phy_enable = enable_phy_v2_hw,
+       .phy_disable = disable_phy_v2_hw,
+       .phy_hard_reset = phy_hard_reset_v2_hw,
+       .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
+       .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
+};
+
+static int hisi_sas_v2_probe(struct platform_device *pdev)
+{
+       return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
+}
+
+static int hisi_sas_v2_remove(struct platform_device *pdev)
+{
+       return hisi_sas_remove(pdev);
+}
+
+static const struct of_device_id sas_v2_of_match[] = {
+       { .compatible = "hisilicon,hip06-sas-v2",},
+       {},
+};
+MODULE_DEVICE_TABLE(of, sas_v2_of_match);
+
+static struct platform_driver hisi_sas_v2_driver = {
+       .probe = hisi_sas_v2_probe,
+       .remove = hisi_sas_v2_remove,
+       .driver = {
+               .name = DRV_NAME,
+               .of_match_table = sas_v2_of_match,
+       },
+};
+
+module_platform_driver(hisi_sas_v2_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver");
+MODULE_ALIAS("platform:" DRV_NAME);
index 82ac1cd818ac18e1310ba2abaede9c8b262f9976..94025c5cf797aa6490506fdd2c160941662575c7 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/transport_class.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
-
+#include <linux/idr.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
@@ -42,7 +42,7 @@
 #include "scsi_logging.h"
 
 
-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0);    /* host_no for next new host */
+static DEFINE_IDA(host_index_ida);
 
 
 static void scsi_host_cls_release(struct device *dev)
@@ -355,6 +355,8 @@ static void scsi_host_dev_release(struct device *dev)
 
        kfree(shost->shost_data);
 
+       ida_simple_remove(&host_index_ida, shost->host_no);
+
        if (parent)
                put_device(parent);
        kfree(shost);
@@ -388,6 +390,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 {
        struct Scsi_Host *shost;
        gfp_t gfp_mask = GFP_KERNEL;
+       int index;
 
        if (sht->unchecked_isa_dma && privsize)
                gfp_mask |= __GFP_DMA;
@@ -406,11 +409,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        init_waitqueue_head(&shost->host_wait);
        mutex_init(&shost->scan_mutex);
 
-       /*
-        * subtract one because we increment first then return, but we need to
-        * know what the next host number was before increment
-        */
-       shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
+       index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
+       if (index < 0)
+               goto fail_kfree;
+       shost->host_no = index;
+
        shost->dma_channel = 0xff;
 
        /* These three are default values which can be overridden */
@@ -495,7 +498,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
                shost_printk(KERN_WARNING, shost,
                        "error handler thread failed to spawn, error = %ld\n",
                        PTR_ERR(shost->ehandler));
-               goto fail_kfree;
+               goto fail_index_remove;
        }
 
        shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
@@ -511,6 +514,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 
  fail_kthread:
        kthread_stop(shost->ehandler);
+ fail_index_remove:
+       ida_simple_remove(&host_index_ida, shost->host_no);
  fail_kfree:
        kfree(shost);
        return NULL;
@@ -606,6 +611,7 @@ int scsi_init_hosts(void)
 void scsi_exit_hosts(void)
 {
        class_unregister(&shost_class);
+       ida_destroy(&host_index_ida);
 }
 
 int scsi_is_host_device(const struct device *dev)
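
The hosts.c hunks above replace the monotonically increasing scsi_host_next_hn counter with an IDA, so the host_no of a released Scsi_Host can be reused instead of growing forever. A minimal sketch of the same allocator lifecycle outside the SCSI code (the example_* names are illustrative):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_index_ida);

static int example_get_index(void)
{
        /* returns the smallest free index >= 0, or a negative errno */
        return ida_simple_get(&example_index_ida, 0, 0, GFP_KERNEL);
}

static void example_put_index(int index)
{
        /* hand the index back so it can be allocated again */
        ida_simple_remove(&example_index_ida, index);
}

static void example_cleanup(void)
{
        /* free the IDA's internal bookkeeping, e.g. at module exit */
        ida_destroy(&example_index_ida);
}
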
index c0f7c8ce54aa08b668694b4b70e19076ec72dbff..b6fdb48eee902562be7b5b97fb9a2b43a6da690c 100644 (file)
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "06.808.16.00-rc1"
-#define MEGASAS_RELDATE                                "Oct. 8, 2015"
+#define MEGASAS_VERSION                                "06.810.09.00-rc1"
+#define MEGASAS_RELDATE                                "Jan. 28, 2016"
 
 /*
  * Device IDs
 #define MFI_RESET_FLAGS                                MFI_INIT_READY| \
                                                MFI_INIT_MFIMODE| \
                                                MFI_INIT_ABORT
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE    (0x01)
 
 /*
  * MFI frame flags
 
 /* Driver internal */
 #define DRV_DCMD_POLLED_MODE           0x1
+#define DRV_DCMD_SKIP_REFIRE           0x2
 
 /*
  * Definition for cmd_status
 
 #define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS     0x01190100
 #define MR_DRIVER_SET_APP_CRASHDUMP_MODE       (0xF0010000 | 0x0600)
+#define MR_DCMD_PD_GET_INFO                    0x02020000
 
 /*
  * Global functions
@@ -434,6 +437,257 @@ enum MR_PD_STATE {
        MR_PD_STATE_SYSTEM              = 0x40,
  };
 
+union MR_PD_REF {
+       struct {
+               u16      deviceId;
+               u16      seqNum;
+       } mrPdRef;
+       u32      ref;
+};
+
+/*
+ * define the DDF Type bit structure
+ */
+union MR_PD_DDF_TYPE {
+        struct {
+               union {
+                       struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+                                u16     forcedPDGUID:1;
+                                u16     inVD:1;
+                                u16     isGlobalSpare:1;
+                                u16     isSpare:1;
+                                u16     isForeign:1;
+                                u16     reserved:7;
+                                u16     intf:4;
+#else
+                                u16     intf:4;
+                                u16     reserved:7;
+                                u16     isForeign:1;
+                                u16     isSpare:1;
+                                u16     isGlobalSpare:1;
+                                u16     inVD:1;
+                                u16     forcedPDGUID:1;
+#endif
+                        } pdType;
+                        u16     type;
+                };
+                u16     reserved;
+        } ddf;
+        struct {
+                u32    reserved;
+        } nonDisk;
+        u32     type;
+} __packed;
+
+/*
+ * defines the progress structure
+ */
+union MR_PROGRESS {
+       struct  {
+               u16 progress;
+               union {
+                       u16 elapsedSecs;
+                       u16 elapsedSecsForLastPercent;
+               };
+       } mrProgress;
+       u32 w;
+} __packed;
+
+/*
+ * defines the physical drive progress structure
+ */
+struct MR_PD_PROGRESS {
+       struct {
+#ifndef MFI_BIG_ENDIAN
+               u32     rbld:1;
+               u32     patrol:1;
+               u32     clear:1;
+               u32     copyBack:1;
+               u32     erase:1;
+               u32     locate:1;
+               u32     reserved:26;
+#else
+               u32     reserved:26;
+               u32     locate:1;
+               u32     erase:1;
+               u32     copyBack:1;
+               u32     clear:1;
+               u32     patrol:1;
+               u32     rbld:1;
+#endif
+       } active;
+       union MR_PROGRESS     rbld;
+       union MR_PROGRESS     patrol;
+       union {
+               union MR_PROGRESS     clear;
+               union MR_PROGRESS     erase;
+       };
+
+       struct {
+#ifndef MFI_BIG_ENDIAN
+               u32     rbld:1;
+               u32     patrol:1;
+               u32     clear:1;
+               u32     copyBack:1;
+               u32     erase:1;
+               u32     reserved:27;
+#else
+               u32     reserved:27;
+               u32     erase:1;
+               u32     copyBack:1;
+               u32     clear:1;
+               u32     patrol:1;
+               u32     rbld:1;
+#endif
+       } pause;
+
+       union MR_PROGRESS     reserved[3];
+} __packed;
+
+struct  MR_PD_INFO {
+       union MR_PD_REF ref;
+       u8 inquiryData[96];
+       u8 vpdPage83[64];
+       u8 notSupported;
+       u8 scsiDevType;
+
+       union {
+               u8 connectedPortBitmap;
+               u8 connectedPortNumbers;
+       };
+
+       u8 deviceSpeed;
+       u32 mediaErrCount;
+       u32 otherErrCount;
+       u32 predFailCount;
+       u32 lastPredFailEventSeqNum;
+
+       u16 fwState;
+       u8 disabledForRemoval;
+       u8 linkSpeed;
+       union MR_PD_DDF_TYPE state;
+
+       struct {
+               u8 count;
+#ifndef __BIG_ENDIAN_BITFIELD
+               u8 isPathBroken:4;
+               u8 reserved3:3;
+               u8 widePortCapable:1;
+#else
+               u8 widePortCapable:1;
+               u8 reserved3:3;
+               u8 isPathBroken:4;
+#endif
+
+               u8 connectorIndex[2];
+               u8 reserved[4];
+               u64 sasAddr[2];
+               u8 reserved2[16];
+       } pathInfo;
+
+       u64 rawSize;
+       u64 nonCoercedSize;
+       u64 coercedSize;
+       u16 enclDeviceId;
+       u8 enclIndex;
+
+       union {
+               u8 slotNumber;
+               u8 enclConnectorIndex;
+       };
+
+       struct MR_PD_PROGRESS progInfo;
+       u8 badBlockTableFull;
+       u8 unusableInCurrentConfig;
+       u8 vpdPage83Ext[64];
+       u8 powerState;
+       u8 enclPosition;
+       u32 allowedOps;
+       u16 copyBackPartnerId;
+       u16 enclPartnerDeviceId;
+       struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+               u16 fdeCapable:1;
+               u16 fdeEnabled:1;
+               u16 secured:1;
+               u16 locked:1;
+               u16 foreign:1;
+               u16 needsEKM:1;
+               u16 reserved:10;
+#else
+               u16 reserved:10;
+               u16 needsEKM:1;
+               u16 foreign:1;
+               u16 locked:1;
+               u16 secured:1;
+               u16 fdeEnabled:1;
+               u16 fdeCapable:1;
+#endif
+       } security;
+       u8 mediaType;
+       u8 notCertified;
+       u8 bridgeVendor[8];
+       u8 bridgeProductIdentification[16];
+       u8 bridgeProductRevisionLevel[4];
+       u8 satBridgeExists;
+
+       u8 interfaceType;
+       u8 temperature;
+       u8 emulatedBlockSize;
+       u16 userDataBlockSize;
+       u16 reserved2;
+
+       struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+               u32 piType:3;
+               u32 piFormatted:1;
+               u32 piEligible:1;
+               u32 NCQ:1;
+               u32 WCE:1;
+               u32 commissionedSpare:1;
+               u32 emergencySpare:1;
+               u32 ineligibleForSSCD:1;
+               u32 ineligibleForLd:1;
+               u32 useSSEraseType:1;
+               u32 wceUnchanged:1;
+               u32 supportScsiUnmap:1;
+               u32 reserved:18;
+#else
+               u32 reserved:18;
+               u32 supportScsiUnmap:1;
+               u32 wceUnchanged:1;
+               u32 useSSEraseType:1;
+               u32 ineligibleForLd:1;
+               u32 ineligibleForSSCD:1;
+               u32 emergencySpare:1;
+               u32 commissionedSpare:1;
+               u32 WCE:1;
+               u32 NCQ:1;
+               u32 piEligible:1;
+               u32 piFormatted:1;
+               u32 piType:3;
+#endif
+       } properties;
+
+       u64 shieldDiagCompletionTime;
+       u8 shieldCounter;
+
+       u8 linkSpeedOther;
+       u8 reserved4[2];
+
+       struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+               u32 bbmErrCountSupported:1;
+               u32 bbmErrCount:31;
+#else
+               u32 bbmErrCount:31;
+               u32 bbmErrCountSupported:1;
+#endif
+       } bbmErr;
+
+       u8 reserved1[512-428];
+} __packed;
 
  /*
  * defines the physical drive address structure
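
The firmware-defined structures added above (union MR_PD_DDF_TYPE, struct MR_PD_PROGRESS, struct MR_PD_INFO) follow the usual convention for fixed-layout DMA data: each bitfield group is declared twice, in opposite member order, under a big-endian guard so the same wire bits are named on either byte order, and the whole structure is __packed. A stripped-down sketch of the convention (the example_flags name and fields are illustrative):

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>      /* defines __BIG_ENDIAN_BITFIELD or __LITTLE_ENDIAN_BITFIELD */

struct example_flags {
#ifndef __BIG_ENDIAN_BITFIELD
        u8 enabled:1;           /* bit 0 of the byte on little-endian builds */
        u8 secured:1;
        u8 reserved:6;
#else
        u8 reserved:6;
        u8 secured:1;
        u8 enabled:1;           /* declared last so it still names bit 0 */
#endif
} __packed;
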
@@ -473,6 +727,7 @@ struct megasas_pd_list {
        u16             tid;
        u8             driveType;
        u8             driveState;
+       u8             interface;
 } __packed;
 
  /*
@@ -1083,6 +1338,8 @@ struct megasas_ctrl_info {
 
 #define VD_EXT_DEBUG 0
 
+#define SCAN_PD_CHANNEL        0x1
+#define SCAN_VD_CHANNEL        0x2
 
 enum MR_SCSI_CMD_TYPE {
        READ_WRITE_LDIO = 0,
@@ -1091,6 +1348,17 @@ enum MR_SCSI_CMD_TYPE {
        NON_READ_WRITE_SYSPDIO = 3,
 };
 
+enum DCMD_TIMEOUT_ACTION {
+       INITIATE_OCR = 0,
+       KILL_ADAPTER = 1,
+       IGNORE_TIMEOUT = 2,
+};
+
+enum FW_BOOT_CONTEXT {
+       PROBE_CONTEXT = 0,
+       OCR_CONTEXT = 1,
+};
+
 /* Frame Type */
 #define IO_FRAME                               0
 #define PTHRU_FRAME                            1
@@ -1137,6 +1405,7 @@ enum MR_SCSI_CMD_TYPE {
 
 #define MFI_OB_INTR_STATUS_MASK                        0x00000002
 #define MFI_POLL_TIMEOUT_SECS                  60
+#define MFI_IO_TIMEOUT_SECS                    180
 #define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF    (5 * HZ)
 #define MEGASAS_OCR_SETTLE_TIME_VF             (1000 * 30)
 #define MEGASAS_ROUTINE_WAIT_TIME_VF           300
@@ -1154,6 +1423,7 @@ enum MR_SCSI_CMD_TYPE {
 #define MR_MAX_REPLY_QUEUES_EXT_OFFSET          0X003FC000
 #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT    14
 #define MR_MAX_MSIX_REG_ARRAY                   16
+#define MR_RDPQ_MODE_OFFSET                    0X00800000
 /*
 * register set for both 1068 and 1078 controllers
 * structure extended for 1078 registers
@@ -1193,8 +1463,9 @@ struct megasas_register_set {
 
        u32     outbound_scratch_pad ;          /*00B0h*/
        u32     outbound_scratch_pad_2;         /*00B4h*/
+       u32     outbound_scratch_pad_3;         /*00B8h*/
 
-       u32     reserved_4[2];                  /*00B8h*/
+       u32     reserved_4;                     /*00BCh*/
 
        u32     inbound_low_queue_port ;        /*00C0h*/
 
@@ -1266,7 +1537,10 @@ union megasas_sgl_frame {
 typedef union _MFI_CAPABILITIES {
        struct {
 #if   defined(__BIG_ENDIAN_BITFIELD)
-               u32     reserved:23;
+               u32     reserved:20;
+               u32     support_qd_throttling:1;
+               u32     support_fp_rlbypass:1;
+               u32     support_vfid_in_ioframe:1;
                u32     support_ext_io_size:1;
                u32     support_ext_queue_depth:1;
                u32     security_protocol_cmds_fw:1;
@@ -1286,7 +1560,10 @@ typedef union _MFI_CAPABILITIES {
                u32     security_protocol_cmds_fw:1;
                u32     support_ext_queue_depth:1;
                u32     support_ext_io_size:1;
-               u32     reserved:23;
+               u32     support_vfid_in_ioframe:1;
+               u32     support_fp_rlbypass:1;
+               u32     support_qd_throttling:1;
+               u32     reserved:20;
 #endif
        } mfi_capabilities;
        __le32          reg;
@@ -1511,6 +1788,15 @@ union megasas_frame {
        u8 raw_bytes[64];
 };
 
+/**
+ * struct MR_PRIV_DEVICE - sdev private hostdata
+ * @is_tm_capable: firmware managed tm_capable flag
+ * @tm_busy: TM request is in progress
+ */
+struct MR_PRIV_DEVICE {
+       bool is_tm_capable;
+       bool tm_busy;
+};
 struct megasas_cmd;
 
 union megasas_evt_class_locale {
@@ -1700,6 +1986,19 @@ struct MR_DRV_SYSTEM_INFO {
        u8      reserved[1980];
 };
 
+enum MR_PD_TYPE {
+                UNKNOWN_DRIVE = 0,
+                PARALLEL_SCSI = 1,
+                SAS_PD = 2,
+                SATA_PD = 3,
+                FC_PD = 4,
+};
+
+/* JBOD Queue depth definitions */
+#define MEGASAS_SATA_QD        32
+#define MEGASAS_SAS_QD 64
+#define MEGASAS_DEFAULT_PD_QD  64
+
 struct megasas_instance {
 
        __le32 *producer;
@@ -1714,6 +2013,8 @@ struct megasas_instance {
        dma_addr_t vf_affiliation_111_h;
        struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
        dma_addr_t hb_host_mem_h;
+       struct MR_PD_INFO *pd_info;
+       dma_addr_t pd_info_h;
 
        __le32 *reply_queue;
        dma_addr_t reply_queue_h;
@@ -1745,6 +2046,8 @@ struct megasas_instance {
        u16 max_fw_cmds;
        u16 max_mfi_cmds;
        u16 max_scsi_cmds;
+       u16 ldio_threshold;
+       u16 cur_can_queue;
        u32 max_sectors_per_req;
        struct megasas_aen_event *ev;
 
@@ -1762,7 +2065,7 @@ struct megasas_instance {
        struct megasas_evt_detail *evt_detail;
        dma_addr_t evt_detail_h;
        struct megasas_cmd *aen_cmd;
-       struct mutex aen_mutex;
+       struct mutex hba_mutex;
        struct semaphore ioctl_sem;
 
        struct Scsi_Host *host;
@@ -1775,6 +2078,7 @@ struct megasas_instance {
        u32 fw_support_ieee;
 
        atomic_t fw_outstanding;
+       atomic_t ldio_outstanding;
        atomic_t fw_reset_no_pci_access;
 
        struct megasas_instance_template *instancet;
@@ -1797,7 +2101,7 @@ struct megasas_instance {
        u16 drv_supported_vd_count;
        u16 drv_supported_pd_count;
 
-       u8 adprecovery;
+       atomic_t adprecovery;
        unsigned long last_time;
        u32 mfiStatus;
        u32 last_seq_num;
@@ -1827,6 +2131,7 @@ struct megasas_instance {
        u8 mask_interrupts;
        u16 max_chain_frame_sz;
        u8 is_imr;
+       u8 is_rdpq;
        bool dev_handle;
 };
 struct MR_LD_VF_MAP {
@@ -1916,7 +2221,7 @@ struct megasas_instance_template {
        u32 (*init_adapter)(struct megasas_instance *);
        u32 (*build_and_issue_cmd) (struct megasas_instance *,
                                    struct scsi_cmnd *);
-       void (*issue_dcmd) (struct megasas_instance *instance,
+       int (*issue_dcmd)(struct megasas_instance *instance,
                            struct megasas_cmd *cmd);
 };
 
@@ -2014,6 +2319,19 @@ struct megasas_mgmt_info {
        int max_index;
 };
 
+enum MEGASAS_OCR_CAUSE {
+       FW_FAULT_OCR                    = 0,
+       SCSIIO_TIMEOUT_OCR              = 1,
+       MFI_IO_TIMEOUT_OCR              = 2,
+};
+
+enum DCMD_RETURN_STATUS {
+       DCMD_SUCCESS            = 0,
+       DCMD_TIMEOUT            = 1,
+       DCMD_FAILED             = 2,
+       DCMD_NOT_FIRED          = 3,
+};
+
 u8
 MR_BuildRaidContext(struct megasas_instance *instance,
                    struct IO_REQUEST_INFO *io_info,
@@ -2051,4 +2369,8 @@ void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
 int megasas_cmd_type(struct scsi_cmnd *cmd);
 void megasas_setup_jbod_map(struct megasas_instance *instance);
 
+void megasas_update_sdev_properties(struct scsi_device *sdev);
+int megasas_reset_fusion(struct Scsi_Host *shost, int reason);
+int megasas_task_abort_fusion(struct scsi_cmnd *scmd);
+int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
 #endif                         /*LSI_MEGARAID_SAS_H */
index 97a1c1c33b05dd2ba7e2dfe4feb3469dce3b6199..a8313273719189d102ba45a7a5736476b23d6854 100644 (file)
@@ -83,7 +83,7 @@ module_param(throttlequeuedepth, int, S_IRUGO);
 MODULE_PARM_DESC(throttlequeuedepth,
        "Adapter queue depth when throttled due to I/O timeout. Default: 16");
 
-int resetwaittime = MEGASAS_RESET_WAIT_TIME;
+unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
 module_param(resetwaittime, int, S_IRUGO);
 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
                 "before resetting adapter. Default: 180");
@@ -92,6 +92,18 @@ int smp_affinity_enable = 1;
 module_param(smp_affinity_enable, int, S_IRUGO);
 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
 
+int rdpq_enable = 1;
+module_param(rdpq_enable, int, S_IRUGO);
+MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
+
+unsigned int dual_qdepth_disable;
+module_param(dual_qdepth_disable, int, S_IRUGO);
+MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
+
+unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+module_param(scmd_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
+
 MODULE_LICENSE("GPL");
 MODULE_VERSION(MEGASAS_VERSION);
 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
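
The new tunables above (rdpq_enable, dual_qdepth_disable, scmd_timeout) all use the standard module_param()/MODULE_PARM_DESC() pairing: a file-scope variable exposed read-only through sysfs (S_IRUGO) plus a one-line description reported by modinfo. A minimal sketch with a hypothetical knob (example_timeout is not a parameter of this driver):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* visible as /sys/module/<module>/parameters/example_timeout, read-only */
static unsigned int example_timeout = 90;
module_param(example_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(example_timeout, "Example command timeout in seconds. Default: 90");
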
@@ -104,6 +116,8 @@ static int megasas_ld_list_query(struct megasas_instance *instance,
 static int megasas_issue_init_mfi(struct megasas_instance *instance);
 static int megasas_register_aen(struct megasas_instance *instance,
                                u32 seq_num, u32 class_locale_word);
+static int
+megasas_get_pd_info(struct megasas_instance *instance, u16 device_id);
 /*
  * PCI ID table for all supported controllers
  */
@@ -189,18 +203,18 @@ int
 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
        int seconds);
 void megasas_reset_reply_desc(struct megasas_instance *instance);
-int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
 void megasas_fusion_ocr_wq(struct work_struct *work);
 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
                                         int initial);
 int megasas_check_mpio_paths(struct megasas_instance *instance,
                             struct scsi_cmnd *scmd);
 
-void
+int
 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
        instance->instancet->fire_cmd(instance,
                cmd->frame_phys_addr, 0, instance->reg_set);
+       return 0;
 }
 
 /**
@@ -473,7 +487,7 @@ static int
 megasas_check_reset_xscale(struct megasas_instance *instance,
                struct megasas_register_set __iomem *regs)
 {
-       if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+       if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
            (le32_to_cpu(*instance->consumer) ==
                MEGASAS_ADPRESET_INPROG_SIGN))
                return 1;
@@ -609,7 +623,7 @@ static int
 megasas_check_reset_ppc(struct megasas_instance *instance,
                        struct megasas_register_set __iomem *regs)
 {
-       if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+       if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return 1;
 
        return 0;
@@ -746,7 +760,7 @@ static int
 megasas_check_reset_skinny(struct megasas_instance *instance,
                                struct megasas_register_set __iomem *regs)
 {
-       if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+       if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return 1;
 
        return 0;
@@ -940,9 +954,8 @@ static int
 megasas_check_reset_gen2(struct megasas_instance *instance,
                struct megasas_register_set __iomem *regs)
 {
-       if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+       if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return 1;
-       }
 
        return 0;
 }
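
The megasas_check_reset_*() hunks above, and many below, convert instance->adprecovery from a plain field read under hba_lock into an atomic_t, so the fast paths can test the recovery state with a single lock-free load. A sketch of the resulting access pattern with illustrative names (example_state stands in for adprecovery):

#include <linux/types.h>
#include <linux/atomic.h>

enum { EXAMPLE_OPERATIONAL = 0, EXAMPLE_IN_FAULT = 1 };

static atomic_t example_state = ATOMIC_INIT(EXAMPLE_OPERATIONAL);

static bool example_is_operational(void)
{
        /* replaces spin_lock_irqsave(); read; spin_unlock_irqrestore() */
        return atomic_read(&example_state) == EXAMPLE_OPERATIONAL;
}

static void example_enter_fault(void)
{
        atomic_set(&example_state, EXAMPLE_IN_FAULT);
}
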
@@ -983,25 +996,20 @@ extern struct megasas_instance_template megasas_instance_template_fusion;
 int
 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
-       int seconds;
        struct megasas_header *frame_hdr = &cmd->frame->hdr;
 
-       frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+       frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
        frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 
-       /*
-        * Issue the frame using inbound queue port
-        */
-       instance->instancet->issue_dcmd(instance, cmd);
+       if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
+               (instance->instancet->issue_dcmd(instance, cmd))) {
+               dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+                       __func__, __LINE__);
+               return DCMD_NOT_FIRED;
+       }
 
-       /*
-        * Wait for cmd_status to change
-        */
-       if (instance->requestorId)
-               seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
-       else
-               seconds = MFI_POLL_TIMEOUT_SECS;
-       return wait_and_poll(instance, cmd, seconds);
+       return wait_and_poll(instance, cmd, instance->requestorId ?
+                       MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
 }
 
 /**
@@ -1019,21 +1027,29 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
                          struct megasas_cmd *cmd, int timeout)
 {
        int ret = 0;
-
        cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
 
-       instance->instancet->issue_dcmd(instance, cmd);
+       if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
+               (instance->instancet->issue_dcmd(instance, cmd))) {
+               dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+                       __func__, __LINE__);
+               return DCMD_NOT_FIRED;
+       }
+
        if (timeout) {
                ret = wait_event_timeout(instance->int_cmd_wait_q,
                                cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
-               if (!ret)
-                       return 1;
+               if (!ret) {
+                       dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
+                               __func__, __LINE__);
+                       return DCMD_TIMEOUT;
+               }
        } else
                wait_event(instance->int_cmd_wait_q,
                                cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
 
        return (cmd->cmd_status_drv == MFI_STAT_OK) ?
-               0 : 1;
+               DCMD_SUCCESS : DCMD_FAILED;
 }
 
 /**
@@ -1077,15 +1093,20 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
        cmd->sync_cmd = 1;
        cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
 
-       instance->instancet->issue_dcmd(instance, cmd);
+       if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
+               (instance->instancet->issue_dcmd(instance, cmd))) {
+               dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+                       __func__, __LINE__);
+               return DCMD_NOT_FIRED;
+       }
 
        if (timeout) {
                ret = wait_event_timeout(instance->abort_cmd_wait_q,
                                cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
                if (!ret) {
-                       dev_err(&instance->pdev->dev, "Command timedout"
-                               "from %s\n", __func__);
-                       return 1;
+                       dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
+                               __func__, __LINE__);
+                       return DCMD_TIMEOUT;
                }
        } else
                wait_event(instance->abort_cmd_wait_q,
@@ -1094,7 +1115,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
        cmd->sync_cmd = 0;
 
        megasas_return_cmd(instance, cmd);
-       return 0;
+       return (cmd->cmd_status_drv == MFI_STAT_OK) ?
+               DCMD_SUCCESS : DCMD_FAILED;
 }
 
 /**
@@ -1621,7 +1643,7 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
        return 0;
 out_return_cmd:
        megasas_return_cmd(instance, cmd);
-       return 1;
+       return SCSI_MLQUEUE_HOST_BUSY;
 }
 
 
@@ -1634,7 +1656,7 @@ static int
 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 {
        struct megasas_instance *instance;
-       unsigned long flags;
+       struct MR_PRIV_DEVICE *mr_device_priv_data;
 
        instance = (struct megasas_instance *)
            scmd->device->host->hostdata;
@@ -1648,35 +1670,38 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
        if (instance->issuepend_done == 0)
                return SCSI_MLQUEUE_HOST_BUSY;
 
-       spin_lock_irqsave(&instance->hba_lock, flags);
 
        /* Check for an mpio path and adjust behavior */
-       if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+       if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
                if (megasas_check_mpio_paths(instance, scmd) ==
                    (DID_RESET << 16)) {
-                       spin_unlock_irqrestore(&instance->hba_lock, flags);
                        return SCSI_MLQUEUE_HOST_BUSY;
                } else {
-                       spin_unlock_irqrestore(&instance->hba_lock, flags);
                        scmd->result = DID_NO_CONNECT << 16;
                        scmd->scsi_done(scmd);
                        return 0;
                }
        }
 
-       if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
-               spin_unlock_irqrestore(&instance->hba_lock, flags);
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);
                return 0;
        }
 
-       if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
-               spin_unlock_irqrestore(&instance->hba_lock, flags);
-               return SCSI_MLQUEUE_HOST_BUSY;
+       mr_device_priv_data = scmd->device->hostdata;
+       if (!mr_device_priv_data) {
+               scmd->result = DID_NO_CONNECT << 16;
+               scmd->scsi_done(scmd);
+               return 0;
        }
 
-       spin_unlock_irqrestore(&instance->hba_lock, flags);
+       if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
+               return SCSI_MLQUEUE_HOST_BUSY;
+
+       if (mr_device_priv_data->tm_busy)
+               return SCSI_MLQUEUE_DEVICE_BUSY;
+
 
        scmd->result = 0;
 
@@ -1699,12 +1724,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
                break;
        }
 
-       if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
-               dev_err(&instance->pdev->dev, "Err returned from build_and_issue_cmd\n");
-               return SCSI_MLQUEUE_HOST_BUSY;
-       }
-
-       return 0;
+       return instance->instancet->build_and_issue_cmd(instance, scmd);
 
  out_done:
        scmd->scsi_done(scmd);
@@ -1726,27 +1746,39 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
 }
 
 /*
-* megasas_set_dma_alignment - Set DMA alignment for PI enabled VD
+* megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities
 *
 * @sdev: OS provided scsi device
 *
 * Returns void
 */
-static void megasas_set_dma_alignment(struct scsi_device *sdev)
+void megasas_update_sdev_properties(struct scsi_device *sdev)
 {
+       u16 pd_index = 0;
        u32 device_id, ld;
        struct megasas_instance *instance;
        struct fusion_context *fusion;
+       struct MR_PRIV_DEVICE *mr_device_priv_data;
+       struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
        struct MR_LD_RAID *raid;
        struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
 
        instance = megasas_lookup_instance(sdev->host->host_no);
        fusion = instance->ctrl_context;
+       mr_device_priv_data = sdev->hostdata;
 
        if (!fusion)
                return;
 
-       if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS) {
+       if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
+               instance->use_seqnum_jbod_fp) {
+               pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+                       sdev->id;
+               pd_sync = (void *)fusion->pd_seq_sync
+                               [(instance->pd_seq_map_id - 1) & 1];
+               mr_device_priv_data->is_tm_capable =
+                       pd_sync->seq[pd_index].capability.tmCapable;
+       } else {
                device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
                                        + sdev->id;
                local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
@@ -1754,10 +1786,51 @@ static void megasas_set_dma_alignment(struct scsi_device *sdev)
                raid = MR_LdRaidGet(ld, local_map_ptr);
 
                if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
-                       blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+                       blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+               mr_device_priv_data->is_tm_capable =
+                       raid->capability.tmCapable;
+       }
+}
+
+static void megasas_set_device_queue_depth(struct scsi_device *sdev)
+{
+       u16 pd_index = 0;
+       int ret = DCMD_FAILED;
+       struct megasas_instance *instance;
+
+       instance = megasas_lookup_instance(sdev->host->host_no);
+
+       if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+               pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
+
+               if (instance->pd_info) {
+                       mutex_lock(&instance->hba_mutex);
+                       ret = megasas_get_pd_info(instance, pd_index);
+                       mutex_unlock(&instance->hba_mutex);
+               }
+
+               if (ret != DCMD_SUCCESS)
+                       return;
+
+               if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
+
+                       switch (instance->pd_list[pd_index].interface) {
+                       case SAS_PD:
+                               scsi_change_queue_depth(sdev, MEGASAS_SAS_QD);
+                               break;
+
+                       case SATA_PD:
+                               scsi_change_queue_depth(sdev, MEGASAS_SATA_QD);
+                               break;
+
+                       default:
+                               scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD);
+                       }
+               }
        }
 }
 
+
 static int megasas_slave_configure(struct scsi_device *sdev)
 {
        u16 pd_index = 0;
@@ -1774,12 +1847,14 @@ static int megasas_slave_configure(struct scsi_device *sdev)
                                return -ENXIO;
                }
        }
-       megasas_set_dma_alignment(sdev);
+       megasas_set_device_queue_depth(sdev);
+       megasas_update_sdev_properties(sdev);
+
        /*
         * The RAID firmware may require extended timeouts.
         */
        blk_queue_rq_timeout(sdev->request_queue,
-               MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+               scmd_timeout * HZ);
 
        return 0;
 }
@@ -1788,6 +1863,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
 {
        u16 pd_index = 0;
        struct megasas_instance *instance ;
+       struct MR_PRIV_DEVICE *mr_device_priv_data;
 
        instance = megasas_lookup_instance(sdev->host->host_no);
        if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
@@ -1799,13 +1875,26 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
                        sdev->id;
                if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState ==
                        MR_PD_STATE_SYSTEM)) {
-                       return 0;
+                       goto scan_target;
                }
                return -ENXIO;
        }
+
+scan_target:
+       mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
+                                       GFP_KERNEL);
+       if (!mr_device_priv_data)
+               return -ENOMEM;
+       sdev->hostdata = mr_device_priv_data;
        return 0;
 }
 
+static void megasas_slave_destroy(struct scsi_device *sdev)
+{
+       kfree(sdev->hostdata);
+       sdev->hostdata = NULL;
+}
+
 /*
 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
 *                                       kill adapter
@@ -1845,7 +1934,7 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
 void megaraid_sas_kill_hba(struct megasas_instance *instance)
 {
        /* Set critical error to block I/O & ioctls in case caller didn't */
-       instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+       atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
        /* Wait 1 second to ensure IO or ioctls in build have posted */
        msleep(1000);
        if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
@@ -1883,7 +1972,7 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
                spin_lock_irqsave(instance->host->host_lock, flags);
                instance->flag &= ~MEGASAS_FW_BUSY;
 
-               instance->host->can_queue = instance->max_scsi_cmds;
+               instance->host->can_queue = instance->cur_can_queue;
                spin_unlock_irqrestore(instance->host->host_lock, flags);
        }
 }
@@ -1905,7 +1994,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
        unsigned long flags;
 
        /* If we have already declared adapter dead, donot complete cmds */
-       if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
                return;
 
        spin_lock_irqsave(&instance->completion_lock, flags);
@@ -1974,7 +2063,7 @@ void megasas_do_ocr(struct megasas_instance *instance)
                *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
        }
        instance->instancet->disable_intr(instance);
-       instance->adprecovery   = MEGASAS_ADPRESET_SM_INFAULT;
+       atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
        instance->issuepend_done = 0;
 
        atomic_set(&instance->fw_outstanding, 0);
@@ -2054,9 +2143,7 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
        dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
               "scsi%d\n", instance->host->host_no);
 
-       megasas_issue_blocked_cmd(instance, cmd, 0);
-
-       if (dcmd->cmd_status) {
+       if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
                dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
                       " failed with status 0x%x for scsi%d\n",
                       dcmd->cmd_status, instance->host->host_no);
@@ -2166,9 +2253,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
        dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
               "scsi%d\n", instance->host->host_no);
 
-       megasas_issue_blocked_cmd(instance, cmd, 0);
 
-       if (dcmd->cmd_status) {
+       if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
                dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
                       " failed with status 0x%x for scsi%d\n",
                       dcmd->cmd_status, instance->host->host_no);
@@ -2373,21 +2459,21 @@ void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
  */
 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
 {
-       int i;
+       int i, sl, outstanding;
        u32 reset_index;
        u32 wait_time = MEGASAS_RESET_WAIT_TIME;
-       u8 adprecovery;
        unsigned long flags;
        struct list_head clist_local;
        struct megasas_cmd *reset_cmd;
        u32 fw_state;
-       u8 kill_adapter_flag;
 
-       spin_lock_irqsave(&instance->hba_lock, flags);
-       adprecovery = instance->adprecovery;
-       spin_unlock_irqrestore(&instance->hba_lock, flags);
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+               dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
+               __func__, __LINE__);
+               return FAILED;
+       }
 
-       if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+       if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
 
                INIT_LIST_HEAD(&clist_local);
                spin_lock_irqsave(&instance->hba_lock, flags);
@@ -2398,18 +2484,13 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
                dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
                for (i = 0; i < wait_time; i++) {
                        msleep(1000);
-                       spin_lock_irqsave(&instance->hba_lock, flags);
-                       adprecovery = instance->adprecovery;
-                       spin_unlock_irqrestore(&instance->hba_lock, flags);
-                       if (adprecovery == MEGASAS_HBA_OPERATIONAL)
+                       if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
                                break;
                }
 
-               if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+               if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
                        dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
-                       spin_lock_irqsave(&instance->hba_lock, flags);
-                       instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
-                       spin_unlock_irqrestore(&instance->hba_lock, flags);
+                       atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
                        return FAILED;
                }
 
@@ -2447,7 +2528,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
        }
 
        for (i = 0; i < resetwaittime; i++) {
-               int outstanding = atomic_read(&instance->fw_outstanding);
+               outstanding = atomic_read(&instance->fw_outstanding);
 
                if (!outstanding)
                        break;
@@ -2466,67 +2547,60 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
        }
 
        i = 0;
-       kill_adapter_flag = 0;
+       outstanding = atomic_read(&instance->fw_outstanding);
+       fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+
+       if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
+               goto no_outstanding;
+
+       if (instance->disableOnlineCtrlReset)
+               goto kill_hba_and_failed;
        do {
-               fw_state = instance->instancet->read_fw_status_reg(
-                                       instance->reg_set) & MFI_STATE_MASK;
-               if ((fw_state == MFI_STATE_FAULT) &&
-                       (instance->disableOnlineCtrlReset == 0)) {
-                       if (i == 3) {
-                               kill_adapter_flag = 2;
-                               break;
-                       }
+               if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
+                       dev_info(&instance->pdev->dev,
+                               "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
+                               __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
+                       if (i == 3)
+                               goto kill_hba_and_failed;
                        megasas_do_ocr(instance);
-                       kill_adapter_flag = 1;
 
-                       /* wait for 1 secs to let FW finish the pending cmds */
-                       msleep(1000);
+                       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+                               dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
+                               __func__, __LINE__);
+                               return FAILED;
+                       }
+                       dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
+                               __func__, __LINE__);
+
+                       for (sl = 0; sl < 10; sl++)
+                               msleep(500);
+
+                       outstanding = atomic_read(&instance->fw_outstanding);
+
+                       fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+                       if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
+                               goto no_outstanding;
                }
                i++;
        } while (i <= 3);
 
-       if (atomic_read(&instance->fw_outstanding) && !kill_adapter_flag) {
-               if (instance->disableOnlineCtrlReset == 0) {
-                       megasas_do_ocr(instance);
+no_outstanding:
 
-                       /* wait for 5 secs to let FW finish the pending cmds */
-                       for (i = 0; i < wait_time; i++) {
-                               int outstanding =
-                                       atomic_read(&instance->fw_outstanding);
-                               if (!outstanding)
-                                       return SUCCESS;
-                               msleep(1000);
-                       }
-               }
-       }
+       dev_info(&instance->pdev->dev, "%s:%d no pending commands remain after reset handling.\n",
+               __func__, __LINE__);
+       return SUCCESS;
 
-       if (atomic_read(&instance->fw_outstanding) ||
-                                       (kill_adapter_flag == 2)) {
-               dev_notice(&instance->pdev->dev, "pending cmds after reset\n");
-               /*
-                * Send signal to FW to stop processing any pending cmds.
-                * The controller will be taken offline by the OS now.
-                */
-               if ((instance->pdev->device ==
-                       PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-                       (instance->pdev->device ==
-                       PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
-                       writel(MFI_STOP_ADP,
-                               &instance->reg_set->doorbell);
-               } else {
-                       writel(MFI_STOP_ADP,
-                               &instance->reg_set->inbound_doorbell);
-               }
-               megasas_dump_pending_frames(instance);
-               spin_lock_irqsave(&instance->hba_lock, flags);
-               instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
-               spin_unlock_irqrestore(&instance->hba_lock, flags);
-               return FAILED;
-       }
+kill_hba_and_failed:
 
-       dev_notice(&instance->pdev->dev, "no pending cmds after reset\n");
+       /* Reset not supported, kill adapter */
+       dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
+               " disableOnlineCtrlReset %d fw_outstanding %d \n",
+               __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
+               atomic_read(&instance->fw_outstanding));
+       megasas_dump_pending_frames(instance);
+       megaraid_sas_kill_hba(instance);
 
-       return SUCCESS;
+       return FAILED;
 }
 
 /**
@@ -2547,7 +2621,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
        scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
                 scmd->cmnd[0], scmd->retries);
 
-       if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
                return FAILED;
        }
@@ -2575,7 +2649,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
        unsigned long flags;
 
        if (time_after(jiffies, scmd->jiffies_at_alloc +
-                               (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
+                               (scmd_timeout * 2) * HZ)) {
                return BLK_EH_NOT_HANDLED;
        }
 
@@ -2851,6 +2925,16 @@ megasas_page_size_show(struct device *cdev,
        return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
 }
 
+static ssize_t
+megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
+       char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
+}
+
 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
        megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
@@ -2859,12 +2943,15 @@ static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
        megasas_fw_crash_state_show, megasas_fw_crash_state_store);
 static DEVICE_ATTR(page_size, S_IRUGO,
        megasas_page_size_show, NULL);
+static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
+       megasas_ldio_outstanding_show, NULL);
 
 struct device_attribute *megaraid_host_attrs[] = {
        &dev_attr_fw_crash_buffer_size,
        &dev_attr_fw_crash_buffer,
        &dev_attr_fw_crash_state,
        &dev_attr_page_size,
+       &dev_attr_ldio_outstanding,
        NULL,
 };
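
The ldio_outstanding attribute added above follows the usual read-only sysfs recipe: a _show() callback that snprintf()s a single value into the page-sized buffer, a DEVICE_ATTR() definition, and an entry in the host attribute array. A minimal sketch for a hypothetical counter (example_counter is illustrative):

#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/stat.h>
#include <asm/page.h>

static atomic_t example_counter = ATOMIC_INIT(0);

static ssize_t example_counter_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        /* buf is one page; emit the value followed by a newline */
        return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&example_counter));
}

static DEVICE_ATTR(example_counter, S_IRUGO, example_counter_show, NULL);
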
 
@@ -2878,6 +2965,7 @@ static struct scsi_host_template megasas_template = {
        .proc_name = "megaraid_sas",
        .slave_configure = megasas_slave_configure,
        .slave_alloc = megasas_slave_alloc,
+       .slave_destroy = megasas_slave_destroy,
        .queuecommand = megasas_queue_command,
        .eh_device_reset_handler = megasas_reset_device,
        .eh_bus_reset_handler = megasas_reset_bus_host,
@@ -3277,13 +3365,13 @@ process_fw_state_change_wq(struct work_struct *work)
        u32 wait;
        unsigned long flags;
 
-       if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
+       if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
                dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
-                               instance->adprecovery);
+                               atomic_read(&instance->adprecovery));
                return ;
        }
 
-       if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+       if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
                dev_notice(&instance->pdev->dev, "FW detected to be in fault"
                                        "state, restarting it...\n");
 
@@ -3326,7 +3414,7 @@ process_fw_state_change_wq(struct work_struct *work)
                megasas_issue_init_mfi(instance);
 
                spin_lock_irqsave(&instance->hba_lock, flags);
-               instance->adprecovery   = MEGASAS_HBA_OPERATIONAL;
+               atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
                spin_unlock_irqrestore(&instance->hba_lock, flags);
                instance->instancet->enable_intr(instance);
 
@@ -3391,14 +3479,14 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
 
 
                        instance->instancet->disable_intr(instance);
-                       instance->adprecovery   = MEGASAS_ADPRESET_SM_INFAULT;
+                       atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
                        instance->issuepend_done = 0;
 
                        atomic_set(&instance->fw_outstanding, 0);
                        megasas_internal_reset_defer_cmds(instance);
 
                        dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
-                                       fw_state, instance->adprecovery);
+                                       fw_state, atomic_read(&instance->adprecovery));
 
                        schedule_work(&instance->work_init);
                        return IRQ_HANDLED;
@@ -3851,6 +3939,92 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
        return 0;
 }
 
+/*
+ * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
+ * @instance:                          Adapter soft state
+ *
+ * Returns INITIATE_OCR only for Fusion adapters when neither driver load/unload
+ * nor an OCR is in progress; otherwise returns KILL_ADAPTER or IGNORE_TIMEOUT.
+ */
+inline int
+dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
+
+       if (!instance->ctrl_context)
+               return KILL_ADAPTER;
+       else if (instance->unload ||
+                       test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
+               return IGNORE_TIMEOUT;
+       else
+               return INITIATE_OCR;
+}
+
+static int
+megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
+{
+       int ret;
+       struct megasas_cmd *cmd;
+       struct megasas_dcmd_frame *dcmd;
+
+       cmd = megasas_get_cmd(instance);
+
+       if (!cmd) {
+               dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
+               return -ENOMEM;
+       }
+
+       dcmd = &cmd->frame->dcmd;
+
+       memset(instance->pd_info, 0, sizeof(*instance->pd_info));
+       memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+       dcmd->mbox.s[0] = cpu_to_le16(device_id);
+       dcmd->cmd = MFI_CMD_DCMD;
+       dcmd->cmd_status = 0xFF;
+       dcmd->sge_count = 1;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+       dcmd->timeout = 0;
+       dcmd->pad_0 = 0;
+       dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
+       dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
+       dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
+       dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
+
+       if (instance->ctrl_context && !instance->mask_interrupts)
+               ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+       else
+               ret = megasas_issue_polled(instance, cmd);
+
+       switch (ret) {
+       case DCMD_SUCCESS:
+               instance->pd_list[device_id].interface =
+                               instance->pd_info->state.ddf.pdType.intf;
+               break;
+
+       case DCMD_TIMEOUT:
+
+               switch (dcmd_timeout_ocr_possible(instance)) {
+               case INITIATE_OCR:
+                       cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+                       megasas_reset_fusion(instance->host,
+                               MFI_IO_TIMEOUT_OCR);
+                       break;
+               case KILL_ADAPTER:
+                       megaraid_sas_kill_hba(instance);
+                       break;
+               case IGNORE_TIMEOUT:
+                       dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+                               __func__, __LINE__);
+                       break;
+               }
+
+               break;
+       }
+
+       if (ret != DCMD_TIMEOUT)
+               megasas_return_cmd(instance, cmd);
+
+       return ret;
+}
 /*
  * megasas_get_pd_list_info -  Returns FW's pd_list structure
  * @instance:                          Adapter soft state
@@ -3906,42 +4080,72 @@ megasas_get_pd_list(struct megasas_instance *instance)
 
        if (instance->ctrl_context && !instance->mask_interrupts)
                ret = megasas_issue_blocked_cmd(instance, cmd,
-                       MEGASAS_BLOCKED_CMD_TIMEOUT);
+                       MFI_IO_TIMEOUT_SECS);
        else
                ret = megasas_issue_polled(instance, cmd);
 
-       /*
-        * the following function will get the instance PD LIST.
-        */
+       switch (ret) {
+       case DCMD_FAILED:
+               megaraid_sas_kill_hba(instance);
+               break;
+       case DCMD_TIMEOUT:
+
+               switch (dcmd_timeout_ocr_possible(instance)) {
+               case INITIATE_OCR:
+                       cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+                       /*
+                        * DCMD failed from AEN path.
+                        * AEN path already holds reset_mutex to avoid PCI access
+                        * while OCR is in progress.
+                        */
+                       mutex_unlock(&instance->reset_mutex);
+                       megasas_reset_fusion(instance->host,
+                                               MFI_IO_TIMEOUT_OCR);
+                       mutex_lock(&instance->reset_mutex);
+                       break;
+               case KILL_ADAPTER:
+                       megaraid_sas_kill_hba(instance);
+                       break;
+               case IGNORE_TIMEOUT:
+                       dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
+                               __func__, __LINE__);
+                       break;
+               }
+
+               break;
 
-       pd_addr = ci->addr;
+       case DCMD_SUCCESS:
+               pd_addr = ci->addr;
 
-       if (ret == 0 &&
-            (le32_to_cpu(ci->count) <
-                 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
+               if ((le32_to_cpu(ci->count) >
+                       (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
+                       break;
 
                memset(instance->local_pd_list, 0,
-                       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+                               MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
 
                for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
-
                        instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid     =
-                               le16_to_cpu(pd_addr->deviceId);
+                                       le16_to_cpu(pd_addr->deviceId);
                        instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType       =
-                                                       pd_addr->scsiDevType;
+                                       pd_addr->scsiDevType;
                        instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState      =
-                                                       MR_PD_STATE_SYSTEM;
+                                       MR_PD_STATE_SYSTEM;
                        pd_addr++;
                }
+
                memcpy(instance->pd_list, instance->local_pd_list,
                        sizeof(instance->pd_list));
+               break;
+
        }
 
        pci_free_consistent(instance->pdev,
                                MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
                                ci, ci_h);
 
-       megasas_return_cmd(instance, cmd);
+       if (ret != DCMD_TIMEOUT)
+               megasas_return_cmd(instance, cmd);
 
        return ret;
 }
@@ -4002,33 +4206,63 @@ megasas_get_ld_list(struct megasas_instance *instance)
 
        if (instance->ctrl_context && !instance->mask_interrupts)
                ret = megasas_issue_blocked_cmd(instance, cmd,
-                       MEGASAS_BLOCKED_CMD_TIMEOUT);
+                       MFI_IO_TIMEOUT_SECS);
        else
                ret = megasas_issue_polled(instance, cmd);
 
-
        ld_count = le32_to_cpu(ci->ldCount);
 
-       /* the following function will get the instance PD LIST */
+       switch (ret) {
+       case DCMD_FAILED:
+               megaraid_sas_kill_hba(instance);
+               break;
+       case DCMD_TIMEOUT:
+
+               switch (dcmd_timeout_ocr_possible(instance)) {
+               case INITIATE_OCR:
+                       cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+                       /*
+                        * DCMD timed out from the AEN path.
+                        * The AEN path already holds reset_mutex to avoid PCI
+                        * access while OCR is in progress.
+                        */
+                       mutex_unlock(&instance->reset_mutex);
+                       megasas_reset_fusion(instance->host,
+                                               MFI_IO_TIMEOUT_OCR);
+                       mutex_lock(&instance->reset_mutex);
+                       break;
+               case KILL_ADAPTER:
+                       megaraid_sas_kill_hba(instance);
+                       break;
+               case IGNORE_TIMEOUT:
+                       dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+                               __func__, __LINE__);
+                       break;
+               }
+
+               break;
+
+       case DCMD_SUCCESS:
+               if (ld_count > instance->fw_supported_vd_count)
+                       break;
 
-       if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) {
                memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
 
                for (ld_index = 0; ld_index < ld_count; ld_index++) {
                        if (ci->ldList[ld_index].state != 0) {
                                ids = ci->ldList[ld_index].ref.targetId;
-                               instance->ld_ids[ids] =
-                                       ci->ldList[ld_index].ref.targetId;
+                               instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
                        }
                }
+
+               break;
        }
 
-       pci_free_consistent(instance->pdev,
-                               sizeof(struct MR_LD_LIST),
-                               ci,
-                               ci_h);
+       pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
+
+       if (ret != DCMD_TIMEOUT)
+               megasas_return_cmd(instance, cmd);
 
-       megasas_return_cmd(instance, cmd);
        return ret;
 }
 
@@ -4090,26 +4324,61 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
        dcmd->pad_0  = 0;
 
        if (instance->ctrl_context && !instance->mask_interrupts)
-               ret = megasas_issue_blocked_cmd(instance, cmd,
-                       MEGASAS_BLOCKED_CMD_TIMEOUT);
+               ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
        else
                ret = megasas_issue_polled(instance, cmd);
 
-       tgtid_count = le32_to_cpu(ci->count);
+       switch (ret) {
+       case DCMD_FAILED:
+               dev_info(&instance->pdev->dev,
+                       "DCMD not supported by firmware - %s %d\n",
+                               __func__, __LINE__);
+               ret = megasas_get_ld_list(instance);
+               break;
+       case DCMD_TIMEOUT:
+               switch (dcmd_timeout_ocr_possible(instance)) {
+               case INITIATE_OCR:
+                       cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+                       /*
+                        * DCMD timed out from the AEN path.
+                        * The AEN path already holds reset_mutex to avoid PCI
+                        * access while OCR is in progress.
+                        */
+                       mutex_unlock(&instance->reset_mutex);
+                       megasas_reset_fusion(instance->host,
+                                               MFI_IO_TIMEOUT_OCR);
+                       mutex_lock(&instance->reset_mutex);
+                       break;
+               case KILL_ADAPTER:
+                       megaraid_sas_kill_hba(instance);
+                       break;
+               case IGNORE_TIMEOUT:
+                       dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+                               __func__, __LINE__);
+                       break;
+               }
+
+               break;
+       case DCMD_SUCCESS:
+               tgtid_count = le32_to_cpu(ci->count);
+
+               if ((tgtid_count > (instance->fw_supported_vd_count)))
+                       break;
 
-       if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) {
                memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
                for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
                        ids = ci->targetId[ld_index];
                        instance->ld_ids[ids] = ci->targetId[ld_index];
                }
 
+               break;
        }
 
        pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
-                           ci, ci_h);
+                   ci, ci_h);
 
-       megasas_return_cmd(instance, cmd);
+       if (ret != DCMD_TIMEOUT)
+               megasas_return_cmd(instance, cmd);
 
        return ret;
 }
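
On success, the query path above rebuilds instance->ld_ids as a presence map: every slot is reset to 0xff and only the reported target IDs are filled in. A minimal user-space sketch of that idiom follows; MAX_IDS and the reported[] values are made up for illustration.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define MAX_IDS 64              /* illustrative; not the driver's constant */

    int main(void)
    {
            uint8_t ld_ids[MAX_IDS];
            uint8_t reported[] = { 0, 3, 7 };   /* target IDs returned by a query */
            size_t i;

            memset(ld_ids, 0xff, sizeof(ld_ids));   /* 0xff == "no LD at this ID" */
            for (i = 0; i < sizeof(reported); i++)
                    ld_ids[reported[i]] = reported[i];

            for (i = 0; i < MAX_IDS; i++)
                    if (ld_ids[i] != 0xff)
                            printf("LD present at target %zu\n", i);
            return 0;
    }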
@@ -4223,38 +4492,73 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
        dcmd->mbox.b[0] = 1;
 
        if (instance->ctrl_context && !instance->mask_interrupts)
-               ret = megasas_issue_blocked_cmd(instance, cmd,
-                       MEGASAS_BLOCKED_CMD_TIMEOUT);
+               ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
        else
                ret = megasas_issue_polled(instance, cmd);
 
-       if (!ret) {
+       switch (ret) {
+       case DCMD_SUCCESS:
                memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
+               /* Save required controller information in
+                * CPU endianness format.
+                */
                le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
                le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
                le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+
+               /* Update the latest Ext VD info.
+                * From the init path, store the current firmware details.
+                * From the OCR path, detect any change in firmware properties,
+                * e.g. a firmware upgrade without a system reboot.
+                */
                megasas_update_ext_vd_details(instance);
                instance->use_seqnum_jbod_fp =
                        ctrl_info->adapterOperations3.useSeqNumJbodFP;
+
+               /* Check whether controller is iMR or MR */
                instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
                dev_info(&instance->pdev->dev,
-                               "controller type\t: %s(%dMB)\n",
-                               instance->is_imr ? "iMR" : "MR",
-                               le16_to_cpu(ctrl_info->memory_size));
+                       "controller type\t: %s(%dMB)\n",
+                       instance->is_imr ? "iMR" : "MR",
+                       le16_to_cpu(ctrl_info->memory_size));
+
                instance->disableOnlineCtrlReset =
                        ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
-               dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
-                       instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
                instance->secure_jbod_support =
                        ctrl_info->adapterOperations3.supportSecurityonJBOD;
+               dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
+                       instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
                dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
                        instance->secure_jbod_support ? "Yes" : "No");
+               break;
+
+       case DCMD_TIMEOUT:
+               switch (dcmd_timeout_ocr_possible(instance)) {
+               case INITIATE_OCR:
+                       cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+                       megasas_reset_fusion(instance->host,
+                               MFI_IO_TIMEOUT_OCR);
+                       break;
+               case KILL_ADAPTER:
+                       megaraid_sas_kill_hba(instance);
+                       break;
+               case IGNORE_TIMEOUT:
+                       dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+                               __func__, __LINE__);
+                       break;
+               }
+               break;
+
+       case DCMD_FAILED:
+               megaraid_sas_kill_hba(instance);
+               break;
+
        }
 
        pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
                            ci, ci_h);
 
        megasas_return_cmd(instance, cmd);
+
        return ret;
 }
 
@@ -4304,12 +4608,28 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
        dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
 
        if (instance->ctrl_context && !instance->mask_interrupts)
-               ret = megasas_issue_blocked_cmd(instance, cmd,
-                       MEGASAS_BLOCKED_CMD_TIMEOUT);
+               ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
        else
                ret = megasas_issue_polled(instance, cmd);
 
-       megasas_return_cmd(instance, cmd);
+       if (ret == DCMD_TIMEOUT) {
+               switch (dcmd_timeout_ocr_possible(instance)) {
+               case INITIATE_OCR:
+                       cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+                       megasas_reset_fusion(instance->host,
+                                       MFI_IO_TIMEOUT_OCR);
+                       break;
+               case KILL_ADAPTER:
+                       megaraid_sas_kill_hba(instance);
+                       break;
+               case IGNORE_TIMEOUT:
+                       dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+                               __func__, __LINE__);
+                       break;
+               }
+       } else {
+               megasas_return_cmd(instance, cmd);
+       }
+
        return ret;
 }
 
@@ -4426,6 +4746,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
                sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
        }
 
+       instance->cur_can_queue = instance->max_scsi_cmds;
        /*
         * Create a pool of commands
         */
@@ -4756,6 +5077,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
                                instance->msix_vectors = ((scratch_pad_2
                                        & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
                                        >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+                               if (rdpq_enable)
+                                       instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
+                                                               1 : 0;
                                fw_msix_count = instance->msix_vectors;
                                /* Save 1-15 reply post index address to local memory
                                 * Index 0 is already saved from reg offset
@@ -4792,6 +5116,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
        dev_info(&instance->pdev->dev,
                "current msix/online cpus\t: (%d/%d)\n",
                instance->msix_vectors, (unsigned int)num_online_cpus());
+       dev_info(&instance->pdev->dev,
+               "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
 
        tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
                (unsigned long)instance);
@@ -4932,6 +5258,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
                instance->throttlequeuedepth =
                                MEGASAS_THROTTLE_QUEUE_DEPTH;
 
+       if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
+               resetwaittime = MEGASAS_RESET_WAIT_TIME;
+
+       if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
+               scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
 
        /* Launch SR-IOV heartbeat timer */
        if (instance->requestorId) {
@@ -5035,10 +5366,8 @@ megasas_get_seq_num(struct megasas_instance *instance,
        dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
        dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
 
-       if (megasas_issue_blocked_cmd(instance, cmd, 30))
-               dev_err(&instance->pdev->dev, "Command timedout"
-                       "from %s\n", __func__);
-       else {
+       if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
+               DCMD_SUCCESS) {
                /*
                 * Copy the data back into callers buffer
                 */
@@ -5047,7 +5376,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
                eli->clear_seq_num = el_info->clear_seq_num;
                eli->shutdown_seq_num = el_info->shutdown_seq_num;
                eli->boot_seq_num = el_info->boot_seq_num;
-       }
+       } else {
+               dev_err(&instance->pdev->dev, "DCMD failed from %s\n",
+                       __func__);
+       }
 
        pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
                            el_info, el_info_h);
@@ -5262,6 +5593,8 @@ static int megasas_io_attach(struct megasas_instance *instance)
        if (instance->ctrl_context) {
                host->hostt->eh_device_reset_handler = NULL;
                host->hostt->eh_bus_reset_handler = NULL;
+               host->hostt->eh_target_reset_handler = megasas_reset_target_fusion;
+               host->hostt->eh_abort_handler = megasas_task_abort_fusion;
        }
 
        /*
@@ -5447,7 +5780,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
        instance->flag_ieee = 0;
        instance->ev = NULL;
        instance->issuepend_done = 1;
-       instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+       atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
        instance->is_imr = 0;
 
        instance->evt_detail = pci_alloc_consistent(pdev,
@@ -5461,6 +5794,12 @@ static int megasas_probe_one(struct pci_dev *pdev,
                goto fail_alloc_dma_buf;
        }
 
+       instance->pd_info = pci_alloc_consistent(pdev,
+               sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+
+       if (!instance->pd_info)
+               dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
+
        /*
         * Initialize locks and queues
         */
@@ -5476,8 +5815,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
        spin_lock_init(&instance->hba_lock);
        spin_lock_init(&instance->completion_lock);
 
-       mutex_init(&instance->aen_mutex);
        mutex_init(&instance->reset_mutex);
+       mutex_init(&instance->hba_mutex);
 
        /*
         * Initialize PCI related and misc parameters
@@ -5592,6 +5931,10 @@ fail_alloc_dma_buf:
                                    instance->evt_detail,
                                    instance->evt_detail_h);
 
+       if (instance->pd_info)
+               pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+                                       instance->pd_info,
+                                       instance->pd_info_h);
        if (instance->producer)
                pci_free_consistent(pdev, sizeof(u32), instance->producer,
                                    instance->producer_h);
@@ -5616,7 +5959,7 @@ static void megasas_flush_cache(struct megasas_instance *instance)
        struct megasas_cmd *cmd;
        struct megasas_dcmd_frame *dcmd;
 
-       if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
                return;
 
        cmd = megasas_get_cmd(instance);
@@ -5638,9 +5981,12 @@ static void megasas_flush_cache(struct megasas_instance *instance)
        dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
        dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
 
-       if (megasas_issue_blocked_cmd(instance, cmd, 30))
-               dev_err(&instance->pdev->dev, "Command timedout"
-                       " from %s\n", __func__);
+       if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
+                       != DCMD_SUCCESS) {
+               dev_err(&instance->pdev->dev,
+                       "return from %s %d\n", __func__, __LINE__);
+               return;
+       }
 
        megasas_return_cmd(instance, cmd);
 }
@@ -5656,7 +6002,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
        struct megasas_cmd *cmd;
        struct megasas_dcmd_frame *dcmd;
 
-       if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
                return;
 
        cmd = megasas_get_cmd(instance);
@@ -5666,13 +6012,13 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
 
        if (instance->aen_cmd)
                megasas_issue_blocked_abort_cmd(instance,
-                       instance->aen_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT);
+                       instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
        if (instance->map_update_cmd)
                megasas_issue_blocked_abort_cmd(instance,
-                       instance->map_update_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT);
+                       instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
        if (instance->jbod_seq_cmd)
                megasas_issue_blocked_abort_cmd(instance,
-                       instance->jbod_seq_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT);
+                       instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
 
        dcmd = &cmd->frame->dcmd;
 
@@ -5687,9 +6033,12 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
        dcmd->data_xfer_len = 0;
        dcmd->opcode = cpu_to_le32(opcode);
 
-       if (megasas_issue_blocked_cmd(instance, cmd, 30))
-               dev_err(&instance->pdev->dev, "Command timedout"
-                       "from %s\n", __func__);
+       if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
+                       != DCMD_SUCCESS) {
+               dev_err(&instance->pdev->dev,
+                       "return from %s %d\n", __func__, __LINE__);
+               return;
+       }
 
        megasas_return_cmd(instance, cmd);
 }
@@ -5847,6 +6196,10 @@ fail_init_mfi:
                                instance->evt_detail,
                                instance->evt_detail_h);
 
+       if (instance->pd_info)
+               pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+                                       instance->pd_info,
+                                       instance->pd_info_h);
        if (instance->producer)
                pci_free_consistent(pdev, sizeof(u32), instance->producer,
                                instance->producer_h);
@@ -5941,11 +6294,11 @@ static void megasas_detach_one(struct pci_dev *pdev)
                        if (fusion->ld_drv_map[i])
                                free_pages((ulong)fusion->ld_drv_map[i],
                                        fusion->drv_map_pages);
-                               if (fusion->pd_seq_sync)
-                                       dma_free_coherent(&instance->pdev->dev,
-                                               pd_seq_map_sz,
-                                               fusion->pd_seq_sync[i],
-                                               fusion->pd_seq_phys[i]);
+                       if (fusion->pd_seq_sync[i])
+                               dma_free_coherent(&instance->pdev->dev,
+                                       pd_seq_map_sz,
+                                       fusion->pd_seq_sync[i],
+                                       fusion->pd_seq_phys[i]);
                }
                free_pages((ulong)instance->ctrl_context,
                        instance->ctrl_context_pages);
@@ -5965,6 +6318,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
                pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
                                instance->evt_detail, instance->evt_detail_h);
 
+       if (instance->pd_info)
+               pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+                                       instance->pd_info,
+                                       instance->pd_info_h);
        if (instance->vf_affiliation)
                pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
                                    sizeof(struct MR_LD_VF_AFFILIATION),
@@ -6090,7 +6447,7 @@ static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
        for (i = 0; i < megasas_mgmt_info.max_index; i++) {
                local_instance = megasas_mgmt_info.instance[i];
                if (local_instance && local_instance->crash_dump_drv_support) {
-                       if ((local_instance->adprecovery ==
+                       if ((atomic_read(&local_instance->adprecovery) ==
                                MEGASAS_HBA_OPERATIONAL) &&
                                !megasas_set_crash_dump_params(local_instance,
                                        crash_support)) {
@@ -6227,7 +6584,15 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
         * cmd to the SCSI mid-layer
         */
        cmd->sync_cmd = 1;
-       megasas_issue_blocked_cmd(instance, cmd, 0);
+       if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
+               cmd->sync_cmd = 0;
+               dev_err(&instance->pdev->dev,
+                       "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
+                       __func__, __LINE__, cmd->frame->dcmd.opcode,
+                       cmd->cmd_status_drv);
+               return -EBUSY;
+       }
+
        cmd->sync_cmd = 0;
 
        if (instance->unload == 1) {
@@ -6330,7 +6695,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
                goto out_kfree_ioc;
        }
 
-       if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "Controller in crit error\n");
                error = -ENODEV;
                goto out_kfree_ioc;
@@ -6349,7 +6714,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
        for (i = 0; i < wait_time; i++) {
 
                spin_lock_irqsave(&instance->hba_lock, flags);
-               if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+               if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
                        spin_unlock_irqrestore(&instance->hba_lock, flags);
                        break;
                }
@@ -6364,7 +6729,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
        }
 
        spin_lock_irqsave(&instance->hba_lock, flags);
-       if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+       if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
                spin_unlock_irqrestore(&instance->hba_lock, flags);
 
                dev_err(&instance->pdev->dev, "timed out while"
@@ -6406,7 +6771,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
        if (!instance)
                return -ENODEV;
 
-       if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                return -ENODEV;
        }
 
@@ -6417,7 +6782,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
        for (i = 0; i < wait_time; i++) {
 
                spin_lock_irqsave(&instance->hba_lock, flags);
-               if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+               if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
                        spin_unlock_irqrestore(&instance->hba_lock,
                                                flags);
                        break;
@@ -6434,7 +6799,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
        }
 
        spin_lock_irqsave(&instance->hba_lock, flags);
-       if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+       if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
                spin_unlock_irqrestore(&instance->hba_lock, flags);
                dev_err(&instance->pdev->dev, "timed out while waiting"
                                "for HBA to recover\n");
@@ -6442,10 +6807,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
        }
        spin_unlock_irqrestore(&instance->hba_lock, flags);
 
-       mutex_lock(&instance->aen_mutex);
+       mutex_lock(&instance->reset_mutex);
        error = megasas_register_aen(instance, aen.seq_num,
                                     aen.class_locale_word);
-       mutex_unlock(&instance->aen_mutex);
+       mutex_unlock(&instance->reset_mutex);
        return error;
 }
 
@@ -6647,6 +7012,7 @@ megasas_aen_polling(struct work_struct *work)
        int     i, j, doscan = 0;
        u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
        int error;
+       u8  dcmd_ret = DCMD_SUCCESS;
 
        if (!instance) {
                printk(KERN_ERR "invalid instance!\n");
@@ -6659,16 +7025,7 @@ megasas_aen_polling(struct work_struct *work)
                wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
 
        /* Don't run the event workqueue thread if OCR is running */
-       for (i = 0; i < wait_time; i++) {
-               if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
-                       break;
-               if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
-                       dev_notice(&instance->pdev->dev, "%s waiting for "
-                              "controller reset to finish for scsi%d\n",
-                              __func__, instance->host->host_no);
-               }
-               msleep(1000);
-       }
+       mutex_lock(&instance->reset_mutex);
 
        instance->ev = NULL;
        host = instance->host;
@@ -6676,212 +7033,127 @@ megasas_aen_polling(struct work_struct *work)
                megasas_decode_evt(instance);
 
                switch (le32_to_cpu(instance->evt_detail->code)) {
-               case MR_EVT_PD_INSERTED:
-                       if (megasas_get_pd_list(instance) == 0) {
-                       for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
-                               for (j = 0;
-                               j < MEGASAS_MAX_DEV_PER_CHANNEL;
-                               j++) {
-
-                               pd_index =
-                               (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-                               sdev1 = scsi_device_lookup(host, i, j, 0);
-
-                               if (instance->pd_list[pd_index].driveState
-                                               == MR_PD_STATE_SYSTEM) {
-                                       if (!sdev1)
-                                               scsi_add_device(host, i, j, 0);
-
-                                       if (sdev1)
-                                               scsi_device_put(sdev1);
-                                       }
-                               }
-                       }
-                       }
-                       doscan = 0;
-                       break;
 
+               case MR_EVT_PD_INSERTED:
                case MR_EVT_PD_REMOVED:
-                       if (megasas_get_pd_list(instance) == 0) {
-                       for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
-                               for (j = 0;
-                               j < MEGASAS_MAX_DEV_PER_CHANNEL;
-                               j++) {
-
-                               pd_index =
-                               (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-                               sdev1 = scsi_device_lookup(host, i, j, 0);
-
-                               if (instance->pd_list[pd_index].driveState
-                                       == MR_PD_STATE_SYSTEM) {
-                                       if (sdev1)
-                                               scsi_device_put(sdev1);
-                               } else {
-                                       if (sdev1) {
-                                               scsi_remove_device(sdev1);
-                                               scsi_device_put(sdev1);
-                                       }
-                               }
-                               }
-                       }
-                       }
-                       doscan = 0;
+                       dcmd_ret = megasas_get_pd_list(instance);
+                       if (dcmd_ret == DCMD_SUCCESS)
+                               doscan = SCAN_PD_CHANNEL;
                        break;
 
                case MR_EVT_LD_OFFLINE:
                case MR_EVT_CFG_CLEARED:
                case MR_EVT_LD_DELETED:
-                       if (!instance->requestorId ||
-                           megasas_get_ld_vf_affiliation(instance, 0)) {
-                               if (megasas_ld_list_query(instance,
-                                                         MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-                                       megasas_get_ld_list(instance);
-                               for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-                                       for (j = 0;
-                                            j < MEGASAS_MAX_DEV_PER_CHANNEL;
-                                            j++) {
-
-                                               ld_index =
-                                                       (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-                                               sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-
-                                               if (instance->ld_ids[ld_index]
-                                                   != 0xff) {
-                                                       if (sdev1)
-                                                               scsi_device_put(sdev1);
-                                               } else {
-                                                       if (sdev1) {
-                                                               scsi_remove_device(sdev1);
-                                                               scsi_device_put(sdev1);
-                                                       }
-                                               }
-                                       }
-                               }
-                               doscan = 0;
-                       }
-                       break;
                case MR_EVT_LD_CREATED:
                        if (!instance->requestorId ||
-                           megasas_get_ld_vf_affiliation(instance, 0)) {
-                               if (megasas_ld_list_query(instance,
-                                                         MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-                                       megasas_get_ld_list(instance);
-                               for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-                                       for (j = 0;
-                                            j < MEGASAS_MAX_DEV_PER_CHANNEL;
-                                            j++) {
-                                               ld_index =
-                                                       (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-                                               sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-
-                                               if (instance->ld_ids[ld_index]
-                                                   != 0xff) {
-                                                       if (!sdev1)
-                                                               scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-                                               }
-                                               if (sdev1)
-                                                       scsi_device_put(sdev1);
-                                       }
-                               }
-                               doscan = 0;
-                       }
+                               (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+                               dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+                       if (dcmd_ret == DCMD_SUCCESS)
+                               doscan = SCAN_VD_CHANNEL;
+
                        break;
+
                case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
                case MR_EVT_FOREIGN_CFG_IMPORTED:
                case MR_EVT_LD_STATE_CHANGE:
-                       doscan = 1;
+                       dcmd_ret = megasas_get_pd_list(instance);
+
+                       if (dcmd_ret != DCMD_SUCCESS)
+                               break;
+
+                       if (!instance->requestorId ||
+                               (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+                               dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+                       if (dcmd_ret != DCMD_SUCCESS)
+                               break;
+
+                       doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
+                       dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
+                               instance->host->host_no);
                        break;
+
                case MR_EVT_CTRL_PROP_CHANGED:
-                       megasas_get_ctrl_info(instance);
-                       break;
+                       dcmd_ret = megasas_get_ctrl_info(instance);
+                       break;
                default:
                        doscan = 0;
                        break;
                }
        } else {
                dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
+               mutex_unlock(&instance->reset_mutex);
                kfree(ev);
                return;
        }
 
-       if (doscan) {
-               dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
-                      instance->host->host_no);
-               if (megasas_get_pd_list(instance) == 0) {
-                       for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
-                               for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
-                                       pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
-                                       sdev1 = scsi_device_lookup(host, i, j, 0);
-                                       if (instance->pd_list[pd_index].driveState ==
-                                           MR_PD_STATE_SYSTEM) {
-                                               if (!sdev1) {
-                                                       scsi_add_device(host, i, j, 0);
-                                               }
-                                               if (sdev1)
-                                                       scsi_device_put(sdev1);
-                                       } else {
-                                               if (sdev1) {
-                                                       scsi_remove_device(sdev1);
-                                                       scsi_device_put(sdev1);
-                                               }
+       mutex_unlock(&instance->reset_mutex);
+
+       if (doscan & SCAN_PD_CHANNEL) {
+               for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+                       for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+                               pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+                               sdev1 = scsi_device_lookup(host, i, j, 0);
+                               if (instance->pd_list[pd_index].driveState ==
+                                                       MR_PD_STATE_SYSTEM) {
+                                       if (!sdev1)
+                                               scsi_add_device(host, i, j, 0);
+                                       else
+                                               scsi_device_put(sdev1);
+                               } else {
+                                       if (sdev1) {
+                                               scsi_remove_device(sdev1);
+                                               scsi_device_put(sdev1);
                                        }
                                }
                        }
                }
+       }
 
-               if (!instance->requestorId ||
-                   megasas_get_ld_vf_affiliation(instance, 0)) {
-                       if (megasas_ld_list_query(instance,
-                                                 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-                               megasas_get_ld_list(instance);
-                       for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-                               for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
-                                    j++) {
-                                       ld_index =
-                                               (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-                                       sdev1 = scsi_device_lookup(host,
-                                                                  MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-                                       if (instance->ld_ids[ld_index]
-                                           != 0xff) {
-                                               if (!sdev1)
-                                                       scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-                                               else
-                                                       scsi_device_put(sdev1);
-                                       } else {
-                                               if (sdev1) {
-                                                       scsi_remove_device(sdev1);
-                                                       scsi_device_put(sdev1);
-                                               }
+       if (doscan & SCAN_VD_CHANNEL) {
+               for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+                       for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+                               ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+                               sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+                               if (instance->ld_ids[ld_index] != 0xff) {
+                                       if (!sdev1)
+                                               scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+                                       else
+                                               scsi_device_put(sdev1);
+                               } else {
+                                       if (sdev1) {
+                                               scsi_remove_device(sdev1);
+                                               scsi_device_put(sdev1);
                                        }
                                }
                        }
                }
        }
 
-       if (instance->aen_cmd != NULL) {
-               kfree(ev);
-               return ;
-       }
-
-       seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+       if (dcmd_ret == DCMD_SUCCESS)
+               seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+       else
+               seq_num = instance->last_seq_num;
 
        /* Register AEN with FW for latest sequence number plus 1 */
        class_locale.members.reserved = 0;
        class_locale.members.locale = MR_EVT_LOCALE_ALL;
        class_locale.members.class = MR_EVT_CLASS_DEBUG;
-       mutex_lock(&instance->aen_mutex);
+
+       if (instance->aen_cmd != NULL) {
+               kfree(ev);
+               return;
+       }
+
+       mutex_lock(&instance->reset_mutex);
        error = megasas_register_aen(instance, seq_num,
                                        class_locale.word);
-       mutex_unlock(&instance->aen_mutex);
-
        if (error)
-               dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
+               dev_err(&instance->pdev->dev,
+                       "register aen failed error %x\n", error);
 
+       mutex_unlock(&instance->reset_mutex);
        kfree(ev);
 }
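
The reworked poller above no longer rescans inside each event case; it records what changed in a doscan bitmask and runs at most one PD pass and one VD pass after dropping reset_mutex. A standalone sketch of that pattern follows; the SCAN_* bit values and the *_ok flags are assumptions for illustration, not the driver's definitions.

    #include <stdio.h>

    /* Bit values assumed for illustration; see megaraid_sas.h for the real ones. */
    #define SCAN_PD_CHANNEL 0x1
    #define SCAN_VD_CHANNEL 0x2

    int main(void)
    {
            unsigned int doscan = 0;
            int pd_list_ok = 1, ld_query_ok = 1;

            /* Event handling decides which channels need a rescan. */
            if (pd_list_ok)
                    doscan |= SCAN_PD_CHANNEL;
            if (ld_query_ok)
                    doscan |= SCAN_VD_CHANNEL;

            /* The scan passes run after the event mutex is dropped. */
            if (doscan & SCAN_PD_CHANNEL)
                    printf("rescan physical-device channels\n");
            if (doscan & SCAN_VD_CHANNEL)
                    printf("rescan virtual-device channels\n");
            return 0;
    }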
 
index 741509b3561776a874189ad1613984df86129ffe..e413113c86ac17924ae6cd7bce872444c217843a 100644 (file)
@@ -1020,6 +1020,8 @@ MR_BuildRaidContext(struct megasas_instance *instance,
        /* assume this IO needs the full row - we'll adjust if not true */
        regSize             = stripSize;
 
+       io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;
+
        /* Check if we can send this I/O via FastPath */
        if (raid->capability.fpCapable) {
                if (isRead)
index 8d630a552b078721c5c5eea640995046a9907565..be9c3f1b9def0c19d9725c0c2878b2ece864d642 100644 (file)
@@ -91,7 +91,10 @@ void megasas_start_timer(struct megasas_instance *instance,
                        struct timer_list *timer,
                         void *fn, unsigned long interval);
 extern struct megasas_mgmt_info megasas_mgmt_info;
-extern int resetwaittime;
+extern unsigned int resetwaittime;
+extern unsigned int dual_qdepth_disable;
+static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
+static void megasas_free_reply_fusion(struct megasas_instance *instance);
 
 
 
@@ -205,54 +208,67 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
 #endif
 }
 
-
 /**
- * megasas_teardown_frame_pool_fusion -        Destroy the cmd frame DMA pool
- * @instance:                          Adapter soft state
+ * megasas_fusion_update_can_queue -   Do all adapter queue depth related calculations here
+ * @instance:                          Adapter soft state
+ * @fw_boot_context:                   Whether this is called from probe or after OCR
+ *
+ * This function is only for fusion controllers.
+ * Update the host can_queue if the firmware downgrades the maximum supported
+ * firmware commands. The firmware upgrade case is skipped because the
+ * underlying firmware has more resources than are exposed to the OS.
+ *
  */
-static void megasas_teardown_frame_pool_fusion(
-       struct megasas_instance *instance)
+static void
+megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
 {
-       int i;
-       struct fusion_context *fusion = instance->ctrl_context;
-
-       u16 max_cmd = instance->max_fw_cmds;
+       u16 cur_max_fw_cmds = 0;
+       u16 ldio_threshold = 0;
+       struct megasas_register_set __iomem *reg_set;
 
-       struct megasas_cmd_fusion *cmd;
+       reg_set = instance->reg_set;
 
-       if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
-               dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, "
-                      "sense pool : %p\n", fusion->sg_dma_pool,
-                      fusion->sense_dma_pool);
-               return;
-       }
+       cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
 
-       /*
-        * Return all frames to pool
-        */
-       for (i = 0; i < max_cmd; i++) {
+       if (dual_qdepth_disable || !cur_max_fw_cmds)
+               cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+       else
+               ldio_threshold =
+                       (instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
+
+       dev_info(&instance->pdev->dev,
+                       "Current firmware maximum commands: %d\t LDIO threshold: %d\n",
+                       cur_max_fw_cmds, ldio_threshold);
+
+       if (fw_boot_context == OCR_CONTEXT) {
+               cur_max_fw_cmds = cur_max_fw_cmds - 1;
+               if (cur_max_fw_cmds <= instance->max_fw_cmds) {
+                       instance->cur_can_queue =
+                               cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
+                                               MEGASAS_FUSION_IOCTL_CMDS);
+                       instance->host->can_queue = instance->cur_can_queue;
+                       instance->ldio_threshold = ldio_threshold;
+               }
+       } else {
+               instance->max_fw_cmds = cur_max_fw_cmds;
+               instance->ldio_threshold = ldio_threshold;
 
-               cmd = fusion->cmd_list[i];
+               if (!instance->is_rdpq)
+                       instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);
 
-               if (cmd->sg_frame)
-                       pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
-                                     cmd->sg_frame_phys_addr);
+               /*
+               /*
+                * Reduce the max supported cmds by 1. This is to ensure that the
+                * reply_q_sz (1 more than the max cmd that driver may send)
+                * does not exceed max cmds that the FW can support
+                */
 
-               if (cmd->sense)
-                       pci_pool_free(fusion->sense_dma_pool, cmd->sense,
-                                     cmd->sense_phys_addr);
+               instance->max_scsi_cmds = instance->max_fw_cmds -
+                               (MEGASAS_FUSION_INTERNAL_CMDS +
+                               MEGASAS_FUSION_IOCTL_CMDS);
+               instance->cur_can_queue = instance->max_scsi_cmds;
        }
-
-       /*
-        * Now destroy the pool itself
-        */
-       pci_pool_destroy(fusion->sg_dma_pool);
-       pci_pool_destroy(fusion->sense_dma_pool);
-
-       fusion->sg_dma_pool = NULL;
-       fusion->sense_dma_pool = NULL;
 }
-
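
A worked example of the queue-depth arithmetic in megasas_fusion_update_can_queue, under assumed numbers: the reserved-command counts below are placeholders, not the driver's MEGASAS_FUSION_INTERNAL_CMDS/MEGASAS_FUSION_IOCTL_CMDS values.

    #include <stdio.h>

    /* Reserved-command counts are placeholders, not the driver's macros. */
    #define INTERNAL_CMDS 5
    #define IOCTL_CMDS    3

    int main(void)
    {
            unsigned int scratch = 1008;              /* commands reported by firmware */
            unsigned int max_fw_cmds = scratch - 1;   /* keep reply_q_sz <= FW max */
            unsigned int max_scsi_cmds = max_fw_cmds - (INTERNAL_CMDS + IOCTL_CMDS);

            printf("max_fw_cmds=%u can_queue=%u\n", max_fw_cmds, max_scsi_cmds);
            return 0;
    }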
 /**
  * megasas_free_cmds_fusion -  Free all the cmds in the free cmd pool
  * @instance:          Adapter soft state
@@ -262,55 +278,65 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
 {
        int i;
        struct fusion_context *fusion = instance->ctrl_context;
+       struct megasas_cmd_fusion *cmd;
 
-       u32 max_cmds, req_sz, reply_sz, io_frames_sz;
+       /* SG, Sense */
+       for (i = 0; i < instance->max_fw_cmds; i++) {
+               cmd = fusion->cmd_list[i];
+               if (cmd) {
+                       if (cmd->sg_frame)
+                               pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
+                                     cmd->sg_frame_phys_addr);
+                       if (cmd->sense)
+                               pci_pool_free(fusion->sense_dma_pool, cmd->sense,
+                                     cmd->sense_phys_addr);
+               }
+       }
 
+       if (fusion->sg_dma_pool) {
+               pci_pool_destroy(fusion->sg_dma_pool);
+               fusion->sg_dma_pool = NULL;
+       }
+       if (fusion->sense_dma_pool) {
+               pci_pool_destroy(fusion->sense_dma_pool);
+               fusion->sense_dma_pool = NULL;
+       }
 
-       req_sz = fusion->request_alloc_sz;
-       reply_sz = fusion->reply_alloc_sz;
-       io_frames_sz = fusion->io_frames_alloc_sz;
 
-       max_cmds = instance->max_fw_cmds;
+       /* Reply Frame, Desc */
+       if (instance->is_rdpq)
+               megasas_free_rdpq_fusion(instance);
+       else
+               megasas_free_reply_fusion(instance);
 
-       /* Free descriptors and request Frames memory */
+       /* Request Frame, Desc */
        if (fusion->req_frames_desc)
-               dma_free_coherent(&instance->pdev->dev, req_sz,
-                                 fusion->req_frames_desc,
-                                 fusion->req_frames_desc_phys);
-
-       if (fusion->reply_frames_desc) {
-               pci_pool_free(fusion->reply_frames_desc_pool,
-                             fusion->reply_frames_desc,
-                             fusion->reply_frames_desc_phys);
-               pci_pool_destroy(fusion->reply_frames_desc_pool);
-       }
-
-       if (fusion->io_request_frames) {
+               dma_free_coherent(&instance->pdev->dev,
+                       fusion->request_alloc_sz, fusion->req_frames_desc,
+                       fusion->req_frames_desc_phys);
+       if (fusion->io_request_frames)
                pci_pool_free(fusion->io_request_frames_pool,
-                             fusion->io_request_frames,
-                             fusion->io_request_frames_phys);
+                       fusion->io_request_frames,
+                       fusion->io_request_frames_phys);
+       if (fusion->io_request_frames_pool) {
                pci_pool_destroy(fusion->io_request_frames_pool);
+               fusion->io_request_frames_pool = NULL;
        }
 
-       /* Free the Fusion frame pool */
-       megasas_teardown_frame_pool_fusion(instance);
 
-       /* Free all the commands in the cmd_list */
-       for (i = 0; i < max_cmds; i++)
+       /* cmd_list */
+       for (i = 0; i < instance->max_fw_cmds; i++)
                kfree(fusion->cmd_list[i]);
 
-       /* Free the cmd_list buffer itself */
        kfree(fusion->cmd_list);
-       fusion->cmd_list = NULL;
-
 }
 
 /**
- * megasas_create_frame_pool_fusion -  Creates DMA pool for cmd frames
+ * megasas_create_sg_sense_fusion -    Creates DMA pool for cmd frames
  * @instance:                  Adapter soft state
  *
  */
-static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
+static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 {
        int i;
        u32 max_cmd;
@@ -321,25 +347,17 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
        max_cmd = instance->max_fw_cmds;
 
 
-       /*
-        * Use DMA pool facility provided by PCI layer
-        */
-
-       fusion->sg_dma_pool = pci_pool_create("sg_pool_fusion", instance->pdev,
-                                               instance->max_chain_frame_sz,
-                                               4, 0);
-       if (!fusion->sg_dma_pool) {
-               dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n");
-               return -ENOMEM;
-       }
-       fusion->sense_dma_pool = pci_pool_create("sense pool fusion",
-                                                instance->pdev,
-                                                SCSI_SENSE_BUFFERSIZE, 64, 0);
+       fusion->sg_dma_pool =
+                       pci_pool_create("mr_sg", instance->pdev,
+                               instance->max_chain_frame_sz, 4, 0);
+       /* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
+       fusion->sense_dma_pool =
+                       pci_pool_create("mr_sense", instance->pdev,
+                               SCSI_SENSE_BUFFERSIZE, 64, 0);
 
-       if (!fusion->sense_dma_pool) {
-               dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n");
-               pci_pool_destroy(fusion->sg_dma_pool);
-               fusion->sg_dma_pool = NULL;
+       if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
+               dev_err(&instance->pdev->dev,
+                       "Failed from %s %d\n",  __func__, __LINE__);
                return -ENOMEM;
        }
 
@@ -347,160 +365,280 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
         * Allocate and attach a frame to each of the commands in cmd_list
         */
        for (i = 0; i < max_cmd; i++) {
-
                cmd = fusion->cmd_list[i];
-
                cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
-                                              GFP_KERNEL,
-                                              &cmd->sg_frame_phys_addr);
+                                       GFP_KERNEL, &cmd->sg_frame_phys_addr);
 
                cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
-                                           GFP_KERNEL, &cmd->sense_phys_addr);
-               /*
-                * megasas_teardown_frame_pool_fusion() takes care of freeing
-                * whatever has been allocated
-                */
+                                       GFP_KERNEL, &cmd->sense_phys_addr);
                if (!cmd->sg_frame || !cmd->sense) {
-                       dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
-                       megasas_teardown_frame_pool_fusion(instance);
+                       dev_err(&instance->pdev->dev,
+                               "Failed from %s %d\n",  __func__, __LINE__);
                        return -ENOMEM;
                }
        }
        return 0;
 }
 
-/**
- * megasas_alloc_cmds_fusion - Allocates the command packets
- * @instance:          Adapter soft state
- *
- *
- * Each frame has a 32-bit field called context. This context is used to get
- * back the megasas_cmd_fusion from the frame when a frame gets completed
- * In this driver, the 32 bit values are the indices into an array cmd_list.
- * This array is used only to look up the megasas_cmd_fusion given the context.
- * The free commands themselves are maintained in a linked list called cmd_pool.
- *
- * cmds are formed in the io_request and sg_frame members of the
- * megasas_cmd_fusion. The context field is used to get a request descriptor
- * and is used as SMID of the cmd.
- * SMID value range is from 1 to max_fw_cmds.
- */
 int
-megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
 {
-       int i, j, count;
-       u32 max_cmd, io_frames_sz;
+       u32 max_cmd, i;
        struct fusion_context *fusion;
-       struct megasas_cmd_fusion *cmd;
-       union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
-       u32 offset;
-       dma_addr_t io_req_base_phys;
-       u8 *io_req_base;
 
        fusion = instance->ctrl_context;
 
        max_cmd = instance->max_fw_cmds;
 
+       /*
+        * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
+        * Allocate the dynamic array first and then allocate individual
+        * commands.
+        */
+       fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd,
+                                               GFP_KERNEL);
+       if (!fusion->cmd_list) {
+               dev_err(&instance->pdev->dev,
+                       "Failed from %s %d\n",  __func__, __LINE__);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < max_cmd; i++) {
+               fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
+                                             GFP_KERNEL);
+               if (!fusion->cmd_list[i]) {
+                       dev_err(&instance->pdev->dev,
+                               "Failed from %s %d\n",  __func__, __LINE__);
+                       return -ENOMEM;
+               }
+       }
+       return 0;
+}
+
+int
+megasas_alloc_request_fusion(struct megasas_instance *instance)
+{
+       struct fusion_context *fusion;
+
+       fusion = instance->ctrl_context;
+
        fusion->req_frames_desc =
                dma_alloc_coherent(&instance->pdev->dev,
-                                  fusion->request_alloc_sz,
-                                  &fusion->req_frames_desc_phys, GFP_KERNEL);
-
+                       fusion->request_alloc_sz,
+                       &fusion->req_frames_desc_phys, GFP_KERNEL);
        if (!fusion->req_frames_desc) {
-               dev_err(&instance->pdev->dev, "Could not allocate memory for "
-                      "request_frames\n");
-               goto fail_req_desc;
+               dev_err(&instance->pdev->dev,
+                       "Failed from %s %d\n",  __func__, __LINE__);
+               return -ENOMEM;
+       }
+
+       fusion->io_request_frames_pool =
+                       pci_pool_create("mr_ioreq", instance->pdev,
+                               fusion->io_frames_alloc_sz, 16, 0);
+
+       if (!fusion->io_request_frames_pool) {
+               dev_err(&instance->pdev->dev,
+                       "Failed from %s %d\n",  __func__, __LINE__);
+               return -ENOMEM;
+       }
+
+       fusion->io_request_frames =
+                       pci_pool_alloc(fusion->io_request_frames_pool,
+                               GFP_KERNEL, &fusion->io_request_frames_phys);
+       if (!fusion->io_request_frames) {
+               dev_err(&instance->pdev->dev,
+                       "Failed from %s %d\n",  __func__, __LINE__);
+               return -ENOMEM;
        }
+       return 0;
+}
+
+int
+megasas_alloc_reply_fusion(struct megasas_instance *instance)
+{
+       int i, count;
+       struct fusion_context *fusion;
+       union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+       fusion = instance->ctrl_context;
 
        count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
        fusion->reply_frames_desc_pool =
-               pci_pool_create("reply_frames pool", instance->pdev,
+                       pci_pool_create("mr_reply", instance->pdev,
                                fusion->reply_alloc_sz * count, 16, 0);
 
        if (!fusion->reply_frames_desc_pool) {
-               dev_err(&instance->pdev->dev, "Could not allocate memory for "
-                      "reply_frame pool\n");
-               goto fail_reply_desc;
+               dev_err(&instance->pdev->dev,
+                       "Failed from %s %d\n",  __func__, __LINE__);
+               return -ENOMEM;
        }
 
-       fusion->reply_frames_desc =
-               pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
-                              &fusion->reply_frames_desc_phys);
-       if (!fusion->reply_frames_desc) {
-               dev_err(&instance->pdev->dev, "Could not allocate memory for "
-                      "reply_frame pool\n");
-               pci_pool_destroy(fusion->reply_frames_desc_pool);
-               goto fail_reply_desc;
+       fusion->reply_frames_desc[0] =
+               pci_pool_alloc(fusion->reply_frames_desc_pool,
+                       GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
+       if (!fusion->reply_frames_desc[0]) {
+               dev_err(&instance->pdev->dev,
+                       "Failed from %s %d\n",  __func__, __LINE__);
+               return -ENOMEM;
        }
-
-       reply_desc = fusion->reply_frames_desc;
+       reply_desc = fusion->reply_frames_desc[0];
        for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
                reply_desc->Words = cpu_to_le64(ULLONG_MAX);
 
-       io_frames_sz = fusion->io_frames_alloc_sz;
+       /* This is not RDPQ mode, but the driver still populates the
+        * reply_frames_desc array so that the same MSI-x index can be
+        * used in the ISR path.
+        */
+       for (i = 0; i < (count - 1); i++)
+               fusion->reply_frames_desc[i + 1] =
+                       fusion->reply_frames_desc[i] +
+                       (fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
 
-       fusion->io_request_frames_pool =
-               pci_pool_create("io_request_frames pool", instance->pdev,
-                               fusion->io_frames_alloc_sz, 16, 0);
+       return 0;
+}
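
[Editor's note] The loop above carves one contiguous reply allocation into per-MSI-x queue base pointers, so the ISR can index fusion->reply_frames_desc[MSIxIndex] the same way in RDPQ and non-RDPQ modes. A minimal standalone sketch of that pointer arithmetic follows; the names and sizes are illustrative assumptions, not the driver's API.

/*
 * Sketch: derive per-queue descriptor pointers from one contiguous
 * region, assuming the per-queue size is a whole number of descriptors
 * (as the driver does).  Illustrative names only.
 */
#include <stdio.h>

struct reply_desc {
        unsigned long long words;
};

int main(void)
{
        enum { QUEUES = 4, DESC_PER_QUEUE = 64 };
        static struct reply_desc pool[QUEUES * DESC_PER_QUEUE];
        struct reply_desc *queue[QUEUES];
        int i;

        queue[0] = pool;
        for (i = 0; i < QUEUES - 1; i++)
                queue[i + 1] = queue[i] + DESC_PER_QUEUE;

        for (i = 0; i < QUEUES; i++)
                printf("queue %d starts at descriptor %td\n",
                       i, queue[i] - pool);
        return 0;
}
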
 
-       if (!fusion->io_request_frames_pool) {
-               dev_err(&instance->pdev->dev, "Could not allocate memory for "
-                      "io_request_frame pool\n");
-               goto fail_io_frames;
+int
+megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
+{
+       int i, j, count;
+       struct fusion_context *fusion;
+       union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+
+       fusion = instance->ctrl_context;
+
+       fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
+                               sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
+                               &fusion->rdpq_phys);
+       if (!fusion->rdpq_virt) {
+               dev_err(&instance->pdev->dev,
+                       "Failed from %s %d\n",  __func__, __LINE__);
+               return -ENOMEM;
        }
 
-       fusion->io_request_frames =
-               pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
-                              &fusion->io_request_frames_phys);
-       if (!fusion->io_request_frames) {
-               dev_err(&instance->pdev->dev, "Could not allocate memory for "
-                      "io_request_frames frames\n");
-               pci_pool_destroy(fusion->io_request_frames_pool);
-               goto fail_io_frames;
+       memset(fusion->rdpq_virt, 0,
+                       sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
+       count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+       fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq",
+                                                        instance->pdev, fusion->reply_alloc_sz, 16, 0);
+
+       if (!fusion->reply_frames_desc_pool) {
+               dev_err(&instance->pdev->dev,
+                       "Failed from %s %d\n",  __func__, __LINE__);
+               return -ENOMEM;
        }
 
-       /*
-        * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
-        * Allocate the dynamic array first and then allocate individual
-        * commands.
-        */
-       fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
-                                  * max_cmd, GFP_KERNEL);
+       for (i = 0; i < count; i++) {
+               fusion->reply_frames_desc[i] =
+                               pci_pool_alloc(fusion->reply_frames_desc_pool,
+                                       GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
+               if (!fusion->reply_frames_desc[i]) {
+                       dev_err(&instance->pdev->dev,
+                               "Failed from %s %d\n",  __func__, __LINE__);
+                       return -ENOMEM;
+               }
 
-       if (!fusion->cmd_list) {
-               dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc "
-                      "memory for cmd_list_fusion\n");
-               goto fail_cmd_list;
+               fusion->rdpq_virt[i].RDPQBaseAddress =
+                       fusion->reply_frames_desc_phys[i];
+
+               reply_desc = fusion->reply_frames_desc[i];
+               for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
+                       reply_desc->Words = cpu_to_le64(ULLONG_MAX);
        }
+       return 0;
+}
 
-       max_cmd = instance->max_fw_cmds;
-       for (i = 0; i < max_cmd; i++) {
-               fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
-                                             GFP_KERNEL);
-               if (!fusion->cmd_list[i]) {
-                       dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n");
+static void
+megasas_free_rdpq_fusion(struct megasas_instance *instance)
+{
+       int i;
+       struct fusion_context *fusion;
 
-                       for (j = 0; j < i; j++)
-                               kfree(fusion->cmd_list[j]);
+       fusion = instance->ctrl_context;
 
-                       kfree(fusion->cmd_list);
-                       fusion->cmd_list = NULL;
-                       goto fail_cmd_list;
-               }
+       for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
+               if (fusion->reply_frames_desc[i])
+                       pci_pool_free(fusion->reply_frames_desc_pool,
+                               fusion->reply_frames_desc[i],
+                               fusion->reply_frames_desc_phys[i]);
        }
 
-       /* The first 256 bytes (SMID 0) is not used. Don't add to cmd list */
-       io_req_base = fusion->io_request_frames +
-               MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
-       io_req_base_phys = fusion->io_request_frames_phys +
-               MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+       if (fusion->reply_frames_desc_pool)
+               pci_pool_destroy(fusion->reply_frames_desc_pool);
+
+       if (fusion->rdpq_virt)
+               pci_free_consistent(instance->pdev,
+                       sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
+                       fusion->rdpq_virt, fusion->rdpq_phys);
+}
+
+static void
+megasas_free_reply_fusion(struct megasas_instance *instance)
+{
+       struct fusion_context *fusion;
+
+       fusion = instance->ctrl_context;
+
+       if (fusion->reply_frames_desc[0])
+               pci_pool_free(fusion->reply_frames_desc_pool,
+                       fusion->reply_frames_desc[0],
+                       fusion->reply_frames_desc_phys[0]);
+
+       if (fusion->reply_frames_desc_pool)
+               pci_pool_destroy(fusion->reply_frames_desc_pool);
+
+}
+
+
+/**
+ * megasas_alloc_cmds_fusion - Allocates the command packets
+ * @instance:          Adapter soft state
+ *
+ * Each frame has a 32-bit field called context. This context is used to get
+ * back the megasas_cmd_fusion from the frame when a frame gets completed.
+ * In this driver, the 32-bit values are indices into the cmd_list array.
+ * This array is used only to look up the megasas_cmd_fusion given the context.
+ * The free commands themselves are maintained in a linked list called cmd_pool.
+ *
+ * Commands are built in the io_request and sg_frame members of the
+ * megasas_cmd_fusion. The context field is used to get a request descriptor
+ * and is used as the SMID of the command.
+ * The SMID value range is from 1 to max_fw_cmds.
+ */
+int
+megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+{
+       int i;
+       struct fusion_context *fusion;
+       struct megasas_cmd_fusion *cmd;
+       u32 offset;
+       dma_addr_t io_req_base_phys;
+       u8 *io_req_base;
+
+
+       fusion = instance->ctrl_context;
+
+       if (megasas_alloc_cmdlist_fusion(instance))
+               goto fail_exit;
+
+       if (megasas_alloc_request_fusion(instance))
+               goto fail_exit;
+
+       if (instance->is_rdpq) {
+               if (megasas_alloc_rdpq_fusion(instance))
+                       goto fail_exit;
+       } else
+               if (megasas_alloc_reply_fusion(instance))
+                       goto fail_exit;
+
+
+       /* The first 256 bytes (SMID 0) are not used. Don't add them to the cmd list */
+       io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+       io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
 
        /*
         * Add all the commands to command pool (fusion->cmd_pool)
         */
 
        /* SMID 0 is reserved. Set SMID/index from 1 */
-       for (i = 0; i < max_cmd; i++) {
+       for (i = 0; i < instance->max_fw_cmds; i++) {
                cmd = fusion->cmd_list[i];
                offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
                memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
@@ -518,35 +656,13 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
                cmd->io_request_phys_addr = io_req_base_phys + offset;
        }
 
-       /*
-        * Create a frame pool and assign one frame to each cmd
-        */
-       if (megasas_create_frame_pool_fusion(instance)) {
-               dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
-               megasas_free_cmds_fusion(instance);
-               goto fail_req_desc;
-       }
+       if (megasas_create_sg_sense_fusion(instance))
+               goto fail_exit;
 
        return 0;
 
-fail_cmd_list:
-       pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
-                     fusion->io_request_frames_phys);
-       pci_pool_destroy(fusion->io_request_frames_pool);
-fail_io_frames:
-       dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
-                         fusion->reply_frames_desc,
-                         fusion->reply_frames_desc_phys);
-       pci_pool_free(fusion->reply_frames_desc_pool,
-                     fusion->reply_frames_desc,
-                     fusion->reply_frames_desc_phys);
-       pci_pool_destroy(fusion->reply_frames_desc_pool);
-
-fail_reply_desc:
-       dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
-                         fusion->req_frames_desc,
-                         fusion->req_frames_desc_phys);
-fail_req_desc:
+fail_exit:
+       megasas_free_cmds_fusion(instance);
        return -ENOMEM;
 }
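
[Editor's note] The kernel-doc for megasas_alloc_cmds_fusion() describes how the 32-bit context stored in each frame doubles as an index into cmd_list, with SMID = index + 1 because SMID 0 is reserved. A self-contained sketch of that lookup scheme follows; all names and sizes here are illustrative assumptions, not driver code.

/*
 * Sketch of the context/SMID scheme: commands live in an array, the
 * frame's 32-bit context is the array index, and SMID = index + 1.
 */
#include <stdio.h>
#include <stdlib.h>

struct sketch_cmd {
        unsigned int index;     /* position in cmd_list */
        unsigned int smid;      /* index + 1, handed to firmware */
};

int main(void)
{
        const unsigned int max_cmds = 8;
        struct sketch_cmd **cmd_list;
        unsigned int i, context;

        cmd_list = calloc(max_cmds, sizeof(*cmd_list));
        if (!cmd_list)
                return 1;
        for (i = 0; i < max_cmds; i++) {
                cmd_list[i] = calloc(1, sizeof(**cmd_list));
                if (!cmd_list[i])
                        return 1;
                cmd_list[i]->index = i;
                cmd_list[i]->smid = i + 1;      /* SMID range is 1..max_cmds */
        }

        /* Completion path: the context from the frame indexes cmd_list. */
        context = 5;
        printf("context %u -> SMID %u\n", context, cmd_list[context]->smid);

        for (i = 0; i < max_cmds; i++)
                free(cmd_list[i]);
        free(cmd_list);
        return 0;
}
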
 
@@ -576,11 +692,12 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
                msleep(20);
        }
 
-       if (frame_hdr->cmd_status == 0xff)
-               return -ETIME;
-
-       return (frame_hdr->cmd_status == MFI_STAT_OK) ?
-               0 : 1;
+       if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
+               return DCMD_TIMEOUT;
+       else if (frame_hdr->cmd_status == MFI_STAT_OK)
+               return DCMD_SUCCESS;
+       else
+               return DCMD_FAILED;
 }
 
 /**
@@ -593,16 +710,17 @@ int
 megasas_ioc_init_fusion(struct megasas_instance *instance)
 {
        struct megasas_init_frame *init_frame;
-       struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
+       struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
        dma_addr_t      ioc_init_handle;
        struct megasas_cmd *cmd;
-       u8 ret;
+       u8 ret, cur_rdpq_mode;
        struct fusion_context *fusion;
        union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
        int i;
        struct megasas_header *frame_hdr;
        const char *sys_info;
        MFI_CAPABILITIES *drv_ops;
+       u32 scratch_pad_2;
 
        fusion = instance->ctrl_context;
 
@@ -614,6 +732,18 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                goto fail_get_cmd;
        }
 
+       scratch_pad_2 = readl
+               (&instance->reg_set->outbound_scratch_pad_2);
+
+       cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+
+       if (instance->is_rdpq && !cur_rdpq_mode) {
+               dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
+                       " from RDPQ mode to non-RDPQ mode\n");
+               ret = 1;
+               goto fail_fw_init;
+       }
+
        IOCInitMessage =
          dma_alloc_coherent(&instance->pdev->dev,
                             sizeof(struct MPI2_IOC_INIT_REQUEST),
@@ -635,7 +765,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
        IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
 
        IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
-       IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
+       IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
+                       cpu_to_le64(fusion->rdpq_phys) :
+                       cpu_to_le64(fusion->reply_frames_desc_phys[0]);
+       IOCInitMessage->MsgFlags = instance->is_rdpq ?
+                       MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
        IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
        IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
        init_frame = (struct megasas_init_frame *)cmd->frame;
@@ -665,6 +799,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
        if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
                drv_ops->mfi_capabilities.support_ext_io_size = 1;
 
+       drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
+       if (!dual_qdepth_disable)
+               drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
+
+       drv_ops->mfi_capabilities.support_qd_throttling = 1;
        /* Convert capability to LE32 */
        cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
 
@@ -784,7 +923,8 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
 
        /* Below code is only for non pended DCMD */
        if (instance->ctrl_context && !instance->mask_interrupts)
-               ret = megasas_issue_blocked_cmd(instance, cmd, 60);
+               ret = megasas_issue_blocked_cmd(instance, cmd,
+                       MFI_IO_TIMEOUT_SECS);
        else
                ret = megasas_issue_polled(instance, cmd);
 
@@ -795,7 +935,10 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
                ret = -EINVAL;
        }
 
-       if (!ret)
+       if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+               megaraid_sas_kill_hba(instance);
+
+       if (ret == DCMD_SUCCESS)
                instance->pd_seq_map_id++;
 
        megasas_return_cmd(instance, cmd);
@@ -875,10 +1018,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
 
        if (instance->ctrl_context && !instance->mask_interrupts)
                ret = megasas_issue_blocked_cmd(instance, cmd,
-                       MEGASAS_BLOCKED_CMD_TIMEOUT);
+                       MFI_IO_TIMEOUT_SECS);
        else
                ret = megasas_issue_polled(instance, cmd);
 
+       if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+               megaraid_sas_kill_hba(instance);
+
        megasas_return_cmd(instance, cmd);
 
        return ret;
@@ -1072,12 +1218,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
 
        reg_set = instance->reg_set;
 
-       /*
-        * Get various operational parameters from status register
-        */
-       instance->max_fw_cmds =
-               instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
-       instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
+       megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
 
        /*
         * Reduce the max supported cmds by 1. This is to ensure that the
@@ -1658,7 +1799,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
                                   local_map_ptr, start_lba_lo);
                io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
                cmd->request_desc->SCSIIO.RequestFlags =
-                       (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
+                       (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
                         << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
                if (fusion->adapter_type == INVADER_SERIES) {
                        if (io_request->RaidContext.regLockFlags ==
@@ -1702,8 +1843,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
                        (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
                         << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
                if (fusion->adapter_type == INVADER_SERIES) {
-                       if (io_request->RaidContext.regLockFlags ==
-                           REGION_TYPE_UNUSED)
+                       if (io_info.do_fp_rlbypass ||
+                               (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED))
                                cmd->request_desc->SCSIIO.RequestFlags =
                                        (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
                                        MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -1791,7 +1932,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
 
                /* build request descriptor */
                cmd->request_desc->SCSIIO.RequestFlags =
-                       (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+                       (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
                        MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
                cmd->request_desc->SCSIIO.DevHandle = devHandle;
 
@@ -1897,7 +2038,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
                                cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
                }
                cmd->request_desc->SCSIIO.RequestFlags =
-                       (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+                       (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
                                MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
        }
 }
@@ -2035,13 +2176,21 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
 
        fusion = instance->ctrl_context;
 
+       if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
+               instance->ldio_threshold &&
+               (atomic_inc_return(&instance->ldio_outstanding) >
+               instance->ldio_threshold)) {
+               atomic_dec(&instance->ldio_outstanding);
+               return SCSI_MLQUEUE_DEVICE_BUSY;
+       }
+
        cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
 
        index = cmd->index;
 
        req_desc = megasas_get_request_descriptor(instance, index-1);
        if (!req_desc)
-               return 1;
+               return SCSI_MLQUEUE_HOST_BUSY;
 
        req_desc->Words = 0;
        cmd->request_desc = req_desc;
@@ -2050,7 +2199,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
                megasas_return_cmd_fusion(instance, cmd);
                dev_err(&instance->pdev->dev, "Error building command\n");
                cmd->request_desc = NULL;
-               return 1;
+               return SCSI_MLQUEUE_HOST_BUSY;
        }
 
        req_desc = cmd->request_desc;
@@ -2092,16 +2241,16 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
        struct LD_LOAD_BALANCE_INFO *lbinfo;
        int threshold_reply_count = 0;
        struct scsi_cmnd *scmd_local = NULL;
+       struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
+       struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
 
        fusion = instance->ctrl_context;
 
-       if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
                return IRQ_HANDLED;
 
-       desc = fusion->reply_frames_desc;
-       desc += ((MSIxIndex * fusion->reply_alloc_sz)/
-                sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
-               fusion->last_reply_idx[MSIxIndex];
+       desc = fusion->reply_frames_desc[MSIxIndex] +
+                               fusion->last_reply_idx[MSIxIndex];
 
        reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
 
@@ -2133,6 +2282,16 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
                extStatus = scsi_io_req->RaidContext.exStatus;
 
                switch (scsi_io_req->Function) {
+               case MPI2_FUNCTION_SCSI_TASK_MGMT:
+                       mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
+                                               cmd_fusion->io_request;
+                       mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
+                                               &mr_tm_req->TmRequest;
+                       dev_dbg(&instance->pdev->dev, "TM completion: "
+                               "type: 0x%x TaskMID: 0x%x\n",
+                               mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
+                       complete(&cmd_fusion->done);
+                       break;
                case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
                        /* Update load balancing info */
                        device_id = MEGASAS_DEV_INDEX(scmd_local);
@@ -2155,6 +2314,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
                        map_cmd_status(cmd_fusion, status, extStatus);
                        scsi_io_req->RaidContext.status = 0;
                        scsi_io_req->RaidContext.exStatus = 0;
+                       if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+                               atomic_dec(&instance->ldio_outstanding);
                        megasas_return_cmd_fusion(instance, cmd_fusion);
                        scsi_dma_unmap(scmd_local);
                        scmd_local->scsi_done(scmd_local);
@@ -2186,9 +2347,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
 
                /* Get the next reply descriptor */
                if (!fusion->last_reply_idx[MSIxIndex])
-                       desc = fusion->reply_frames_desc +
-                               ((MSIxIndex * fusion->reply_alloc_sz)/
-                                sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
+                       desc = fusion->reply_frames_desc[MSIxIndex];
                else
                        desc++;
 
@@ -2254,7 +2413,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
 
        /* If we have already declared adapter dead, donot complete cmds */
        spin_lock_irqsave(&instance->hba_lock, flags);
-       if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                spin_unlock_irqrestore(&instance->hba_lock, flags);
                return;
        }
@@ -2411,7 +2570,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
  * @cmd:                       mfi cmd pointer
  *
  */
-void
+int
 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
                          struct megasas_cmd *cmd)
 {
@@ -2419,10 +2578,13 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
 
        req_desc = build_mpt_cmd(instance, cmd);
        if (!req_desc) {
-               dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
-               return;
+               dev_info(&instance->pdev->dev, "Failed from %s %d\n",
+                                       __func__, __LINE__);
+               return DCMD_NOT_FIRED;
        }
+
        megasas_fire_cmd_fusion(instance, req_desc);
+       return DCMD_SUCCESS;
 }
 
 /**
@@ -2583,7 +2745,7 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
 
 /* This function waits for outstanding commands on fusion to complete */
 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
-                                       int iotimeout, int *convert)
+                                       int reason, int *convert)
 {
        int i, outstanding, retval = 0, hb_seconds_missed = 0;
        u32 fw_state;
@@ -2599,14 +2761,22 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
                        retval = 1;
                        goto out;
                }
+
+               if (reason == MFI_IO_TIMEOUT_OCR) {
+                               "MFI IO timed out, initiating OCR\n");
+                               "MFI IO is timed out, initiating OCR\n");
+                       retval = 1;
+                       goto out;
+               }
+
                /* If SR-IOV VF mode & heartbeat timeout, don't wait */
-               if (instance->requestorId && !iotimeout) {
+               if (instance->requestorId && !reason) {
                        retval = 1;
                        goto out;
                }
 
                /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
-               if (instance->requestorId && iotimeout) {
+               if (instance->requestorId && reason) {
                        if (instance->hb_host_mem->HB.fwCounter !=
                            instance->hb_host_mem->HB.driverCounter) {
                                instance->hb_host_mem->HB.driverCounter =
@@ -2655,17 +2825,18 @@ out:
 
 void  megasas_reset_reply_desc(struct megasas_instance *instance)
 {
-       int i, count;
+       int i, j, count;
        struct fusion_context *fusion;
        union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
 
        fusion = instance->ctrl_context;
        count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
-       for (i = 0 ; i < count ; i++)
+       for (i = 0 ; i < count ; i++) {
                fusion->last_reply_idx[i] = 0;
-       reply_desc = fusion->reply_frames_desc;
-       for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
-               reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+               reply_desc = fusion->reply_frames_desc[i];
+               for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
+                       reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+       }
 }
 
 /*
@@ -2680,6 +2851,7 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
        struct megasas_cmd *cmd_mfi;
        union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
        u16 smid;
+       bool refire_cmd = 0;
 
        fusion = instance->ctrl_context;
 
@@ -2695,16 +2867,464 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
                        continue;
                req_desc = megasas_get_request_descriptor
                                        (instance, smid - 1);
-               if (req_desc && ((cmd_mfi->frame->dcmd.opcode !=
+               refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
                                cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
                                 (cmd_mfi->frame->dcmd.opcode !=
-                               cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO))))
+                               cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
+                               && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
+               if (refire_cmd)
                        megasas_fire_cmd_fusion(instance, req_desc);
                else
                        megasas_return_cmd(instance, cmd_mfi);
        }
 }
 
+/*
+ * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
+ * @instance: per adapter struct
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ *
+ * Returns SUCCESS if no I/Os are pending to the SCSI device, else returns FAILED
+ */
+
+static int megasas_track_scsiio(struct megasas_instance *instance,
+               int id, int channel)
+{
+       int i, found = 0;
+       struct megasas_cmd_fusion *cmd_fusion;
+       struct fusion_context *fusion;
+       fusion = instance->ctrl_context;
+
+       for (i = 0 ; i < instance->max_scsi_cmds; i++) {
+               cmd_fusion = fusion->cmd_list[i];
+               if (cmd_fusion->scmd &&
+                       (cmd_fusion->scmd->device->id == id &&
+                       cmd_fusion->scmd->device->channel == channel)) {
+                       dev_info(&instance->pdev->dev,
+                               "SCSI commands pending to target "
+                               "channel %d id %d \tSMID: 0x%x\n",
+                               channel, id, cmd_fusion->index);
+                       scsi_print_command(cmd_fusion->scmd);
+                       found = 1;
+                       break;
+               }
+       }
+
+       return found ? FAILED : SUCCESS;
+}
+
+/**
+ * megasas_tm_response_code - translation of device response code
+ * @instance: per adapter object
+ * @mpi_reply: MPI reply returned by firmware
+ *
+ * Return nothing.
+ */
+static void
+megasas_tm_response_code(struct megasas_instance *instance,
+               struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
+{
+       char *desc;
+
+       switch (mpi_reply->ResponseCode) {
+       case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+               desc = "task management request completed";
+               break;
+       case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+               desc = "invalid frame";
+               break;
+       case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+               desc = "task management request not supported";
+               break;
+       case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+               desc = "task management request failed";
+               break;
+       case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+               desc = "task management request succeeded";
+               break;
+       case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+               desc = "invalid lun";
+               break;
+       case 0xA:
+               desc = "overlapped tag attempted";
+               break;
+       case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+               desc = "task queued, however not sent to target";
+               break;
+       default:
+               desc = "unknown";
+               break;
+       }
+       dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
+               mpi_reply->ResponseCode, desc);
+       dev_dbg(&instance->pdev->dev,
+               "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
+               " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
+               mpi_reply->TerminationCount, mpi_reply->DevHandle,
+               mpi_reply->Function, mpi_reply->TaskType,
+               mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
+}
+
+/**
+ * megasas_issue_tm - main routine for sending tm requests
+ * @instance: per adapter struct
+ * @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @smid_task: smid assigned to the task
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE_XXX (defined in megaraid_sas_fusion.c)
+ * Context: user
+ *
+ * MegaRaid uses the MPT interface for Task Management requests.
+ * A generic API for sending task management requests to firmware.
+ *
+ * Return SUCCESS or FAILED.
+ */
+static int
+megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
+       uint channel, uint id, u16 smid_task, u8 type)
+{
+       struct MR_TASK_MANAGE_REQUEST *mr_request;
+       struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
+       unsigned long timeleft;
+       struct megasas_cmd_fusion *cmd_fusion;
+       struct megasas_cmd *cmd_mfi;
+       union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+       struct fusion_context *fusion;
+       struct megasas_cmd_fusion *scsi_lookup;
+       int rc;
+       struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
+
+       fusion = instance->ctrl_context;
+
+       cmd_mfi = megasas_get_cmd(instance);
+
+       if (!cmd_mfi) {
+               dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+                       __func__, __LINE__);
+               return -ENOMEM;
+       }
+
+       cmd_fusion = megasas_get_cmd_fusion(instance,
+                       instance->max_scsi_cmds + cmd_mfi->index);
+
+       /*  Save the smid. To be used for returning the cmd */
+       cmd_mfi->context.smid = cmd_fusion->index;
+
+       req_desc = megasas_get_request_descriptor(instance,
+                       (cmd_fusion->index - 1));
+       if (!req_desc) {
+               dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+                       __func__, __LINE__);
+               megasas_return_cmd(instance, cmd_mfi);
+               return -ENOMEM;
+       }
+
+       cmd_fusion->request_desc = req_desc;
+       req_desc->Words = 0;
+
+       scsi_lookup = fusion->cmd_list[smid_task - 1];
+
+       mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
+       memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
+       mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
+       mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+       mpi_request->DevHandle = cpu_to_le16(device_handle);
+       mpi_request->TaskType = type;
+       mpi_request->TaskMID = cpu_to_le16(smid_task);
+       mpi_request->LUN[1] = 0;
+
+
+       req_desc = cmd_fusion->request_desc;
+       req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
+       req_desc->HighPriority.RequestFlags =
+               (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+               MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+       req_desc->HighPriority.MSIxIndex =  0;
+       req_desc->HighPriority.LMID = 0;
+       req_desc->HighPriority.Reserved1 = 0;
+
+       if (channel < MEGASAS_MAX_PD_CHANNELS)
+               mr_request->tmReqFlags.isTMForPD = 1;
+       else
+               mr_request->tmReqFlags.isTMForLD = 1;
+
+       init_completion(&cmd_fusion->done);
+       megasas_fire_cmd_fusion(instance, req_desc);
+
+       timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
+
+       if (!timeleft) {
+               dev_err(&instance->pdev->dev,
+                       "task mgmt type 0x%x timed out\n", type);
+               cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
+               mutex_unlock(&instance->reset_mutex);
+               rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
+               mutex_lock(&instance->reset_mutex);
+               return rc;
+       }
+
+       mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
+       megasas_tm_response_code(instance, mpi_reply);
+
+       megasas_return_cmd(instance, cmd_mfi);
+       rc = SUCCESS;
+       switch (type) {
+       case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+               if (scsi_lookup->scmd == NULL)
+                       break;
+               else {
+                       instance->instancet->disable_intr(instance);
+                       msleep(1000);
+                       megasas_complete_cmd_dpc_fusion
+                                       ((unsigned long)instance);
+                       instance->instancet->enable_intr(instance);
+                       if (scsi_lookup->scmd == NULL)
+                               break;
+               }
+               rc = FAILED;
+               break;
+
+       case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+               if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
+                       break;
+               instance->instancet->disable_intr(instance);
+               msleep(1000);
+               megasas_complete_cmd_dpc_fusion
+                               ((unsigned long)instance);
+               rc = megasas_track_scsiio(instance, id, channel);
+               instance->instancet->enable_intr(instance);
+
+               break;
+       case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+       case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+               break;
+       default:
+               rc = FAILED;
+               break;
+       }
+
+       return rc;
+
+}
+
+/*
+ * megasas_fusion_smid_lookup : Look for the fusion command corresponding to a SCSI command
+ * @scmd: SCSI command pointer
+ *
+ * Returns a non-zero SMID if the command is found among outstanding commands
+ */
+static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
+{
+       int i, ret = 0;
+       struct megasas_instance *instance;
+       struct megasas_cmd_fusion *cmd_fusion;
+       struct fusion_context *fusion;
+
+       instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+       fusion = instance->ctrl_context;
+
+       for (i = 0; i < instance->max_scsi_cmds; i++) {
+               cmd_fusion = fusion->cmd_list[i];
+               if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
+                       scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
+                               " SMID: %d\n", cmd_fusion->index);
+                       ret = cmd_fusion->index;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * megasas_get_tm_devhandle - Get devhandle for TM request
+ * @sdev:                    OS provided scsi device
+ *
+ * Returns:                  devhandle/targetID of SCSI device
+ */
+static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
+{
+       u16 pd_index = 0;
+       u32 device_id;
+       struct megasas_instance *instance;
+       struct fusion_context *fusion;
+       struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+       u16 devhandle = (u16)ULONG_MAX;
+
+       instance = (struct megasas_instance *)sdev->host->hostdata;
+       fusion = instance->ctrl_context;
+
+       if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+               if (instance->use_seqnum_jbod_fp) {
+                               pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+                                               sdev->id;
+                               pd_sync = (void *)fusion->pd_seq_sync
+                                               [(instance->pd_seq_map_id - 1) & 1];
+                               devhandle = pd_sync->seq[pd_index].devHandle;
+               } else
+                       sdev_printk(KERN_ERR, sdev, "Firmware exposes tmCapable"
+                               " without JBOD MAP support from %s %d\n", __func__, __LINE__);
+       } else {
+               device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
+                               + sdev->id;
+               devhandle = device_id;
+       }
+
+       return devhandle;
+}
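
[Editor's note] megasas_get_tm_devhandle() above derives the handle either from the JBOD sequence map (PD channels) or directly from the computed target id (LD channels). The small sketch below reproduces only the index arithmetic, with made-up channel and per-channel device counts; the real constants live in the driver headers.

/*
 * Sketch of the devhandle/target-id arithmetic, using invented
 * SKETCH_* constants in place of the driver's MEGASAS_* defines.
 */
#include <stdio.h>

#define SKETCH_MAX_PD_CHANNELS          2
#define SKETCH_MAX_DEV_PER_CHANNEL      128

int main(void)
{
        unsigned int channel, id;

        /* Physical drive on a PD channel: index into the JBOD map. */
        channel = 1;
        id = 3;
        if (channel < SKETCH_MAX_PD_CHANNELS)
                printf("pd_index = %u\n",
                       channel * SKETCH_MAX_DEV_PER_CHANNEL + id);

        /* Logical drive: the computed target id is used as the handle. */
        channel = 2;
        id = 5;
        printf("ld target id = %u\n",
               (channel % 2) * SKETCH_MAX_DEV_PER_CHANNEL + id);
        return 0;
}
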
+
+/*
+ * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
+ * @scmd : pointer to scsi command object
+ *
+ * Returns SUCCESS if the command was aborted, else FAILED
+ */
+
+int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
+{
+       struct megasas_instance *instance;
+       u16 smid, devhandle;
+       struct fusion_context *fusion;
+       int ret;
+       struct MR_PRIV_DEVICE *mr_device_priv_data;
+       mr_device_priv_data = scmd->device->hostdata;
+
+
+       instance = (struct megasas_instance *)scmd->device->host->hostdata;
+       fusion = instance->ctrl_context;
+
+       if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+               dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, "
+                       "SCSI host:%d\n", instance->host->host_no);
+               ret = FAILED;
+               return ret;
+       }
+
+       if (!mr_device_priv_data) {
+               sdev_printk(KERN_INFO, scmd->device, "device has been deleted! "
+                       "scmd(%p)\n", scmd);
+               scmd->result = DID_NO_CONNECT << 16;
+               ret = SUCCESS;
+               goto out;
+       }
+
+
+       if (!mr_device_priv_data->is_tm_capable) {
+               ret = FAILED;
+               goto out;
+       }
+
+       mutex_lock(&instance->reset_mutex);
+
+       smid = megasas_fusion_smid_lookup(scmd);
+
+       if (!smid) {
+               ret = SUCCESS;
+               scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
+                       " issued is not found in outstanding commands\n");
+               mutex_unlock(&instance->reset_mutex);
+               goto out;
+       }
+
+       devhandle = megasas_get_tm_devhandle(scmd->device);
+
+       if (devhandle == (u16)ULONG_MAX) {
+               ret = SUCCESS;
+               sdev_printk(KERN_INFO, scmd->device,
+                       "task abort issued for invalid devhandle\n");
+               mutex_unlock(&instance->reset_mutex);
+               goto out;
+       }
+       sdev_printk(KERN_INFO, scmd->device,
+               "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
+               scmd, devhandle);
+
+       mr_device_priv_data->tm_busy = 1;
+       ret = megasas_issue_tm(instance, devhandle,
+                       scmd->device->channel, scmd->device->id, smid,
+                       MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
+       mr_device_priv_data->tm_busy = 0;
+
+       mutex_unlock(&instance->reset_mutex);
+out:
+       sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+                       ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+       return ret;
+}
+
+/*
+ * megasas_reset_target_fusion : target reset function for fusion adapters
+ * @scmd: SCSI command pointer
+ *
+ * Returns SUCCESS if all commands associated with the target are aborted, else FAILED
+ */
+
+int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
+{
+
+       struct megasas_instance *instance;
+       int ret = FAILED;
+       u16 devhandle;
+       struct fusion_context *fusion;
+       struct MR_PRIV_DEVICE *mr_device_priv_data;
+       mr_device_priv_data = scmd->device->hostdata;
+
+       instance = (struct megasas_instance *)scmd->device->host->hostdata;
+       fusion = instance->ctrl_context;
+
+       if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+               dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, "
+                       "SCSI host:%d\n", instance->host->host_no);
+               ret = FAILED;
+               return ret;
+       }
+
+       if (!mr_device_priv_data) {
+               sdev_printk(KERN_INFO, scmd->device, "device has been deleted! "
+                       "scmd(%p)\n", scmd);
+               scmd->result = DID_NO_CONNECT << 16;
+               ret = SUCCESS;
+               goto out;
+       }
+
+
+       if (!mr_device_priv_data->is_tm_capable) {
+               ret = FAILED;
+               goto out;
+       }
+
+       mutex_lock(&instance->reset_mutex);
+       devhandle = megasas_get_tm_devhandle(scmd->device);
+
+       if (devhandle == (u16)ULONG_MAX) {
+               ret = SUCCESS;
+               sdev_printk(KERN_INFO, scmd->device,
+                       "target reset issued for invalid devhandle\n");
+               mutex_unlock(&instance->reset_mutex);
+               goto out;
+       }
+
+       sdev_printk(KERN_INFO, scmd->device,
+               "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
+               scmd, devhandle);
+       mr_device_priv_data->tm_busy = 1;
+       ret = megasas_issue_tm(instance, devhandle,
+                       scmd->device->channel, scmd->device->id, 0,
+                       MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
+       mr_device_priv_data->tm_busy = 0;
+       mutex_unlock(&instance->reset_mutex);
+out:
+       scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
+               (ret == SUCCESS) ? "SUCCESS" : "FAILED");
+
+       return ret;
+}
+
 /* Check for a second path that is currently UP */
 int megasas_check_mpio_paths(struct megasas_instance *instance,
        struct scsi_cmnd *scmd)
@@ -2730,7 +3350,7 @@ out:
 }
 
 /* Core fusion reset function */
-int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 {
        int retval = SUCCESS, i, convert = 0;
        struct megasas_instance *instance;
@@ -2739,13 +3359,14 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
        u32 abs_state, status_reg, reset_adapter;
        u32 io_timeout_in_crash_mode = 0;
        struct scsi_cmnd *scmd_local = NULL;
+       struct scsi_device *sdev;
 
        instance = (struct megasas_instance *)shost->hostdata;
        fusion = instance->ctrl_context;
 
        mutex_lock(&instance->reset_mutex);
 
-       if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_warn(&instance->pdev->dev, "Hardware critical error, "
                       "returning FAILED for scsi%d.\n",
                        instance->host->host_no);
@@ -2757,10 +3378,10 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
 
        /* IO timeout detected, forcibly put FW in FAULT state */
        if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
-               instance->crash_dump_app_support && iotimeout) {
-               dev_info(&instance->pdev->dev, "IO timeout is detected, "
+               instance->crash_dump_app_support && reason) {
+               dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
                        "forcibly FAULT Firmware\n");
-               instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+               atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
                status_reg = readl(&instance->reg_set->doorbell);
                writel(status_reg | MFI_STATE_FORCE_OCR,
                        &instance->reg_set->doorbell);
@@ -2772,10 +3393,10 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
                        dev_dbg(&instance->pdev->dev, "waiting for [%d] "
                                "seconds for crash dump collection and OCR "
                                "to be done\n", (io_timeout_in_crash_mode * 3));
-               } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+               } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
                        (io_timeout_in_crash_mode < 80));
 
-               if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+               if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
                        dev_info(&instance->pdev->dev, "OCR done for IO "
                                "timeout case\n");
                        retval = SUCCESS;
@@ -2792,18 +3413,18 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
        if (instance->requestorId && !instance->skip_heartbeat_timer_del)
                del_timer_sync(&instance->sriov_heartbeat_timer);
        set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
-       instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
+       atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
        instance->instancet->disable_intr(instance);
        msleep(1000);
 
        /* First try waiting for commands to complete */
-       if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
+       if (megasas_wait_for_outstanding_fusion(instance, reason,
                                                &convert)) {
-               instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+               atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
                dev_warn(&instance->pdev->dev, "resetting fusion "
                       "adapter scsi%d.\n", instance->host->host_no);
                if (convert)
-                       iotimeout = 0;
+                       reason = 0;
 
                /* Now return commands back to the OS */
                for (i = 0 ; i < instance->max_scsi_cmds; i++) {
@@ -2813,6 +3434,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
                                scmd_local->result =
                                        megasas_check_mpio_paths(instance,
                                                        scmd_local);
+                               if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+                                       atomic_dec(&instance->ldio_outstanding);
                                megasas_return_cmd_fusion(instance, cmd_fusion);
                                scsi_dma_unmap(scmd_local);
                                scmd_local->scsi_done(scmd_local);
@@ -2837,55 +3460,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
                }
 
                /* Let SR-IOV VF & PF sync up if there was a HB failure */
-               if (instance->requestorId && !iotimeout) {
+               if (instance->requestorId && !reason) {
                        msleep(MEGASAS_OCR_SETTLE_TIME_VF);
-                       /* Look for a late HB update after VF settle time */
-                       if (abs_state == MFI_STATE_OPERATIONAL &&
-                           (instance->hb_host_mem->HB.fwCounter !=
-                            instance->hb_host_mem->HB.driverCounter)) {
-                                       instance->hb_host_mem->HB.driverCounter =
-                                               instance->hb_host_mem->HB.fwCounter;
-                                       dev_warn(&instance->pdev->dev, "SR-IOV:"
-                                              "Late FW heartbeat update for "
-                                              "scsi%d.\n",
-                                              instance->host->host_no);
-                       } else {
-                               /* In VF mode, first poll for FW ready */
-                               for (i = 0;
-                                    i < (MEGASAS_RESET_WAIT_TIME * 1000);
-                                    i += 20) {
-                                       status_reg =
-                                               instance->instancet->
-                                               read_fw_status_reg(
-                                                       instance->reg_set);
-                                       abs_state = status_reg &
-                                               MFI_STATE_MASK;
-                                       if (abs_state == MFI_STATE_READY) {
-                                               dev_warn(&instance->pdev->dev,
-                                                      "SR-IOV: FW was found"
-                                                      "to be in ready state "
-                                                      "for scsi%d.\n",
-                                                      instance->host->host_no);
-                                               break;
-                                       }
-                                       msleep(20);
-                               }
-                               if (abs_state != MFI_STATE_READY) {
-                                       dev_warn(&instance->pdev->dev, "SR-IOV: "
-                                              "FW not in ready state after %d"
-                                              " seconds for scsi%d, status_reg = "
-                                              "0x%x.\n",
-                                              MEGASAS_RESET_WAIT_TIME,
-                                              instance->host->host_no,
-                                              status_reg);
-                                       megaraid_sas_kill_hba(instance);
-                                       instance->skip_heartbeat_timer_del = 1;
-                                       instance->adprecovery =
-                                               MEGASAS_HW_CRITICAL_ERROR;
-                                       retval = FAILED;
-                                       goto out;
-                               }
-                       }
+                       goto transition_to_ready;
                }
 
                /* Now try to reset the chip */
@@ -2894,23 +3471,28 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
                        if (instance->instancet->adp_reset
                                (instance, instance->reg_set))
                                continue;
-
+transition_to_ready:
                        /* Wait for FW to become ready */
                        if (megasas_transition_to_ready(instance, 1)) {
-                               dev_warn(&instance->pdev->dev, "Failed to "
-                                      "transition controller to ready "
-                                      "for scsi%d.\n",
-                                      instance->host->host_no);
-                               continue;
+                               dev_warn(&instance->pdev->dev,
+                                       "Failed to transition controller to ready for "
+                                       "scsi%d.\n", instance->host->host_no);
+                               if (instance->requestorId && !reason)
+                                       goto fail_kill_adapter;
+                               else
+                                       continue;
                        }
-
                        megasas_reset_reply_desc(instance);
+                       megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
+
                        if (megasas_ioc_init_fusion(instance)) {
                                dev_warn(&instance->pdev->dev,
-                                      "megasas_ioc_init_fusion() failed!"
-                                      " for scsi%d\n",
-                                      instance->host->host_no);
-                               continue;
+                                      "megasas_ioc_init_fusion() failed! for "
+                                      "scsi%d\n", instance->host->host_no);
+                               if (instance->requestorId && !reason)
+                                       goto fail_kill_adapter;
+                               else
+                                       continue;
                        }
 
                        megasas_refire_mgmt_cmd(instance);
@@ -2932,10 +3514,13 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
 
                        megasas_setup_jbod_map(instance);
 
+                       shost_for_each_device(sdev, shost)
+                               megasas_update_sdev_properties(sdev);
+
                        clear_bit(MEGASAS_FUSION_IN_RESET,
                                  &instance->reset_flags);
                        instance->instancet->enable_intr(instance);
-                       instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+                       atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
 
                        /* Restart SR-IOV heartbeat */
                        if (instance->requestorId) {
@@ -2964,6 +3549,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
                        retval = SUCCESS;
                        goto out;
                }
+fail_kill_adapter:
                /* Reset failed, kill the adapter */
                dev_warn(&instance->pdev->dev, "Reset failed, killing "
                       "adapter scsi%d.\n", instance->host->host_no);
@@ -2980,7 +3566,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
                }
                clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
                instance->instancet->enable_intr(instance);
-               instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+               atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
        }
 out:
        clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
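
The reset path above stops assigning instance->adprecovery directly and uses atomic_set() instead; that only buys anything if every reader of the recovery state moves to atomic_read() as well. A minimal sketch of the paired accessors, assuming adprecovery has been redeclared as atomic_t in struct megasas_instance (the wrapper names below are illustrative, not driver code):

/* Illustrative only: paired atomic_t access to the recovery state. */
static inline void sketch_set_adprecovery(struct megasas_instance *instance, int state)
{
	atomic_set(&instance->adprecovery, state);	/* writers: reset and kill paths */
}

static inline int sketch_get_adprecovery(struct megasas_instance *instance)
{
	return atomic_read(&instance->adprecovery);	/* readers: I/O submission, ioctls */
}
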
index 473005c99b4406d241cfb92ffbfea0ded5a124df..80eaee22f5bc3dc67a206e30bfc3716a3659bfe7 100644 (file)
@@ -176,7 +176,9 @@ enum REGION_TYPE {
 #define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD           (0x0100)
 #define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP             (0x0004)
 #define MPI2_FUNCTION_SCSI_IO_REQUEST               (0x00) /* SCSI IO */
-#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY           (0x06)
+#define MPI2_FUNCTION_SCSI_TASK_MGMT                (0x01)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY       (0x03)
+#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO               (0x06)
 #define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO                 (0x00)
 #define MPI2_SGE_FLAGS_64_BIT_ADDRESSING        (0x02)
 #define MPI2_SCSIIO_CONTROL_WRITE               (0x01000000)
@@ -277,6 +279,100 @@ union MPI2_SCSI_IO_CDB_UNION {
        struct MPI2_SGE_SIMPLE_UNION SGE;
 };
 
+/****************************************************************************
+*  SCSI Task Management messages
+****************************************************************************/
+
+/*SCSI Task Management Request Message */
+struct MPI2_SCSI_TASK_MANAGE_REQUEST {
+       u16 DevHandle;          /*0x00 */
+       u8 ChainOffset;         /*0x02 */
+       u8 Function;            /*0x03 */
+       u8 Reserved1;           /*0x04 */
+       u8 TaskType;            /*0x05 */
+       u8 Reserved2;           /*0x06 */
+       u8 MsgFlags;            /*0x07 */
+       u8 VP_ID;               /*0x08 */
+       u8 VF_ID;               /*0x09 */
+       u16 Reserved3;          /*0x0A */
+       u8 LUN[8];              /*0x0C */
+       u32 Reserved4[7];       /*0x14 */
+       u16 TaskMID;            /*0x30 */
+       u16 Reserved5;          /*0x32 */
+};
+
+
+/*SCSI Task Management Reply Message */
+struct MPI2_SCSI_TASK_MANAGE_REPLY {
+       u16 DevHandle;          /*0x00 */
+       u8 MsgLength;           /*0x02 */
+       u8 Function;            /*0x03 */
+       u8 ResponseCode;        /*0x04 */
+       u8 TaskType;            /*0x05 */
+       u8 Reserved1;           /*0x06 */
+       u8 MsgFlags;            /*0x07 */
+       u8 VP_ID;               /*0x08 */
+       u8 VF_ID;               /*0x09 */
+       u16 Reserved2;          /*0x0A */
+       u16 Reserved3;          /*0x0C */
+       u16 IOCStatus;          /*0x0E */
+       u32 IOCLogInfo;         /*0x10 */
+       u32 TerminationCount;   /*0x14 */
+       u32 ResponseInfo;       /*0x18 */
+};
+
+struct MR_TM_REQUEST {
+       char request[128];
+};
+
+struct MR_TM_REPLY {
+       char reply[128];
+};
+
+/* SCSI Task Management Request Message */
+struct MR_TASK_MANAGE_REQUEST {
+       /*To be type casted to struct MPI2_SCSI_TASK_MANAGE_REQUEST */
+       struct MR_TM_REQUEST         TmRequest;
+       union {
+               struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+                       u32 reserved1:30;
+                       u32 isTMForPD:1;
+                       u32 isTMForLD:1;
+#else
+                       u32 isTMForLD:1;
+                       u32 isTMForPD:1;
+                       u32 reserved1:30;
+#endif
+                       u32 reserved2;
+               } tmReqFlags;
+               struct MR_TM_REPLY   TMReply;
+       };
+};
+
+/* TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK           (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET        (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET         (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET   (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET       (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK           (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA              (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET         (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT      (0x0A)
+
+/* ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE               (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME             (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED          (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED                 (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED              (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN            (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG         (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC          (0x80)
+
 /*
  * RAID SCSI IO Request Message
  * Total SGE count will be one less than  _MPI2_SCSI_IO_REQUEST
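
The task-management structures above ride in the same 128-byte fusion frame (MR_TM_REQUEST) and are type-cast to struct MPI2_SCSI_TASK_MANAGE_REQUEST before being handed to the firmware, with tmReqFlags telling the controller whether the target is a logical or a physical drive. A rough sketch of filling such a frame for a target reset (field and constant names come from the definitions above; the helper itself is illustrative, not the driver's code):

/* Illustrative sketch: build a target-reset TM frame. */
static void sketch_build_tm_target_reset(struct MR_TASK_MANAGE_REQUEST *mr_request,
					 u16 dev_handle, bool is_ld)
{
	struct MPI2_SCSI_TASK_MANAGE_REQUEST *req =
		(struct MPI2_SCSI_TASK_MANAGE_REQUEST *)&mr_request->TmRequest;

	memset(mr_request, 0, sizeof(*mr_request));
	req->DevHandle = cpu_to_le16(dev_handle);	/* firmware expects little endian */
	req->Function  = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType  = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	if (is_ld)
		mr_request->tmReqFlags.isTMForLD = 1;	/* logical drive target */
	else
		mr_request->tmReqFlags.isTMForPD = 1;	/* physical drive target */
}
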
@@ -547,7 +643,9 @@ struct MR_SPAN_BLOCK_INFO {
 struct MR_LD_RAID {
        struct {
 #if   defined(__BIG_ENDIAN_BITFIELD)
-               u32     reserved4:7;
+               u32     reserved4:5;
+               u32     fpBypassRegionLock:1;
+               u32     tmCapable:1;
                u32     fpNonRWCapable:1;
                u32     fpReadAcrossStripe:1;
                u32     fpWriteAcrossStripe:1;
@@ -569,7 +667,9 @@ struct MR_LD_RAID {
                u32     fpWriteAcrossStripe:1;
                u32     fpReadAcrossStripe:1;
                u32     fpNonRWCapable:1;
-               u32     reserved4:7;
+               u32     tmCapable:1;
+               u32     fpBypassRegionLock:1;
+               u32     reserved4:5;
 #endif
        } capability;
        __le32     reserved6;
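
The capability word above carves tmCapable and fpBypassRegionLock out of reserved4 and mirrors the new bits in both the big- and little-endian bitfield layouts, so the flag lands in the same position of the 32-bit word on either host. A tiny sanity-check sketch (illustrative, not driver code):

/* Illustrative: the flag reads the same on BE and LE hosts because the two
 * #if branches keep it in the same bit position of the 32-bit capability word. */
static bool sketch_ld_supports_tm(const struct MR_LD_RAID *raid)
{
	BUILD_BUG_ON(sizeof(raid->capability) != sizeof(u32));
	return raid->capability.tmCapable;
}
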
@@ -639,7 +739,7 @@ struct IO_REQUEST_INFO {
        u8 fpOkForIo;
        u8 IoforUnevenSpan;
        u8 start_span;
-       u8 reserved;
+       u8 do_fp_rlbypass;
        u64 start_row;
        u8  span_arm;   /* span[7:5], arm[4:0] */
        u8  pd_after_lb;
@@ -694,6 +794,7 @@ struct megasas_cmd_fusion {
        u32 sync_cmd_idx;
        u32 index;
        u8 pd_r1_lb;
+       struct completion done;
 };
 
 struct LD_LOAD_BALANCE_INFO {
@@ -807,9 +908,18 @@ struct MR_FW_RAID_MAP_EXT {
  *  * define MR_PD_CFG_SEQ structure for system PDs
  *   */
 struct MR_PD_CFG_SEQ {
-       __le16 seqNum;
-       __le16 devHandle;
-       u8  reserved[4];
+       u16 seqNum;
+       u16 devHandle;
+       struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+               u8     reserved:7;
+               u8     tmCapable:1;
+#else
+               u8     tmCapable:1;
+               u8     reserved:7;
+#endif
+       } capability;
+       u8  reserved[3];
 } __packed;
 
 struct MR_PD_CFG_SEQ_NUM_SYNC {
@@ -818,6 +928,12 @@ struct MR_PD_CFG_SEQ_NUM_SYNC {
        struct MR_PD_CFG_SEQ seq[1];
 } __packed;
 
+struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+       u64 RDPQBaseAddress;
+       u32 Reserved1;
+       u32 Reserved2;
+};
+
 struct fusion_context {
        struct megasas_cmd_fusion **cmd_list;
        dma_addr_t req_frames_desc_phys;
@@ -830,8 +946,8 @@ struct fusion_context {
        struct dma_pool *sg_dma_pool;
        struct dma_pool *sense_dma_pool;
 
-       dma_addr_t reply_frames_desc_phys;
-       union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc;
+       dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
+       union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
        struct dma_pool *reply_frames_desc_pool;
 
        u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
@@ -841,6 +957,8 @@ struct fusion_context {
        u32 reply_alloc_sz;
        u32 io_frames_alloc_sz;
 
+       struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt;
+       dma_addr_t rdpq_phys;
        u16     max_sge_in_main_msg;
        u16     max_sge_in_chain;
 
index 6992ebc50c87943c8e59015be6e41f82feb0ba9a..4dc06a13cab879255d7f7b58dde4643ed25f5c52 100644 (file)
@@ -272,8 +272,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
 
                iter = (uint32_t *)buf;
                chksum = 0;
-               for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
-                       chksum += le32_to_cpu(*iter++);
+               for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
+                       chksum += le32_to_cpu(*iter);
                chksum = ~chksum + 1;
                *iter = cpu_to_le32(chksum);
        } else {
@@ -562,6 +562,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
        struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
            struct device, kobj)));
        struct qla_hw_data *ha = vha->hw;
+       uint32_t faddr;
 
        if (unlikely(pci_channel_offline(ha->pdev)))
                return -EAGAIN;
@@ -569,9 +570,16 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
        if (!capable(CAP_SYS_ADMIN))
                return -EINVAL;
 
-       if (IS_NOCACHE_VPD_TYPE(ha))
-               ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
+       if (IS_NOCACHE_VPD_TYPE(ha)) {
+               faddr = ha->flt_region_vpd << 2;
+
+               if (IS_QLA27XX(ha) &&
+                   qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+                       faddr = ha->flt_region_vpd_sec << 2;
+
+               ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
                    ha->vpd_size);
+       }
        return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
 }
 
@@ -1909,7 +1917,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
        if (qla2x00_reset_active(vha))
                goto done;
 
-       stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
+       stats = dma_alloc_coherent(&ha->pdev->dev,
+           sizeof(struct link_statistics), &stats_dma, GFP_KERNEL);
        if (stats == NULL) {
                ql_log(ql_log_warn, vha, 0x707d,
                    "Failed to allocate memory for stats.\n");
@@ -1957,7 +1966,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
        do_div(pfc_host_stat->seconds_since_last_reset, HZ);
 
 done_free:
-        dma_pool_free(ha->s_dma_pool, stats, stats_dma);
+       dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
+           stats, stats_dma);
 done:
        return pfc_host_stat;
 }
index c26acde797f0dda4395fe80a205131fdeb64d0c9..392c147d5793e4345d06b78c698b63cc9251282f 100644 (file)
@@ -2106,6 +2106,195 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
        return 0;
 }
 
+static int
+qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_flash_update_caps cap;
+
+       if (!(IS_QLA27XX(ha)))
+               return -EPERM;
+
+       memset(&cap, 0, sizeof(cap));
+       cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
+                          (uint64_t)ha->fw_attributes_ext[0] << 32 |
+                          (uint64_t)ha->fw_attributes_h << 16 |
+                          (uint64_t)ha->fw_attributes;
+
+       sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+           bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
+       bsg_job->reply->reply_payload_rcv_len = sizeof(cap);
+
+       bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+           EXT_STATUS_OK;
+
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       bsg_job->reply->result = DID_OK << 16;
+       bsg_job->job_done(bsg_job);
+       return 0;
+}
+
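
qla27xx_get_flash_upd_cap packs four 16-bit firmware attribute words into the 64-bit capabilities field, most-significant word first; qla27xx_set_flash_upd_cap below rebuilds the same value and rejects the request if user space echoes back anything different. A standalone illustration of the packing (plain C, values made up):

#include <stdint.h>
#include <assert.h>

/* Pack four 16-bit attribute words into one 64-bit capabilities value,
 * most-significant word first, as the bsg handlers above and below do. */
static uint64_t pack_caps(uint16_t ext1, uint16_t ext0, uint16_t attr_h, uint16_t attr)
{
	return (uint64_t)ext1 << 48 | (uint64_t)ext0 << 32 |
	       (uint64_t)attr_h << 16 | (uint64_t)attr;
}

int main(void)
{
	uint64_t caps = pack_caps(0x1111, 0x2222, 0x3333, 0x4444);

	assert(caps == 0x1111222233334444ULL);
	assert((uint16_t)(caps >> 32) == 0x2222);	/* unpacking is a shift and a mask */
	return 0;
}
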
+static int
+qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       struct qla_hw_data *ha = vha->hw;
+       uint64_t online_fw_attr = 0;
+       struct qla_flash_update_caps cap;
+
+       if (!(IS_QLA27XX(ha)))
+               return -EPERM;
+
+       memset(&cap, 0, sizeof(cap));
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
+
+       online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
+                        (uint64_t)ha->fw_attributes_ext[0] << 32 |
+                        (uint64_t)ha->fw_attributes_h << 16 |
+                        (uint64_t)ha->fw_attributes;
+
+       if (online_fw_attr != cap.capabilities) {
+               bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+                   EXT_STATUS_INVALID_PARAM;
+               return -EINVAL;
+       }
+
+       if (cap.outage_duration < MAX_LOOP_TIMEOUT)  {
+               bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+                   EXT_STATUS_INVALID_PARAM;
+               return -EINVAL;
+       }
+
+       bsg_job->reply->reply_payload_rcv_len = 0;
+
+       bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+           EXT_STATUS_OK;
+
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       bsg_job->reply->result = DID_OK << 16;
+       bsg_job->job_done(bsg_job);
+       return 0;
+}
+
+static int
+qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_bbcr_data bbcr;
+       uint16_t loop_id, topo, sw_cap;
+       uint8_t domain, area, al_pa, state;
+       int rval;
+
+       if (!(IS_QLA27XX(ha)))
+               return -EPERM;
+
+       memset(&bbcr, 0, sizeof(bbcr));
+
+       if (vha->flags.bbcr_enable)
+               bbcr.status = QLA_BBCR_STATUS_ENABLED;
+       else
+               bbcr.status = QLA_BBCR_STATUS_DISABLED;
+
+       if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
+               rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
+                       &area, &domain, &topo, &sw_cap);
+               if (rval != QLA_SUCCESS) {
+                       bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
+                       bbcr.state = QLA_BBCR_STATE_OFFLINE;
+                       bbcr.mbx1 = loop_id;
+                       goto done;
+               }
+
+               state = (vha->bbcr >> 12) & 0x1;
+
+               if (state) {
+                       bbcr.state = QLA_BBCR_STATE_OFFLINE;
+                       bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
+               } else {
+                       bbcr.state = QLA_BBCR_STATE_ONLINE;
+                       bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
+               }
+
+               bbcr.configured_bbscn = vha->bbcr & 0xf;
+       }
+
+done:
+       sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+               bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
+       bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr);
+
+       bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       bsg_job->reply->result = DID_OK << 16;
+       bsg_job->job_done(bsg_job);
+       return 0;
+}
+
+static int
+qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       struct qla_hw_data *ha = vha->hw;
+       struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+       struct link_statistics *stats = NULL;
+       dma_addr_t stats_dma;
+       int rval = QLA_FUNCTION_FAILED;
+
+       if (test_bit(UNLOADING, &vha->dpc_flags))
+               goto done;
+
+       if (unlikely(pci_channel_offline(ha->pdev)))
+               goto done;
+
+       if (qla2x00_reset_active(vha))
+               goto done;
+
+       if (!IS_FWI2_CAPABLE(ha))
+               goto done;
+
+       stats = dma_alloc_coherent(&ha->pdev->dev,
+               sizeof(struct link_statistics), &stats_dma, GFP_KERNEL);
+       if (!stats) {
+               ql_log(ql_log_warn, vha, 0x70e2,
+               "Failed to allocate memory for stats.\n");
+               goto done;
+       }
+
+       memset(stats, 0, sizeof(struct link_statistics));
+
+       rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
+
+       if (rval != QLA_SUCCESS)
+               goto done_free;
+
+       ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
+           (uint8_t *)stats, sizeof(struct link_statistics));
+
+       sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+       bsg_job->reply_payload.sg_cnt, stats, sizeof(struct link_statistics));
+       bsg_job->reply->reply_payload_rcv_len = sizeof(struct link_statistics);
+
+       bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       bsg_job->reply->result = DID_OK << 16;
+       bsg_job->job_done(bsg_job);
+
+done_free:
+       dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
+               stats, stats_dma);
+done:
+       return rval;
+}
+
 static int
 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
 {
@@ -2161,6 +2350,18 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
        case QL_VND_SERDES_OP_EX:
                return qla8044_serdes_op(bsg_job);
 
+       case QL_VND_GET_FLASH_UPDATE_CAPS:
+               return qla27xx_get_flash_upd_cap(bsg_job);
+
+       case QL_VND_SET_FLASH_UPDATE_CAPS:
+               return qla27xx_set_flash_upd_cap(bsg_job);
+
+       case QL_VND_GET_BBCR_DATA:
+               return qla27xx_get_bbcr_data(bsg_job);
+
+       case QL_VND_GET_PRIV_STATS:
+               return qla2x00_get_priv_stats(bsg_job);
+
        default:
                return -ENOSYS;
        }
index d38f9efa56fa57e233804be207c869b87e43fe99..c80192d45536286b695fe067fb41351461c138ce 100644 (file)
 #define QL_VND_FX00_MGMT_CMD   0x12
 #define QL_VND_SERDES_OP       0x13
 #define        QL_VND_SERDES_OP_EX     0x14
+#define QL_VND_GET_FLASH_UPDATE_CAPS    0x15
+#define QL_VND_SET_FLASH_UPDATE_CAPS    0x16
+#define QL_VND_GET_BBCR_DATA    0x17
+#define QL_VND_GET_PRIV_STATS  0x18
 
 /* BSG Vendor specific subcode returns */
 #define EXT_STATUS_OK                  0
@@ -232,4 +236,34 @@ struct qla_serdes_reg_ex {
        uint32_t val;
 } __packed;
 
+struct qla_flash_update_caps {
+       uint64_t  capabilities;
+       uint32_t  outage_duration;
+       uint8_t   reserved[20];
+} __packed;
+
+/* BB_CR Status */
+#define QLA_BBCR_STATUS_DISABLED       0
+#define QLA_BBCR_STATUS_ENABLED        1
+#define QLA_BBCR_STATUS_UNKNOWN        2
+
+/* BB_CR State */
+#define QLA_BBCR_STATE_OFFLINE         0
+#define QLA_BBCR_STATE_ONLINE          1
+
+/* BB_CR Offline Reason Code */
+#define QLA_BBCR_REASON_PORT_SPEED     1
+#define QLA_BBCR_REASON_PEER_PORT      2
+#define QLA_BBCR_REASON_SWITCH         3
+#define QLA_BBCR_REASON_LOGIN_REJECT   4
+
+struct  qla_bbcr_data {
+       uint8_t   status;         /* 1 - enabled, 0 - Disabled */
+       uint8_t   state;          /* 1 - online, 0 - offline */
+       uint8_t   configured_bbscn;       /* 0-15 */
+       uint8_t   negotiated_bbscn;       /* 0-15 */
+       uint8_t   offline_reason_code;
+       uint16_t  mbx1;                 /* Port state */
+       uint8_t   reserved[9];
+} __packed;
 #endif
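
qla27xx_get_bbcr_data in qla_bsg.c fills this structure from the 16-bit BBCR word that qla2x00_get_adapter_id now captures from mailbox register 15: bit 12 is the offline flag, bits 11:8 carry the negotiated BB-SCN and bits 3:0 the configured BB-SCN. A small standalone decoder mirroring those shifts (illustrative, sample value made up):

#include <stdint.h>
#include <stdio.h>

/* Decode the BBCR mailbox word the same way qla27xx_get_bbcr_data does. */
static void decode_bbcr(uint16_t bbcr)
{
	unsigned int offline        = (bbcr >> 12) & 0x1;
	unsigned int negotiated_scn = (bbcr >> 8) & 0xf;
	unsigned int configured_scn = bbcr & 0xf;

	printf("state=%s negotiated_bbscn=%u configured_bbscn=%u\n",
	       offline ? "offline" : "online", negotiated_scn, configured_scn);
}

int main(void)
{
	decode_bbcr(0x0235);	/* online, negotiated BB-SCN 2, configured BB-SCN 5 */
	return 0;
}
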
index cd0d94ea7f74047503a9b54a3bcbd6978e2b2262..b64c504ff12fa2dc5657ce68297cb59dda0f27e8 100644 (file)
@@ -11,7 +11,7 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x017f       | 0x0146         |
+ * | Module Init and Probe        |       0x018f       | 0x0146         |
  * |                              |                    | 0x015b-0x0160 |
  * |                              |                    | 0x016e-0x0170  |
  * | Mailbox commands             |       0x1192       |               |
  * |                              |                    | 0x303a                |
  * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
  * | Async Events                 |       0x5089       | 0x502b-0x502f  |
+ * |                              |                    | 0x505e         |
  * |                              |                    | 0x5084,0x5075 |
  * |                              |                    | 0x503d,0x5044  |
  * |                              |                    | 0x507b,0x505f |
  * | Timer Routines               |       0x6012       |                |
- * | User Space Interactions      |       0x70e65      | 0x7018,0x702e  |
+ * | User Space Interactions      |       0x70e3       | 0x7018,0x702e  |
  * |                             |                    | 0x7020,0x7024  |
  * |                              |                    | 0x7039,0x7045  |
  * |                              |                    | 0x7073-0x7075  |
@@ -293,8 +294,8 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
 
        WRT_REG_DWORD(&reg->iobase_addr, iobase);
        dmp_reg = &reg->iobase_window;
-       while (count--)
-               *buf++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for ( ; count--; dmp_reg++)
+               *buf++ = htonl(RD_REG_DWORD(dmp_reg));
 
        return buf;
 }
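
Throughout these firmware-dump routines the RD_REG_WORD(dmp_reg++) / RD_REG_DWORD(dmp_reg++) pattern is replaced by a plain lvalue argument with the pointer increment hoisted into the for-loop header. One plausible motivation, sketched here without claiming anything about how the qla2xxx accessors are actually defined: if a register accessor is macro-expanded so that its argument is evaluated more than once, an embedded ++ advances the pointer more than once per call.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical accessor that, unlike a real function, evaluates its argument twice. */
#define BAD_READ16(addr)  ((void)(addr), *(addr))

int main(void)
{
	uint16_t regs[4] = { 1, 2, 3, 4 };
	const uint16_t *p = regs;
	int i;

	(void)BAD_READ16(p++);		/* argument evaluated twice: p advances by two */
	printf("after macro call: p - regs = %td\n", p - regs);	/* prints 2 */

	p = regs;
	for (i = 0; i < 2; i++, p++)	/* increment kept out of the macro argument */
		(void)BAD_READ16(p);
	printf("after loop:       p - regs = %td\n", p - regs);	/* prints 2, both words read */
	return 0;
}
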
@@ -456,8 +457,8 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
 {
        uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
 
-       while (count--)
-               *buf++ = htons(RD_REG_WORD(dmp_reg++));
+       for ( ; count--; dmp_reg++)
+               *buf++ = htons(RD_REG_WORD(dmp_reg));
 }
 
 static inline void *
@@ -732,16 +733,18 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        if (rval == QLA_SUCCESS) {
                dmp_reg = &reg->flash_address;
-               for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
-                       fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+               for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
+                       fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
 
                dmp_reg = &reg->u.isp2300.req_q_in;
-               for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
-                       fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+               for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
+                   cnt++, dmp_reg++)
+                       fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
 
                dmp_reg = &reg->u.isp2300.mailbox0;
-               for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
-                       fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+               for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
+                   cnt++, dmp_reg++)
+                       fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
 
                WRT_REG_WORD(&reg->ctrl_status, 0x40);
                qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
@@ -751,8 +754,9 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
                WRT_REG_WORD(&reg->ctrl_status, 0x00);
                dmp_reg = &reg->risc_hw;
-               for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
-                       fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+               for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
+                   cnt++, dmp_reg++)
+                       fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
 
                WRT_REG_WORD(&reg->pcr, 0x2000);
                qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
@@ -895,25 +899,25 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        }
        if (rval == QLA_SUCCESS) {
                dmp_reg = &reg->flash_address;
-               for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
-                       fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+               for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
+                       fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
 
                dmp_reg = &reg->u.isp2100.mailbox0;
-               for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+               for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
                        if (cnt == 8)
                                dmp_reg = &reg->u_end.isp2200.mailbox8;
 
-                       fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+                       fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
                }
 
                dmp_reg = &reg->u.isp2100.unused_2[0];
-               for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
-                       fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+               for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
+                       fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
 
                WRT_REG_WORD(&reg->ctrl_status, 0x00);
                dmp_reg = &reg->risc_hw;
-               for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
-                       fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+               for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
+                       fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
 
                WRT_REG_WORD(&reg->pcr, 0x2000);
                qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
@@ -1095,8 +1099,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        /* Host interface registers. */
        dmp_reg = &reg->flash_addr;
-       for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
-               fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+               fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
 
        /* Disable interrupts. */
        WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1128,8 +1132,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        /* Mailbox registers. */
        mbx_reg = &reg->mailbox0;
-       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
-               fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+               fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
 
        /* Transfer sequence registers. */
        iter_reg = fw->xseq_gp_reg;
@@ -1167,20 +1171,20 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        iter_reg = fw->req0_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        iter_reg = fw->resp0_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        iter_reg = fw->req1_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        /* Transmit DMA registers. */
        iter_reg = fw->xmt0_dma_reg;
@@ -1358,8 +1362,10 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        RD_REG_DWORD(&reg->iobase_addr);
        WRT_REG_DWORD(&reg->iobase_window, 0x01);
        dmp_reg = &reg->iobase_c4;
-       fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
-       fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+       fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+       dmp_reg++;
+       fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+       dmp_reg++;
        fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
        fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
 
@@ -1368,8 +1374,8 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        /* Host interface registers. */
        dmp_reg = &reg->flash_addr;
-       for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
-               fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+               fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
 
        /* Disable interrupts. */
        WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1417,8 +1423,8 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        /* Mailbox registers. */
        mbx_reg = &reg->mailbox0;
-       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
-               fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+               fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
 
        /* Transfer sequence registers. */
        iter_reg = fw->xseq_gp_reg;
@@ -1481,20 +1487,20 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        iter_reg = fw->req0_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        iter_reg = fw->resp0_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        iter_reg = fw->req1_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        /* Transmit DMA registers. */
        iter_reg = fw->xmt0_dma_reg;
@@ -1679,8 +1685,10 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        RD_REG_DWORD(&reg->iobase_addr);
        WRT_REG_DWORD(&reg->iobase_window, 0x01);
        dmp_reg = &reg->iobase_c4;
-       fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
-       fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+       fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+       dmp_reg++;
+       fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+       dmp_reg++;
        fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
        fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
 
@@ -1689,8 +1697,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        /* Host interface registers. */
        dmp_reg = &reg->flash_addr;
-       for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
-               fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+               fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
 
        /* Disable interrupts. */
        WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1738,8 +1746,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        /* Mailbox registers. */
        mbx_reg = &reg->mailbox0;
-       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
-               fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+               fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
 
        /* Transfer sequence registers. */
        iter_reg = fw->xseq_gp_reg;
@@ -1802,20 +1810,20 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        iter_reg = fw->req0_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        iter_reg = fw->resp0_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        iter_reg = fw->req1_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        /* Transmit DMA registers. */
        iter_reg = fw->xmt0_dma_reg;
@@ -2022,8 +2030,10 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        RD_REG_DWORD(&reg->iobase_addr);
        WRT_REG_DWORD(&reg->iobase_window, 0x01);
        dmp_reg = &reg->iobase_c4;
-       fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
-       fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+       fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+       dmp_reg++;
+       fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+       dmp_reg++;
        fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
        fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
 
@@ -2032,8 +2042,8 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        /* Host interface registers. */
        dmp_reg = &reg->flash_addr;
-       for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
-               fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+               fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
 
        /* Disable interrupts. */
        WRT_REG_DWORD(&reg->ictrl, 0);
@@ -2081,8 +2091,8 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        /* Mailbox registers. */
        mbx_reg = &reg->mailbox0;
-       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
-               fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+               fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
 
        /* Transfer sequence registers. */
        iter_reg = fw->xseq_gp_reg;
@@ -2177,20 +2187,20 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        iter_reg = fw->req0_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        iter_reg = fw->resp0_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        iter_reg = fw->req1_dma_reg;
        iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
        dmp_reg = &reg->iobase_q;
-       for (cnt = 0; cnt < 7; cnt++)
-               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+       for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+               *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
 
        /* Transmit DMA registers. */
        iter_reg = fw->xmt0_dma_reg;
index 9872f3429e53ae183d1c5667f21acd202461eebf..ceb452dd143c972ac728f2b9326ff1eff0187e02 100644 (file)
@@ -1060,6 +1060,12 @@ struct mbx_cmd_32 {
 #define FSTATE_FATAL_ERROR         4
 #define FSTATE_LOOP_BACK_CONN      5
 
+#define QLA27XX_IMG_STATUS_VER_MAJOR   0x01
+#define QLA27XX_IMG_STATUS_VER_MINOR    0x00
+#define QLA27XX_IMG_STATUS_SIGN   0xFACEFADE
+#define QLA27XX_PRIMARY_IMAGE  1
+#define QLA27XX_SECONDARY_IMAGE    2
+
 /*
  * Port Database structure definition
  * Little endian except where noted.
@@ -1248,13 +1254,41 @@ struct link_statistics {
        uint32_t inval_xmit_word_cnt;
        uint32_t inval_crc_cnt;
        uint32_t lip_cnt;
-       uint32_t unused1[0x1a];
+       uint32_t link_up_cnt;
+       uint32_t link_down_loop_init_tmo;
+       uint32_t link_down_los;
+       uint32_t link_down_loss_rcv_clk;
+       uint32_t reserved0[5];
+       uint32_t port_cfg_chg;
+       uint32_t reserved1[11];
+       uint32_t rsp_q_full;
+       uint32_t atio_q_full;
+       uint32_t drop_ae;
+       uint32_t els_proto_err;
+       uint32_t reserved2;
        uint32_t tx_frames;
        uint32_t rx_frames;
        uint32_t discarded_frames;
        uint32_t dropped_frames;
-       uint32_t unused2[1];
+       uint32_t reserved3;
        uint32_t nos_rcvd;
+       uint32_t reserved4[4];
+       uint32_t tx_prjt;
+       uint32_t rcv_exfail;
+       uint32_t rcv_abts;
+       uint32_t seq_frm_miss;
+       uint32_t corr_err;
+       uint32_t mb_rqst;
+       uint32_t nport_full;
+       uint32_t eofa;
+       uint32_t reserved5;
+       uint32_t fpm_recv_word_cnt_lo;
+       uint32_t fpm_recv_word_cnt_hi;
+       uint32_t fpm_disc_word_cnt_lo;
+       uint32_t fpm_disc_word_cnt_hi;
+       uint32_t fpm_xmit_word_cnt_lo;
+       uint32_t fpm_xmit_word_cnt_hi;
+       uint32_t reserved6[70];
 };
 
 /*
@@ -3433,14 +3467,20 @@ struct qla_hw_data {
        uint32_t        flt_region_flt;
        uint32_t        flt_region_fdt;
        uint32_t        flt_region_boot;
+       uint32_t        flt_region_boot_sec;
        uint32_t        flt_region_fw;
+       uint32_t        flt_region_fw_sec;
        uint32_t        flt_region_vpd_nvram;
        uint32_t        flt_region_vpd;
+       uint32_t        flt_region_vpd_sec;
        uint32_t        flt_region_nvram;
        uint32_t        flt_region_npiv_conf;
        uint32_t        flt_region_gold_fw;
        uint32_t        flt_region_fcp_prio;
        uint32_t        flt_region_bootload;
+       uint32_t        flt_region_img_status_pri;
+       uint32_t        flt_region_img_status_sec;
+       uint8_t         active_image;
 
        /* Needed for BEACON */
        uint16_t        beacon_blink_led;
@@ -3571,6 +3611,7 @@ typedef struct scsi_qla_host {
                uint32_t        delete_progress:1;
 
                uint32_t        fw_tgt_reported:1;
+               uint32_t        bbcr_enable:1;
        } flags;
 
        atomic_t        loop_state;
@@ -3703,8 +3744,19 @@ typedef struct scsi_qla_host {
        atomic_t        vref_count;
        struct qla8044_reset_template reset_tmplt;
        struct qla_tgt_counters tgt_counters;
+       uint16_t        bbcr;
 } scsi_qla_host_t;
 
+struct qla27xx_image_status {
+       uint8_t image_status_mask;
+       uint16_t generation_number;
+       uint8_t reserved[3];
+       uint8_t ver_minor;
+       uint8_t ver_major;
+       uint32_t checksum;
+       uint32_t signature;
+} __packed;
+
 #define SET_VP_IDX     1
 #define SET_AL_PA      2
 #define RESET_VP_IDX   3
index 42bb357bf56b1dcd7d4e0391c1a39cfa2a164197..4c0f3a774799d1f4da454975c5eb92bb6b116282 100644 (file)
@@ -1288,7 +1288,9 @@ struct vp_rpt_id_entry_24xx {
 
        uint8_t vp_idx_map[16];
 
-       uint8_t reserved_4[32];
+       uint8_t reserved_4[28];
+       uint16_t bbcr;
+       uint8_t reserved_5[6];
 };
 
 #define VF_EVFP_IOCB_TYPE       0x26    /* Exchange Virtual Fabric Parameters entry. */
@@ -1393,6 +1395,16 @@ struct qla_flt_header {
 #define FLT_REG_FCOE_NVRAM_0   0xAA
 #define FLT_REG_FCOE_NVRAM_1   0xAC
 
+/* 27xx */
+#define FLT_REG_IMG_PRI_27XX   0x95
+#define FLT_REG_IMG_SEC_27XX   0x96
+#define FLT_REG_FW_SEC_27XX    0x02
+#define FLT_REG_BOOTLOAD_SEC_27XX      0x9
+#define FLT_REG_VPD_SEC_27XX_0 0x50
+#define FLT_REG_VPD_SEC_27XX_1 0x52
+#define FLT_REG_VPD_SEC_27XX_2 0xD8
+#define FLT_REG_VPD_SEC_27XX_3 0xDA
+
 struct qla_flt_region {
        uint32_t code;
        uint32_t size;
index 0103e468e3578b58887f9053233672c0b0cdba44..fe943772fe7beed0cd49f19a742e11a04f6d91df 100644 (file)
@@ -90,6 +90,7 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
 extern int
 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
 extern int qla2x00_init_rings(scsi_qla_host_t *);
+extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
 
 /*
  * Global Data in qla_os.c source file.
@@ -121,6 +122,7 @@ extern int ql2xmdcapmask;
 extern int ql2xmdenable;
 extern int ql2xexlogins;
 extern int ql2xexchoffld;
+extern int ql2xfwholdabts;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
index 52a87657c7dd3e4945aad29e9b3cbf983b94efad..184b6b697fb1ca6c3e5356dc74b74050b898d4a9 100644 (file)
@@ -157,8 +157,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
        if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
                lio->u.logio.flags |= SRB_LOGIN_RETRIED;
        rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
+       if (rval != QLA_SUCCESS) {
+               fcport->flags &= ~FCF_ASYNC_SENT;
+               fcport->flags |= FCF_LOGIN_NEEDED;
+               set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                goto done_free_sp;
+       }
 
        ql_dbg(ql_dbg_disc, vha, 0x2072,
            "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
@@ -2062,6 +2066,10 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
        if (IS_P3P_TYPE(ha))
                return;
 
+       /*  Hold status IOCBs until ABTS response received. */
+       if (ql2xfwholdabts)
+               ha->fw_options[3] |= BIT_12;
+
        /* Update Serial Link options. */
        if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
                return;
@@ -2844,7 +2852,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
        if (nv->login_timeout < 4)
                nv->login_timeout = 4;
        ha->login_timeout = nv->login_timeout;
-       icb->login_timeout = nv->login_timeout;
 
        /* Set minimum RATOV to 100 tenths of a second. */
        ha->r_a_tov = 100;
@@ -5122,8 +5129,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
        dptr = (uint32_t *)nv;
        ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
            ha->nvram_size);
-       for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
-               chksum += le32_to_cpu(*dptr++);
+       for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
+               chksum += le32_to_cpu(*dptr);
 
        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
            "Contents of NVRAM\n");
@@ -5274,7 +5281,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
        if (le16_to_cpu(nv->login_timeout) < 4)
                nv->login_timeout = cpu_to_le16(4);
        ha->login_timeout = le16_to_cpu(nv->login_timeout);
-       icb->login_timeout = nv->login_timeout;
 
        /* Set minimum RATOV to 100 tenths of a second. */
        ha->r_a_tov = 100;
@@ -5346,6 +5352,93 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
        return (rval);
 }
 
+uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
+{
+       struct qla27xx_image_status pri_image_status, sec_image_status;
+       uint8_t valid_pri_image, valid_sec_image;
+       uint32_t *wptr;
+       uint32_t cnt, chksum, size;
+       struct qla_hw_data *ha = vha->hw;
+
+       valid_pri_image = valid_sec_image = 1;
+       ha->active_image = 0;
+       size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
+
+       if (!ha->flt_region_img_status_pri) {
+               valid_pri_image = 0;
+               goto check_sec_image;
+       }
+
+       qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
+           ha->flt_region_img_status_pri, size);
+
+       if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+               ql_dbg(ql_dbg_init, vha, 0x018b,
+                   "Primary image signature (0x%x) not valid\n",
+                   pri_image_status.signature);
+               valid_pri_image = 0;
+               goto check_sec_image;
+       }
+
+       wptr = (uint32_t *)(&pri_image_status);
+       cnt = size;
+
+       for (chksum = 0; cnt--; wptr++)
+               chksum += le32_to_cpu(*wptr);
+       if (chksum) {
+               ql_dbg(ql_dbg_init, vha, 0x018c,
+                   "Checksum validation failed for primary image (0x%x)\n",
+                   chksum);
+               valid_pri_image = 0;
+       }
+
+check_sec_image:
+       if (!ha->flt_region_img_status_sec) {
+               valid_sec_image = 0;
+               goto check_valid_image;
+       }
+
+       qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
+           ha->flt_region_img_status_sec, size);
+
+       if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+               ql_dbg(ql_dbg_init, vha, 0x018d,
+                   "Secondary image signature(0x%x) not valid\n",
+                   sec_image_status.signature);
+               valid_sec_image = 0;
+               goto check_valid_image;
+       }
+
+       wptr = (uint32_t *)(&sec_image_status);
+       cnt = size;
+       for (chksum = 0; cnt--; wptr++)
+               chksum += le32_to_cpu(*wptr);
+       if (chksum) {
+               ql_dbg(ql_dbg_init, vha, 0x018e,
+                   "Checksum validation failed for secondary image (0x%x)\n",
+                   chksum);
+               valid_sec_image = 0;
+       }
+
+check_valid_image:
+       if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
+               ha->active_image = QLA27XX_PRIMARY_IMAGE;
+       if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
+               if (!ha->active_image ||
+                   pri_image_status.generation_number <
+                   sec_image_status.generation_number)
+                       ha->active_image = QLA27XX_SECONDARY_IMAGE;
+       }
+
+       ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
+           ha->active_image == 0 ? "default bootld and fw" :
+           ha->active_image == 1 ? "primary" :
+           ha->active_image == 2 ? "secondary" :
+           "Invalid");
+
+       return ha->active_image;
+}
+
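
qla27xx_find_valid_image sums every 32-bit word of the qla27xx_image_status block, checksum word included, and treats any non-zero total as a bad image; the writer is therefore expected to store the two's complement of the sum of the remaining words in the checksum slot. A standalone illustration of writing and verifying such a block (plain C, layout simplified to eight words):

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

static uint32_t blk[8] = { 0xFACEFADE, 1, 2, 3, 4, 5, 6, 0 };	/* last word is the checksum slot */

static uint32_t sum_words(const uint32_t *w, size_t n)
{
	uint32_t s = 0;

	while (n--)
		s += *w++;
	return s;
}

int main(void)
{
	size_t n = sizeof(blk) / sizeof(blk[0]);

	/* Writer side: make the whole block sum to zero (mod 2^32). */
	blk[n - 1] = -sum_words(blk, n - 1);

	/* Reader side, as in qla27xx_find_valid_image: non-zero sum means corruption. */
	assert(sum_words(blk, n) == 0);
	return 0;
}
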
 static int
 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
     uint32_t faddr)
@@ -5368,6 +5461,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
        dcode = (uint32_t *)req->ring;
        *srisc_addr = 0;
 
+       if (IS_QLA27XX(ha) &&
+           qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+               faddr = ha->flt_region_fw_sec;
+
        /* Validate firmware image by checking version. */
        qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
        for (i = 0; i < 4; i++)
@@ -6068,8 +6165,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
            ha->nvram_size);
        dptr = (uint32_t *)nv;
-       for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
-               chksum += le32_to_cpu(*dptr++);
+       for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
+               chksum += le32_to_cpu(*dptr);
 
        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
            "Contents of NVRAM:\n");
@@ -6231,7 +6328,6 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        if (le16_to_cpu(nv->login_timeout) < 4)
                nv->login_timeout = cpu_to_le16(4);
        ha->login_timeout = le16_to_cpu(nv->login_timeout);
-       icb->login_timeout = nv->login_timeout;
 
        /* Set minimum RATOV to 100 tenths of a second. */
        ha->r_a_tov = 100;
@@ -6413,12 +6509,17 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
 
+       /*  Hold status IOCBs until ABTS response received. */
+       if (ql2xfwholdabts)
+               ha->fw_options[3] |= BIT_12;
+
        if (!ql2xetsenable)
-               return;
+               goto out;
 
        /* Enable ETS Burst. */
        memset(ha->fw_options, 0, sizeof(ha->fw_options));
        ha->fw_options[2] |= BIT_9;
+out:
        qla2x00_set_fw_options(vha, ha->fw_options);
 }
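
Before this hunk qla81xx_update_fw_options returned early whenever ql2xetsenable was clear, so the hold-ABTS bit set just above would never have been sent to the firmware; the goto ensures qla2x00_set_fw_options() runs in every case. The resulting control flow, compressed into one illustrative sketch (note that the ETS path still memsets fw_options, which wipes BIT_12 again, exactly as in the patched function):

/* Illustrative shape of qla81xx_update_fw_options after this hunk. */
static void sketch_update_fw_options(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;	/* hold status IOCBs until ABTS response */

	if (ql2xetsenable) {
		memset(ha->fw_options, 0, sizeof(ha->fw_options));	/* clears BIT_12 as well */
		ha->fw_options[2] |= BIT_9;	/* enable ETS burst */
	}

	qla2x00_set_fw_options(vha, ha->fw_options);	/* now reached even when ETS is off */
}
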
 
index a6b7f1588aa407a56b2ed767c2f3bfde8fd91269..edc48f3b8230cd60b9b3df25bbf87394f5ad6676 100644 (file)
@@ -87,8 +87,8 @@ host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
        __le32 *odest = (__le32 *) dst;
        uint32_t iter = bsize >> 2;
 
-       for (; iter ; iter--)
-               *odest++ = cpu_to_le32(*isrc++);
+       for ( ; iter--; isrc++)
+               *odest++ = cpu_to_le32(*isrc);
 }
 
 static inline void
index d4d65eb0e9b41264eb2d86e4d4af4997e0268c5c..edd97de4e23867535564d9666733cd9b3050200a 100644 (file)
@@ -934,10 +934,6 @@ skip_rio:
                        break;
 
 global_port_update:
-                       /* Port unavailable. */
-                       ql_log(ql_log_warn, vha, 0x505e,
-                           "Link is offline.\n");
-
                        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                                atomic_set(&vha->loop_state, LOOP_DOWN);
                                atomic_set(&vha->loop_down_timer,
index 87e6758302f61b648907d16d5b2a65b3fddfe0eb..968b846130965a5ba77522c6d048988f0a4c291e 100644 (file)
@@ -1349,6 +1349,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
                mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
        if (IS_FWI2_CAPABLE(vha->hw))
                mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
+       if (IS_QLA27XX(vha->hw))
+               mcp->in_mb |= MBX_15;
        mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);
@@ -1400,6 +1402,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
                                    wwn_to_u64(vha->port_name));
                        }
                }
+
+               if (IS_QLA27XX(vha->hw))
+                       vha->bbcr = mcp->mb[15];
        }
 
        return rval;
@@ -2754,7 +2759,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
        int rval;
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
-       uint32_t *siter, *diter, dwords;
+       uint32_t *iter, dwords;
        struct qla_hw_data *ha = vha->hw;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
@@ -2794,10 +2799,11 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
                        /* Copy over data -- firmware data is LE. */
                        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
                            "Done %s.\n", __func__);
-                       dwords = offsetof(struct link_statistics, unused1) / 4;
-                       siter = diter = &stats->link_fail_cnt;
-                       while (dwords--)
-                               *diter++ = le32_to_cpu(*siter++);
+                       dwords = offsetof(struct link_statistics,
+                                       link_up_cnt) / 4;
+                       iter = &stats->link_fail_cnt;
+                       for ( ; dwords--; iter++)
+                               le32_to_cpus(iter);
                }
        } else {
                /* Failed. */
@@ -2814,7 +2820,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
        int rval;
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
-       uint32_t *siter, *diter, dwords;
+       uint32_t *iter, dwords;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
            "Entered %s.\n", __func__);
@@ -2843,9 +2849,9 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
                            "Done %s.\n", __func__);
                        /* Copy over data -- firmware data is LE. */
                        dwords = sizeof(struct link_statistics) / 4;
-                       siter = diter = &stats->link_fail_cnt;
-                       while (dwords--)
-                               *diter++ = le32_to_cpu(*siter++);
+                       iter = &stats->link_fail_cnt;
+                       for ( ; dwords--; iter++)
+                               le32_to_cpus(iter);
                }
        } else {
                /* Failed. */
@@ -3612,6 +3618,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                    rptid_entry->port_id[2], rptid_entry->port_id[1],
                    rptid_entry->port_id[0]);
 
+               /* buffer to buffer credit flag */
+               vha->flags.bbcr_enable = (rptid_entry->bbcr & 0xf) != 0;
+
                /* FA-WWN is only for physical port */
                if (!vp_idx) {
                        void *wwpn = ha->init_cb->port_name;
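
Both link-statistics paths above now convert the firmware's little-endian counters in place with le32_to_cpus() instead of copying word by word through le32_to_cpu(); the DMA buffer is its own destination, so a single pointer walk is enough. A minimal sketch of the idiom, assuming a kernel build environment where le32_to_cpus() and struct link_statistics (as extended in qla_def.h above) are available:

/* Illustrative: convert the first n_dwords 32-bit counters of the buffer in place. */
static void sketch_stats_to_cpu(struct link_statistics *stats, unsigned int n_dwords)
{
	uint32_t *iter = &stats->link_fail_cnt;	/* first counter in the block */

	for ( ; n_dwords--; iter++)
		le32_to_cpus(iter);	/* byte-swaps only on big-endian hosts */
}
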
index f1788db4319560c8210eda5a0fc4f576d5ec321f..fa017e9b02b0b7ad1509abb1a379bdff7ef2866a 100644 (file)
@@ -233,6 +233,13 @@ MODULE_PARM_DESC(ql2xexchoffld,
                 "Number of exchanges to offload. "
                 "0 (Default)- Disabled.");
 
+int ql2xfwholdabts = 0;
+module_param(ql2xfwholdabts, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xfwholdabts,
+               "Allow FW to hold status IOCB until ABTS rsp received. "
+               "0 (Default) Do not set fw option. "
+               "1 - Set fw option to hold ABTS.");
+
 /*
  * SCSI host template entry points
  */
@@ -2210,6 +2217,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
                ha->device_type |= DT_ZIO_SUPPORTED;
                ha->device_type |= DT_FWI2;
                ha->device_type |= DT_IIDMA;
+               ha->device_type |= DT_T10_PI;
                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
                break;
        case PCI_DEVICE_ID_QLOGIC_ISP2271:
@@ -2217,6 +2225,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
                ha->device_type |= DT_ZIO_SUPPORTED;
                ha->device_type |= DT_FWI2;
                ha->device_type |= DT_IIDMA;
+               ha->device_type |= DT_T10_PI;
                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
                break;
        case PCI_DEVICE_ID_QLOGIC_ISP2261:
@@ -2224,6 +2233,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
                ha->device_type |= DT_ZIO_SUPPORTED;
                ha->device_type |= DT_FWI2;
                ha->device_type |= DT_IIDMA;
+               ha->device_type |= DT_T10_PI;
                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
                break;
        }
index 3272ed5bbcc7108d7aa245fe7d1e45068d54dc35..5e93923164252926f8ee9c34d78d74cae79716e1 100644 (file)
@@ -610,8 +610,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
 
        wptr = (uint16_t *)req->ring;
        cnt = sizeof(struct qla_flt_location) >> 1;
-       for (chksum = 0; cnt; cnt--)
-               chksum += le16_to_cpu(*wptr++);
+       for (chksum = 0; cnt--; wptr++)
+               chksum += le16_to_cpu(*wptr);
        if (chksum) {
                ql_log(ql_log_fatal, vha, 0x0045,
                    "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
@@ -702,8 +702,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
        }
 
        cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
-       for (chksum = 0; cnt; cnt--)
-               chksum += le16_to_cpu(*wptr++);
+       for (chksum = 0; cnt--; wptr++)
+               chksum += le16_to_cpu(*wptr);
        if (chksum) {
                ql_log(ql_log_fatal, vha, 0x0048,
                    "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
@@ -846,6 +846,38 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
                        if (ha->port_no == 1)
                                ha->flt_region_nvram = start;
                        break;
+               case FLT_REG_IMG_PRI_27XX:
+                       if (IS_QLA27XX(ha))
+                               ha->flt_region_img_status_pri = start;
+                       break;
+               case FLT_REG_IMG_SEC_27XX:
+                       if (IS_QLA27XX(ha))
+                               ha->flt_region_img_status_sec = start;
+                       break;
+               case FLT_REG_FW_SEC_27XX:
+                       if (IS_QLA27XX(ha))
+                               ha->flt_region_fw_sec = start;
+                       break;
+               case FLT_REG_BOOTLOAD_SEC_27XX:
+                       if (IS_QLA27XX(ha))
+                               ha->flt_region_boot_sec = start;
+                       break;
+               case FLT_REG_VPD_SEC_27XX_0:
+                       if (IS_QLA27XX(ha))
+                               ha->flt_region_vpd_sec = start;
+                       break;
+               case FLT_REG_VPD_SEC_27XX_1:
+                       if (IS_QLA27XX(ha))
+                               ha->flt_region_vpd_sec = start;
+                       break;
+               case FLT_REG_VPD_SEC_27XX_2:
+                       if (IS_QLA27XX(ha))
+                               ha->flt_region_vpd_sec = start;
+                       break;
+               case FLT_REG_VPD_SEC_27XX_3:
+                       if (IS_QLA27XX(ha))
+                               ha->flt_region_vpd_sec = start;
+                       break;
                }
        }
        goto done;
@@ -898,9 +930,8 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
            fdt->sig[3] != 'D')
                goto no_flash_data;
 
-       for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1;
-           cnt++)
-               chksum += le16_to_cpu(*wptr++);
+       for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++)
+               chksum += le16_to_cpu(*wptr);
        if (chksum) {
                ql_dbg(ql_dbg_init, vha, 0x004c,
                    "Inconsistent FDT detected:"
@@ -995,7 +1026,8 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
                ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
                ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
        } else {
-               ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr++);
+               ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr);
+               wptr++;
                ha->fcoe_reset_timeout = le32_to_cpu(*wptr);
        }
        ql_dbg(ql_dbg_init, vha, 0x004e,
@@ -1072,10 +1104,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
        ha->isp_ops->read_optrom(vha, (uint8_t *)data,
            ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
 
-       cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
-           sizeof(struct qla_npiv_entry)) >> 1;
-       for (wptr = data, chksum = 0; cnt; cnt--)
-               chksum += le16_to_cpu(*wptr++);
+       cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1;
+       for (wptr = data, chksum = 0; cnt--; wptr++)
+               chksum += le16_to_cpu(*wptr);
        if (chksum) {
                ql_dbg(ql_dbg_user, vha, 0x7092,
                    "Inconsistent NPIV-Config "
@@ -2989,6 +3020,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
        uint8_t code_type, last_image;
        int i;
        struct qla_hw_data *ha = vha->hw;
+       uint32_t faddr = 0;
+
+       pcihdr = pcids = 0;
 
        if (IS_P3P_TYPE(ha))
                return ret;
@@ -3002,9 +3036,11 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
        memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
 
        dcode = mbuf;
-
-       /* Begin with first PCI expansion ROM header. */
        pcihdr = ha->flt_region_boot << 2;
+       if (IS_QLA27XX(ha) &&
+           qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+               pcihdr = ha->flt_region_boot_sec << 2;
+
        last_image = 1;
        do {
                /* Verify PCI expansion ROM header. */
@@ -3077,8 +3113,12 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
        /* Read firmware image information. */
        memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
        dcode = mbuf;
+       faddr = ha->flt_region_fw;
+       if (IS_QLA27XX(ha) &&
+           qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+               faddr = ha->flt_region_fw_sec;
 
-       qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4, 4);
+       qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
        for (i = 0; i < 4; i++)
                dcode[i] = be32_to_cpu(dcode[i]);
 
index 6d31faa8c57b8f7531c67c117819db460eaf9259..0bc93fa46dae4222646f17402d06eb1dc69e1f07 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.07.00.26-k"
+#define QLA2XXX_VERSION      "8.07.00.33-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   7
index 47b9d13f97b880033c20144968cf715c8af41623..da2e068ee47d15cf02b6b352c861227ccd82103a 100644 (file)
@@ -205,6 +205,7 @@ static struct {
        {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
        {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
        {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
+       {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
        {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
        {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
        {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
index 6a820668d442f2e1b1d111ee652717e9ae0e896a..1f02e842b9d13c6c7eb6ec6319fe98adf7dac8ea 100644 (file)
@@ -518,7 +518,8 @@ void scsi_target_reap(struct scsi_target *starget)
 }
 
 /**
- * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string
+ * scsi_sanitize_inquiry_string - remove non-graphical chars from an
+ *                                INQUIRY result string
  * @s: INQUIRY result string to sanitize
  * @len: length of the string
  *
@@ -531,7 +532,7 @@ void scsi_target_reap(struct scsi_target *starget)
  *     string terminator, so all the following characters are set to
  *     spaces.
  **/
-static void sanitize_inquiry_string(unsigned char *s, int len)
+void scsi_sanitize_inquiry_string(unsigned char *s, int len)
 {
        int terminated = 0;
 
@@ -542,6 +543,7 @@ static void sanitize_inquiry_string(unsigned char *s, int len)
                        *s = ' ';
        }
 }
+EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
 
 /**
  * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
@@ -627,9 +629,9 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
        }
 
        if (result == 0) {
-               sanitize_inquiry_string(&inq_result[8], 8);
-               sanitize_inquiry_string(&inq_result[16], 16);
-               sanitize_inquiry_string(&inq_result[32], 4);
+               scsi_sanitize_inquiry_string(&inq_result[8], 8);
+               scsi_sanitize_inquiry_string(&inq_result[16], 16);
+               scsi_sanitize_inquiry_string(&inq_result[32], 4);
 
                response_len = inq_result[4] + 5;
                if (response_len > 255)
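
Now that the helper is exported as scsi_sanitize_inquiry_string(), code outside the scanner can scrub raw INQUIRY vendor/model/revision bytes the same way scsi_probe_lun() does above. A usage sketch (the offsets and lengths follow the standard 36-byte INQUIRY layout used in the hunk; the wrapper itself is hypothetical):

    #include <scsi/scsi_device.h>

    /* Replace non-graphic characters in the vendor (8 bytes at offset 8),
     * model (16 bytes at 16) and revision (4 bytes at 32) fields of a raw
     * INQUIRY response with spaces before exposing them. */
    static void sanitize_std_inquiry(unsigned char *inq_result)
    {
            scsi_sanitize_inquiry_string(&inq_result[8], 8);
            scsi_sanitize_inquiry_string(&inq_result[16], 16);
            scsi_sanitize_inquiry_string(&inq_result[32], 4);
    }
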
index e4b3d8f4fd85e25f09c19af401476a261d457b49..441481623fb9ca24f198c164e0372b59ae5e30af 100644 (file)
@@ -4308,6 +4308,8 @@ static const struct {
        {ISCSI_PORT_SPEED_100MBPS,      "100 Mbps" },
        {ISCSI_PORT_SPEED_1GBPS,        "1 Gbps" },
        {ISCSI_PORT_SPEED_10GBPS,       "10 Gbps" },
+       {ISCSI_PORT_SPEED_25GBPS,       "25 Gbps" },
+       {ISCSI_PORT_SPEED_40GBPS,       "40 Gbps" },
 };
 
 char *iscsi_get_port_speed_name(struct Scsi_Host *shost)
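
The 25 and 40 Gbps entries extend the one-bit-per-speed encoding from include/scsi/iscsi_if.h, which keeps the table above a simple flag-to-string lookup. A standalone sketch of that lookup (plain C; the flag values and names mirror the ones visible in this diff, the fallback string is an assumption):

    #include <stdio.h>

    /* One bit per speed, matching the iscsi_port_speed flag encoding. */
    static const struct {
            unsigned int value;
            const char *name;
    } port_speed_names[] = {
            { 0x4,  "100 Mbps" },
            { 0x8,  "1 Gbps"   },
            { 0x10, "10 Gbps"  },
            { 0x20, "25 Gbps"  },
            { 0x40, "40 Gbps"  },
    };

    static const char *port_speed_name(unsigned int speed)
    {
            size_t i;

            for (i = 0; i < sizeof(port_speed_names) / sizeof(port_speed_names[0]); i++)
                    if (port_speed_names[i].value & speed)
                            return port_speed_names[i].name;
            return "Unknown";
    }

    int main(void)
    {
            printf("%s\n", port_speed_name(0x20));  /* prints "25 Gbps" */
            return 0;
    }
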
index bb669d32ccd0daee203a69840313fcb9cf343ee0..d749da765df141d7aa66ef2a31e591f6cbc94fed 100644 (file)
@@ -761,7 +761,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
                break;
 
        default:
-               ret = BLKPREP_KILL;
+               ret = BLKPREP_INVALID;
                goto out;
        }
 
@@ -839,7 +839,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
        int ret;
 
        if (sdkp->device->no_write_same)
-               return BLKPREP_KILL;
+               return BLKPREP_INVALID;
 
        BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
 
index 55627d097873a85780a878e32b32c20e6c4a9524..292c04eec9ad4b56518afc243d92b37602a3f456 100644 (file)
@@ -42,6 +42,7 @@
 #include <scsi/scsi_devinfo.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_transport.h>
 
 /*
  * All wire protocol details (storage protocol between the guest and the host)
@@ -477,19 +478,18 @@ struct hv_host_device {
 struct storvsc_scan_work {
        struct work_struct work;
        struct Scsi_Host *host;
-       uint lun;
+       u8 lun;
+       u8 tgt_id;
 };
 
 static void storvsc_device_scan(struct work_struct *work)
 {
        struct storvsc_scan_work *wrk;
-       uint lun;
        struct scsi_device *sdev;
 
        wrk = container_of(work, struct storvsc_scan_work, work);
-       lun = wrk->lun;
 
-       sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+       sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
        if (!sdev)
                goto done;
        scsi_rescan_device(&sdev->sdev_gendev);
@@ -540,7 +540,7 @@ static void storvsc_remove_lun(struct work_struct *work)
        if (!scsi_host_get(wrk->host))
                goto done;
 
-       sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
+       sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
 
        if (sdev) {
                scsi_remove_device(sdev);
@@ -940,6 +940,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
 
        wrk->host = host;
        wrk->lun = vm_srb->lun;
+       wrk->tgt_id = vm_srb->target_id;
        INIT_WORK(&wrk->work, process_err_fn);
        schedule_work(&wrk->work);
 }
@@ -1770,6 +1771,11 @@ static int __init storvsc_drv_init(void)
        fc_transport_template = fc_attach_transport(&fc_transport_functions);
        if (!fc_transport_template)
                return -ENODEV;
+
+       /*
+        * Install Hyper-V specific timeout handler.
+        */
+       fc_transport_template->eh_timed_out = storvsc_eh_timed_out;
 #endif
 
        ret = vmbus_driver_register(&storvsc_drv);
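
The scan and remove work items now carry the target id next to the LUN, so the scsi_device_lookup() calls no longer assume target 0. A minimal sketch of that deferred-rescan pattern keyed by (host, target, lun) (structure and function names are illustrative; the queueing site stands in for an error-handling path such as the one above):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    struct scan_work_sketch {
            struct work_struct work;
            struct Scsi_Host *host;
            u8 tgt_id;
            u8 lun;
    };

    static void scan_work_fn(struct work_struct *work)
    {
            struct scan_work_sketch *wrk =
                    container_of(work, struct scan_work_sketch, work);
            struct scsi_device *sdev;

            sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
            if (sdev) {
                    scsi_rescan_device(&sdev->sdev_gendev);
                    scsi_device_put(sdev);
            }
            kfree(wrk);
    }

    /* Queue a rescan of one (target, lun) pair from atomic context. */
    static int queue_rescan(struct Scsi_Host *host, u8 tgt_id, u8 lun)
    {
            struct scan_work_sketch *wrk = kzalloc(sizeof(*wrk), GFP_ATOMIC);

            if (!wrk)
                    return -ENOMEM;
            wrk->host = host;
            wrk->tgt_id = tgt_id;
            wrk->lun = lun;
            INIT_WORK(&wrk->work, scan_work_fn);
            schedule_work(&wrk->work);
            return 0;
    }
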
index 29189aeace19df0650c15e374e60724ce77ac917..4571ef1a12a968ea62a78b87371c83a07e89d8e9 100644 (file)
@@ -682,9 +682,12 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 /*
  * q->prep_rq_fn return values
  */
-#define BLKPREP_OK             0       /* serve it */
-#define BLKPREP_KILL           1       /* fatal error, kill */
-#define BLKPREP_DEFER          2       /* leave on queue */
+enum {
+       BLKPREP_OK,             /* serve it */
+       BLKPREP_KILL,           /* fatal error, kill, return -EIO */
+       BLKPREP_DEFER,          /* leave on queue */
+       BLKPREP_INVALID,        /* invalid command, kill, return -EREMOTEIO */
+};
 
 extern unsigned long blk_max_low_pfn, blk_max_pfn;
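
The comments on the new enum spell out the intended errno mapping: BLKPREP_KILL fails the request with -EIO while BLKPREP_INVALID fails it with -EREMOTEIO, which is what lets sd.c distinguish unsupported commands (the BLKPREP_INVALID conversions above) from genuine fatal errors. A small sketch of that mapping (the helper name is hypothetical; the pairs come straight from the enum comments):

    #include <linux/blkdev.h>
    #include <linux/errno.h>

    /* Translate a q->prep_rq_fn return code into the error used to fail
     * the request; BLKPREP_OK and BLKPREP_DEFER do not fail it at all. */
    static int blkprep_to_errno(int prep_ret)
    {
            switch (prep_ret) {
            case BLKPREP_KILL:
                    return -EIO;            /* fatal error, kill */
            case BLKPREP_INVALID:
                    return -EREMOTEIO;      /* invalid command, kill */
            default:
                    return 0;               /* BLKPREP_OK / BLKPREP_DEFER */
            }
    }
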
 
index 95ed9424a11af26d21ba83865386d9e1e573231c..d66c07077d687f9dff63628b56e7d874f97fe5ea 100644 (file)
@@ -724,6 +724,8 @@ enum iscsi_port_speed {
        ISCSI_PORT_SPEED_100MBPS        = 0x4,
        ISCSI_PORT_SPEED_1GBPS          = 0x8,
        ISCSI_PORT_SPEED_10GBPS         = 0x10,
+       ISCSI_PORT_SPEED_25GBPS         = 0x20,
+       ISCSI_PORT_SPEED_40GBPS         = 0x40,
 };
 
 /* iSCSI port state */
index f63a16760ae90a2d9e4887bc6874dbe2dd7bcfc9..9173ab5a6f724a51bf9832958bcbb3fb4905c442 100644 (file)
@@ -397,6 +397,7 @@ extern void scsi_remove_target(struct device *);
 extern const char *scsi_device_state_name(enum scsi_device_state);
 extern int scsi_is_sdev_device(const struct device *);
 extern int scsi_is_target_device(const struct device *);
+extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
 extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                        int data_direction, void *buffer, unsigned bufflen,
                        unsigned char *sense, int timeout, int retries,