diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index dcecbfb172436b3ec0d51232efd988e7247f9afe..c23c76ccef65aa1da973fd2869389a72774daacc 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3,10 +3,7 @@
  *
  * This file contains the Generic Target Engine Core.
  *
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -70,7 +67,6 @@ static void transport_handle_queue_full(struct se_cmd *cmd,
 static int transport_generic_get_mem(struct se_cmd *cmd);
 static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
 static void transport_put_cmd(struct se_cmd *cmd);
-static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
@@ -297,7 +293,7 @@ void transport_register_session(
 }
 EXPORT_SYMBOL(transport_register_session);
 
-void target_release_session(struct kref *kref)
+static void target_release_session(struct kref *kref)
 {
        struct se_session *se_sess = container_of(kref,
                        struct se_session, sess_kref);
@@ -558,7 +554,8 @@ static void target_complete_failure_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 
-       transport_generic_request_failure(cmd);
+       transport_generic_request_failure(cmd,
+                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
 }
 
 /*
@@ -626,7 +623,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
                complete(&cmd->t_transport_stop_comp);
                return;
        } else if (cmd->transport_state & CMD_T_FAILED) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
                INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -659,7 +655,7 @@ static void target_add_to_state_list(struct se_cmd *cmd)
 static void transport_write_pending_qf(struct se_cmd *cmd);
 static void transport_complete_qf(struct se_cmd *cmd);
 
-static void target_qf_do_work(struct work_struct *work)
+void target_qf_do_work(struct work_struct *work)
 {
        struct se_device *dev = container_of(work, struct se_device,
                                        qf_work_queue);
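
Dropping the static qualifier here pairs with the removal of transport_add_device_to_core_hba() below: se_device setup, and with it the INIT_WORK() for the queue-full worker, presumably moves to the new allocation path in target_core_device.c, which needs this symbol visible. The hookup itself is unchanged from the removed setup code:

	/* In the relocated device-setup path (exact new home is assumed): */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
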
@@ -712,29 +708,15 @@ void transport_dump_dev_state(
        int *bl)
 {
        *bl += sprintf(b + *bl, "Status: ");
-       switch (dev->dev_status) {
-       case TRANSPORT_DEVICE_ACTIVATED:
+       if (dev->export_count)
                *bl += sprintf(b + *bl, "ACTIVATED");
-               break;
-       case TRANSPORT_DEVICE_DEACTIVATED:
+       else
                *bl += sprintf(b + *bl, "DEACTIVATED");
-               break;
-       case TRANSPORT_DEVICE_SHUTDOWN:
-               *bl += sprintf(b + *bl, "SHUTDOWN");
-               break;
-       case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-       case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-               *bl += sprintf(b + *bl, "OFFLINE");
-               break;
-       default:
-               *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
-               break;
-       }
 
        *bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
        *bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
-               dev->se_sub_dev->se_dev_attrib.block_size,
-               dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+               dev->dev_attrib.block_size,
+               dev->dev_attrib.hw_max_sectors);
        *bl += sprintf(b + *bl, "        ");
 }
 
@@ -991,186 +973,8 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
 }
 EXPORT_SYMBOL(transport_set_vpd_ident);
 
-static void core_setup_task_attr_emulation(struct se_device *dev)
-{
-       /*
-        * If this device is from Target_Core_Mod/pSCSI, disable the
-        * SAM Task Attribute emulation.
-        *
-        * This is currently not available in upsream Linux/SCSI Target
-        * mode code, and is assumed to be disabled while using TCM/pSCSI.
-        */
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-               dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
-               return;
-       }
-
-       dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
-       pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
-               " device\n", dev->transport->name,
-               dev->transport->get_device_rev(dev));
-}
-
-static void scsi_dump_inquiry(struct se_device *dev)
-{
-       struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
-       char buf[17];
-       int i, device_type;
-       /*
-        * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
-        */
-       for (i = 0; i < 8; i++)
-               if (wwn->vendor[i] >= 0x20)
-                       buf[i] = wwn->vendor[i];
-               else
-                       buf[i] = ' ';
-       buf[i] = '\0';
-       pr_debug("  Vendor: %s\n", buf);
-
-       for (i = 0; i < 16; i++)
-               if (wwn->model[i] >= 0x20)
-                       buf[i] = wwn->model[i];
-               else
-                       buf[i] = ' ';
-       buf[i] = '\0';
-       pr_debug("  Model: %s\n", buf);
-
-       for (i = 0; i < 4; i++)
-               if (wwn->revision[i] >= 0x20)
-                       buf[i] = wwn->revision[i];
-               else
-                       buf[i] = ' ';
-       buf[i] = '\0';
-       pr_debug("  Revision: %s\n", buf);
-
-       device_type = dev->transport->get_device_type(dev);
-       pr_debug("  Type:   %s ", scsi_device_type(device_type));
-       pr_debug("                 ANSI SCSI revision: %02x\n",
-                               dev->transport->get_device_rev(dev));
-}
-
-struct se_device *transport_add_device_to_core_hba(
-       struct se_hba *hba,
-       struct se_subsystem_api *transport,
-       struct se_subsystem_dev *se_dev,
-       u32 device_flags,
-       void *transport_dev,
-       struct se_dev_limits *dev_limits,
-       const char *inquiry_prod,
-       const char *inquiry_rev)
-{
-       int force_pt;
-       struct se_device  *dev;
-
-       dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
-       if (!dev) {
-               pr_err("Unable to allocate memory for se_dev_t\n");
-               return NULL;
-       }
-
-       dev->dev_flags          = device_flags;
-       dev->dev_status         |= TRANSPORT_DEVICE_DEACTIVATED;
-       dev->dev_ptr            = transport_dev;
-       dev->se_hba             = hba;
-       dev->se_sub_dev         = se_dev;
-       dev->transport          = transport;
-       INIT_LIST_HEAD(&dev->dev_list);
-       INIT_LIST_HEAD(&dev->dev_sep_list);
-       INIT_LIST_HEAD(&dev->dev_tmr_list);
-       INIT_LIST_HEAD(&dev->delayed_cmd_list);
-       INIT_LIST_HEAD(&dev->state_list);
-       INIT_LIST_HEAD(&dev->qf_cmd_list);
-       spin_lock_init(&dev->execute_task_lock);
-       spin_lock_init(&dev->delayed_cmd_lock);
-       spin_lock_init(&dev->dev_reservation_lock);
-       spin_lock_init(&dev->dev_status_lock);
-       spin_lock_init(&dev->se_port_lock);
-       spin_lock_init(&dev->se_tmr_lock);
-       spin_lock_init(&dev->qf_cmd_lock);
-       atomic_set(&dev->dev_ordered_id, 0);
-
-       se_dev_set_default_attribs(dev, dev_limits);
-
-       dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
-       dev->creation_time = get_jiffies_64();
-       spin_lock_init(&dev->stats_lock);
-
-       spin_lock(&hba->device_lock);
-       list_add_tail(&dev->dev_list, &hba->hba_dev_list);
-       hba->dev_count++;
-       spin_unlock(&hba->device_lock);
-       /*
-        * Setup the SAM Task Attribute emulation for struct se_device
-        */
-       core_setup_task_attr_emulation(dev);
-       /*
-        * Force PR and ALUA passthrough emulation with internal object use.
-        */
-       force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
-       /*
-        * Setup the Reservations infrastructure for struct se_device
-        */
-       core_setup_reservations(dev, force_pt);
-       /*
-        * Setup the Asymmetric Logical Unit Assignment for struct se_device
-        */
-       if (core_setup_alua(dev, force_pt) < 0)
-               goto err_dev_list;
-
-       /*
-        * Startup the struct se_device processing thread
-        */
-       dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
-                                     dev->transport->name);
-       if (!dev->tmr_wq) {
-               pr_err("Unable to create tmr workqueue for %s\n",
-                       dev->transport->name);
-               goto err_dev_list;
-       }
-       /*
-        * Setup work_queue for QUEUE_FULL
-        */
-       INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
-       /*
-        * Preload the initial INQUIRY const values if we are doing
-        * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
-        * passthrough because this is being provided by the backend LLD.
-        * This is required so that transport_get_inquiry() copies these
-        * originals once back into DEV_T10_WWN(dev) for the virtual device
-        * setup.
-        */
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
-               if (!inquiry_prod || !inquiry_rev) {
-                       pr_err("All non TCM/pSCSI plugins require"
-                               " INQUIRY consts\n");
-                       goto err_wq;
-               }
-
-               strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
-               strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
-               strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
-       }
-       scsi_dump_inquiry(dev);
-
-       return dev;
-
-err_wq:
-       destroy_workqueue(dev->tmr_wq);
-err_dev_list:
-       spin_lock(&hba->device_lock);
-       list_del(&dev->dev_list);
-       hba->dev_count--;
-       spin_unlock(&hba->device_lock);
-
-       se_release_vpd_for_dev(dev);
-
-       kfree(dev);
-
-       return NULL;
-}
-EXPORT_SYMBOL(transport_add_device_to_core_hba);
-
-int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
+sense_reason_t
+target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 {
        struct se_device *dev = cmd->se_dev;
 
@@ -1185,18 +989,18 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
                if (cmd->data_direction == DMA_TO_DEVICE) {
                        pr_err("Rejecting underflow/overflow"
                                        " WRITE data\n");
-                       goto out_invalid_cdb_field;
+                       return TCM_INVALID_CDB_FIELD;
                }
                /*
                 * Reject READ_* or WRITE_* with overflow/underflow for
                 * type SCF_SCSI_DATA_CDB.
                 */
-               if (dev->se_sub_dev->se_dev_attrib.block_size != 512)  {
+               if (dev->dev_attrib.block_size != 512)  {
                        pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
                                " CDB on non 512-byte sector setup subsystem"
                                " plugin: %s\n", dev->transport->name);
                        /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
-                       goto out_invalid_cdb_field;
+                       return TCM_INVALID_CDB_FIELD;
                }
                /*
                 * For the overflow case keep the existing fabric provided
@@ -1216,10 +1020,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 
        return 0;
 
-out_invalid_cdb_field:
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-       return -EINVAL;
 }
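
This rewrite shows the pattern the whole commit follows: instead of setting SCF_SCSI_CDB_EXCEPTION, stashing a TCM_* code in cmd->scsi_sense_reason and returning -EINVAL, a check returns the sense_reason_t itself, with 0 still meaning success. A minimal sketch of the convention, using a hypothetical check function:

	static sense_reason_t my_extra_check(struct se_cmd *cmd)
	{
		if (cmd->data_length & 0x3)		/* illustrative condition */
			return TCM_INVALID_CDB_FIELD;	/* becomes CHECK CONDITION */
		return 0;
	}

	/* Callers chain checks and propagate the first failure: */
	ret = my_extra_check(cmd);
	if (ret)
		return ret;	/* eventually encoded by
				 * transport_send_check_condition_and_sense() */
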
 
 /*
@@ -1259,45 +1059,41 @@ void transport_init_se_cmd(
 }
 EXPORT_SYMBOL(transport_init_se_cmd);
 
-static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+static sense_reason_t
+transport_check_alloc_task_attr(struct se_cmd *cmd)
 {
+       struct se_device *dev = cmd->se_dev;
+
        /*
         * Check if SAM Task Attribute emulation is enabled for this
         * struct se_device storage object
         */
-       if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                return 0;
 
        if (cmd->sam_task_attr == MSG_ACA_TAG) {
                pr_debug("SAM Task Attribute ACA"
                        " emulation is not supported\n");
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
        /*
         * Used to determine when ORDERED commands should go from
         * Dormant to Active status.
         */
-       cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
+       cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
        smp_mb__after_atomic_inc();
        pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
                        cmd->se_ordered_id, cmd->sam_task_attr,
-                       cmd->se_dev->transport->name);
+                       dev->transport->name);
        return 0;
 }
 
-/*     target_setup_cmd_from_cdb():
- *
- *     Called from fabric RX Thread.
- */
-int target_setup_cmd_from_cdb(
-       struct se_cmd *cmd,
-       unsigned char *cdb)
+sense_reason_t
+target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
 {
-       struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
-       u32 pr_reg_type = 0;
-       u8 alua_ascq = 0;
+       struct se_device *dev = cmd->se_dev;
        unsigned long flags;
-       int ret;
+       sense_reason_t ret;
 
        /*
         * Ensure that the received CDB is less than the max (252 + 8) bytes
@@ -1307,9 +1103,7 @@ int target_setup_cmd_from_cdb(
                pr_err("Received SCSI CDB with command_size: %d that"
                        " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                        scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
        /*
         * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
@@ -1324,10 +1118,7 @@ int target_setup_cmd_from_cdb(
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                scsi_command_size(cdb),
                                (unsigned long)sizeof(cmd->__t_task_cdb));
-                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                       cmd->scsi_sense_reason =
-                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-                       return -ENOMEM;
+                       return TCM_OUT_OF_RESOURCES;
                }
        } else
                cmd->t_task_cdb = &cmd->__t_task_cdb[0];
@@ -1339,70 +1130,30 @@ int target_setup_cmd_from_cdb(
        /*
         * Check for an existing UNIT ATTENTION condition
         */
-       if (core_scsi3_ua_check(cmd, cdb) < 0) {
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
-               return -EINVAL;
-       }
+       ret = target_scsi3_ua_check(cmd);
+       if (ret)
+               return ret;
 
-       ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
-       if (ret != 0) {
-               /*
-                * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
-                * The ALUA additional sense code qualifier (ASCQ) is determined
-                * by the ALUA primary or secondary access state..
-                */
-               if (ret > 0) {
-                       pr_debug("[%s]: ALUA TG Port not available, "
-                               "SenseKey: NOT_READY, ASC/ASCQ: "
-                               "0x04/0x%02x\n",
-                               cmd->se_tfo->get_fabric_name(), alua_ascq);
-
-                       transport_set_sense_codes(cmd, 0x04, alua_ascq);
-                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                       cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
-                       return -EINVAL;
-               }
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
-       }
+       ret = target_alua_state_check(cmd);
+       if (ret)
+               return ret;
 
-       /*
-        * Check status for SPC-3 Persistent Reservations
-        */
-       if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
-               if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
-                                       cmd, cdb, pr_reg_type) != 0) {
-                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
-                       cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
-                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-                       return -EBUSY;
-               }
-               /*
-                * This means the CDB is allowed for the SCSI Initiator port
-                * when said port is *NOT* holding the legacy SPC-2 or
-                * SPC-3 Persistent Reservation.
-                */
-       }
+       ret = target_check_reservation(cmd);
+       if (ret)
+               return ret;
 
-       ret = cmd->se_dev->transport->parse_cdb(cmd);
-       if (ret < 0)
+       ret = dev->transport->parse_cdb(cmd);
+       if (ret)
+               return ret;
+
+       ret = transport_check_alloc_task_attr(cmd);
+       if (ret)
                return ret;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-       /*
-        * Check for SAM Task Attribute Emulation
-        */
-       if (transport_check_alloc_task_attr(cmd) < 0) {
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
-       }
        spin_lock(&cmd->se_lun->lun_sep_lock);
        if (cmd->se_lun->lun_sep)
                cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
@@ -1418,7 +1169,7 @@ EXPORT_SYMBOL(target_setup_cmd_from_cdb);
 int transport_handle_cdb_direct(
        struct se_cmd *cmd)
 {
-       int ret;
+       sense_reason_t ret;
 
        if (!cmd->se_lun) {
                dump_stack();
@@ -1448,13 +1199,41 @@ int transport_handle_cdb_direct(
         * and call transport_generic_request_failure() if necessary..
         */
        ret = transport_generic_new_cmd(cmd);
-       if (ret < 0)
-               transport_generic_request_failure(cmd);
-
+       if (ret)
+               transport_generic_request_failure(cmd, ret);
        return 0;
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
 
+static sense_reason_t
+transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+               u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+{
+       if (!sgl || !sgl_count)
+               return 0;
+
+       /*
+        * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+        * scatterlists already have been set to follow what the fabric
+        * passes for the original expected data transfer length.
+        */
+       if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+               pr_warn("Rejecting SCSI DATA overflow for fabric using"
+                       " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+               return TCM_INVALID_CDB_FIELD;
+       }
+
+       cmd->t_data_sg = sgl;
+       cmd->t_data_nents = sgl_count;
+
+       if (sgl_bidi && sgl_bidi_count) {
+               cmd->t_bidi_data_sg = sgl_bidi;
+               cmd->t_bidi_data_nents = sgl_bidi_count;
+       }
+       cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+       return 0;
+}
+
 /*
  * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
  *                      se_cmd + use pre-allocated SGL memory.
@@ -1487,7 +1266,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
                struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
 {
        struct se_portal_group *se_tpg;
-       int rc;
+       sense_reason_t rc;
+       int ret;
 
        se_tpg = se_sess->se_tpg;
        BUG_ON(!se_tpg);
@@ -1508,9 +1288,9 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
         * for fabrics using TARGET_SCF_ACK_KREF that expect a second
         * kref_put() to happen during fabric packet acknowledgement.
         */
-       rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
-       if (rc)
-               return rc;
+       ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+       if (ret)
+               return ret;
        /*
         * Signal bidirectional data payloads to target-core
         */
@@ -1519,16 +1299,16 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
        /*
         * Locate se_lun pointer and attach it to struct se_cmd
         */
-       if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
-               transport_send_check_condition_and_sense(se_cmd,
-                               se_cmd->scsi_sense_reason, 0);
+       rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+       if (rc) {
+               transport_send_check_condition_and_sense(se_cmd, rc, 0);
                target_put_sess_cmd(se_sess, se_cmd);
                return 0;
        }
 
        rc = target_setup_cmd_from_cdb(se_cmd, cdb);
        if (rc != 0) {
-               transport_generic_request_failure(se_cmd);
+               transport_generic_request_failure(se_cmd, rc);
                return 0;
        }
        /*
@@ -1563,7 +1343,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
                rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
                                sgl_bidi, sgl_bidi_count);
                if (rc != 0) {
-                       transport_generic_request_failure(se_cmd);
+                       transport_generic_request_failure(se_cmd, rc);
                        return 0;
                }
        }
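
With transport_generic_map_mem_to_cmd() made static (its exported variant is deleted near the end of this diff), fabrics that own their data scatterlists are expected to submit through target_submit_cmd_map_sgls(). A hedged sketch of such a call; my_cmd, my_sess and the cleanup helper are placeholders:

	rc = target_submit_cmd_map_sgls(&my_cmd->se_cmd, my_sess->se_sess,
			cdb, my_cmd->sense_buf, unpacked_lun, data_length,
			MSG_SIMPLE_TAG, DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF,
			my_cmd->sgl, my_cmd->sgl_count,
			NULL, 0);			/* no BIDI payload */
	if (rc < 0)
		my_fabric_free_cmd(my_cmd);	/* se_cmd never reached the core */
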
@@ -1709,16 +1489,17 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
-void transport_generic_request_failure(struct se_cmd *cmd)
+void transport_generic_request_failure(struct se_cmd *cmd,
+               sense_reason_t sense_reason)
 {
        int ret = 0;
 
        pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
                " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->t_task_cdb[0]);
-       pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
+       pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
                cmd->se_tfo->get_cmd_state(cmd),
-               cmd->t_state, cmd->scsi_sense_reason);
+               cmd->t_state, sense_reason);
        pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
                (cmd->transport_state & CMD_T_ACTIVE) != 0,
                (cmd->transport_state & CMD_T_STOP) != 0,
@@ -1727,10 +1508,9 @@ void transport_generic_request_failure(struct se_cmd *cmd)
        /*
         * For SAM Task Attribute emulation for failed struct se_cmd
         */
-       if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-               transport_complete_task_attr(cmd);
+       transport_complete_task_attr(cmd);
 
-       switch (cmd->scsi_sense_reason) {
+       switch (sense_reason) {
        case TCM_NON_EXISTENT_LUN:
        case TCM_UNSUPPORTED_SCSI_OPCODE:
        case TCM_INVALID_CDB_FIELD:
@@ -1743,6 +1523,9 @@ void transport_generic_request_failure(struct se_cmd *cmd)
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
        case TCM_CHECK_CONDITION_NOT_READY:
                break;
+       case TCM_OUT_OF_RESOURCES:
+               sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               break;
        case TCM_RESERVATION_CONFLICT:
                /*
                 * No SENSE Data payload for this case, set SCSI Status
@@ -1759,7 +1542,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
                 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
                 */
                if (cmd->se_sess &&
-                   cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+                   cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
                        core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
                                cmd->orig_fe_lun, 0x2C,
                                ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
@@ -1770,13 +1553,12 @@ void transport_generic_request_failure(struct se_cmd *cmd)
                goto check_stop;
        default:
                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
-                       cmd->t_task_cdb[0], cmd->scsi_sense_reason);
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+                       cmd->t_task_cdb[0], sense_reason);
+               sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                break;
        }
 
-       ret = transport_send_check_condition_and_sense(cmd,
-                       cmd->scsi_sense_reason, 0);
+       ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
        if (ret == -EAGAIN || ret == -ENOMEM)
                goto queue_full;
 
@@ -1794,69 +1576,30 @@ EXPORT_SYMBOL(transport_generic_request_failure);
 
 static void __target_execute_cmd(struct se_cmd *cmd)
 {
-       int error = 0;
+       sense_reason_t ret;
 
        spin_lock_irq(&cmd->t_state_lock);
        cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
        spin_unlock_irq(&cmd->t_state_lock);
 
-       if (cmd->execute_cmd)
-               error = cmd->execute_cmd(cmd);
+       if (cmd->execute_cmd) {
+               ret = cmd->execute_cmd(cmd);
+               if (ret) {
+                       spin_lock_irq(&cmd->t_state_lock);
+                       cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+                       spin_unlock_irq(&cmd->t_state_lock);
 
-       if (error) {
-               spin_lock_irq(&cmd->t_state_lock);
-               cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
-               spin_unlock_irq(&cmd->t_state_lock);
-
-               transport_generic_request_failure(cmd);
+                       transport_generic_request_failure(cmd, ret);
+               }
        }
 }
 
-void target_execute_cmd(struct se_cmd *cmd)
+static bool target_handle_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
-       /*
-        * If the received CDB has aleady been aborted stop processing it here.
-        */
-       if (transport_check_aborted_status(cmd, 1)) {
-               complete(&cmd->t_transport_stop_comp);
-               return;
-       }
-
-       /*
-        * Determine if IOCTL context caller in requesting the stopping of this
-        * command for LUN shutdown purposes.
-        */
-       spin_lock_irq(&cmd->t_state_lock);
-       if (cmd->transport_state & CMD_T_LUN_STOP) {
-               pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
-                       __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
-               cmd->transport_state &= ~CMD_T_ACTIVE;
-               spin_unlock_irq(&cmd->t_state_lock);
-               complete(&cmd->transport_lun_stop_comp);
-               return;
-       }
-       /*
-        * Determine if frontend context caller is requesting the stopping of
-        * this command for frontend exceptions.
-        */
-       if (cmd->transport_state & CMD_T_STOP) {
-               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
-                       __func__, __LINE__,
-                       cmd->se_tfo->get_task_tag(cmd));
-
-               spin_unlock_irq(&cmd->t_state_lock);
-               complete(&cmd->t_transport_stop_comp);
-               return;
-       }
-
-       cmd->t_state = TRANSPORT_PROCESSING;
-       spin_unlock_irq(&cmd->t_state_lock);
-
-       if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
-               goto execute;
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+               return false;
 
        /*
         * Check for the existence of HEAD_OF_QUEUE, and if true return 1
@@ -1867,7 +1610,7 @@ void target_execute_cmd(struct se_cmd *cmd)
                pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
                         "se_ordered_id: %u\n",
                         cmd->t_task_cdb[0], cmd->se_ordered_id);
-               goto execute;
+               return false;
        case MSG_ORDERED_TAG:
                atomic_inc(&dev->dev_ordered_sync);
                smp_mb__after_atomic_inc();
@@ -1881,7 +1624,7 @@ void target_execute_cmd(struct se_cmd *cmd)
                 * exist that need to be completed first.
                 */
                if (!atomic_read(&dev->simple_cmds))
-                       goto execute;
+                       return false;
                break;
        default:
                /*
@@ -1892,23 +1635,63 @@ void target_execute_cmd(struct se_cmd *cmd)
                break;
        }
 
-       if (atomic_read(&dev->dev_ordered_sync) != 0) {
-               spin_lock(&dev->delayed_cmd_lock);
-               list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
-               spin_unlock(&dev->delayed_cmd_lock);
+       if (atomic_read(&dev->dev_ordered_sync) == 0)
+               return false;
 
-               pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
-                       " delayed CMD list, se_ordered_id: %u\n",
-                       cmd->t_task_cdb[0], cmd->sam_task_attr,
-                       cmd->se_ordered_id);
+       spin_lock(&dev->delayed_cmd_lock);
+       list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+       spin_unlock(&dev->delayed_cmd_lock);
+
+       pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
+               " delayed CMD list, se_ordered_id: %u\n",
+               cmd->t_task_cdb[0], cmd->sam_task_attr,
+               cmd->se_ordered_id);
+       return true;
+}
+
+void target_execute_cmd(struct se_cmd *cmd)
+{
+       /*
+        * If the received CDB has already been aborted stop processing it here.
+        */
+       if (transport_check_aborted_status(cmd, 1)) {
+               complete(&cmd->transport_lun_stop_comp);
                return;
        }
 
-execute:
        /*
-        * Otherwise, no ORDERED task attributes exist..
+        * Determine if IOCTL context caller is requesting the stopping of this
+        * command for LUN shutdown purposes.
         */
-       __target_execute_cmd(cmd);
+       spin_lock_irq(&cmd->t_state_lock);
+       if (cmd->transport_state & CMD_T_LUN_STOP) {
+               pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
+
+               cmd->transport_state &= ~CMD_T_ACTIVE;
+               spin_unlock_irq(&cmd->t_state_lock);
+               complete(&cmd->transport_lun_stop_comp);
+               return;
+       }
+       /*
+        * Determine if frontend context caller is requesting the stopping of
+        * this command for frontend exceptions.
+        */
+       if (cmd->transport_state & CMD_T_STOP) {
+               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__,
+                       cmd->se_tfo->get_task_tag(cmd));
+
+               spin_unlock_irq(&cmd->t_state_lock);
+               complete(&cmd->t_transport_stop_comp);
+               return;
+       }
+
+       cmd->t_state = TRANSPORT_PROCESSING;
+       spin_unlock_irq(&cmd->t_state_lock);
+
+       if (!target_handle_task_attr(cmd))
+               __target_execute_cmd(cmd);
 }
 EXPORT_SYMBOL(target_execute_cmd);
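
target_handle_task_attr() now reports through its return value whether the command was parked on dev->delayed_cmd_list. The matching drain happens at completion time: transport_complete_task_attr() (below) drops the ordered/simple counters and restarts parked commands. A condensed sketch of that restart loop, assuming the existing target_restart_delayed_cmds() helper keeps this shape:

	static void target_restart_delayed_cmds(struct se_device *dev)
	{
		struct se_cmd *cmd;

		for (;;) {
			spin_lock(&dev->delayed_cmd_lock);
			if (list_empty(&dev->delayed_cmd_list)) {
				spin_unlock(&dev->delayed_cmd_lock);
				break;
			}
			cmd = list_entry(dev->delayed_cmd_list.next,
					 struct se_cmd, se_delayed_node);
			list_del(&cmd->se_delayed_node);
			spin_unlock(&dev->delayed_cmd_lock);

			__target_execute_cmd(cmd);

			/* An ORDERED command must drain before anything
			 * queued behind it may run. */
			if (cmd->sam_task_attr == MSG_ORDERED_TAG)
				break;
		}
	}
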
 
@@ -1947,6 +1730,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+               return;
+
        if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
                atomic_dec(&dev->simple_cmds);
                smp_mb__after_atomic_dec();
@@ -1975,8 +1761,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
 {
        int ret = 0;
 
-       if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-               transport_complete_task_attr(cmd);
+       transport_complete_task_attr(cmd);
 
        if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
                ret = cmd->se_tfo->queue_status(cmd);
@@ -2034,8 +1819,8 @@ static void target_complete_ok_work(struct work_struct *work)
         * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
         * Attribute.
         */
-       if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-               transport_complete_task_attr(cmd);
+       transport_complete_task_attr(cmd);
+
        /*
         * Check to schedule QUEUE_FULL work, or execute an existing
         * cmd->transport_qf_callback()
@@ -2183,9 +1968,10 @@ static void transport_put_cmd(struct se_cmd *cmd)
        unsigned long flags;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (atomic_read(&cmd->t_fe_count)) {
-               if (!atomic_dec_and_test(&cmd->t_fe_count))
-                       goto out_busy;
+       if (atomic_read(&cmd->t_fe_count) &&
+           !atomic_dec_and_test(&cmd->t_fe_count)) {
+               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+               return;
        }
 
        if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
@@ -2197,56 +1983,7 @@ static void transport_put_cmd(struct se_cmd *cmd)
        transport_free_pages(cmd);
        transport_release_cmd(cmd);
        return;
-out_busy:
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
-/*
- * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
- * allocating in the core.
- * @cmd:  Associated se_cmd descriptor
- * @mem:  SGL style memory for TCM WRITE / READ
- * @sg_mem_num: Number of SGL elements
- * @mem_bidi_in: SGL style memory for TCM BIDI READ
- * @sg_mem_bidi_num: Number of BIDI READ SGL elements
- *
- * Return: nonzero return cmd was rejected for -ENOMEM or inproper usage
- * of parameters.
- */
-int transport_generic_map_mem_to_cmd(
-       struct se_cmd *cmd,
-       struct scatterlist *sgl,
-       u32 sgl_count,
-       struct scatterlist *sgl_bidi,
-       u32 sgl_bidi_count)
-{
-       if (!sgl || !sgl_count)
-               return 0;
-
-       /*
-        * Reject SCSI data overflow with map_mem_to_cmd() as incoming
-        * scatterlists already have been set to follow what the fabric
-        * passes for the original expected data transfer length.
-        */
-       if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
-               pr_warn("Rejecting SCSI DATA overflow for fabric using"
-                       " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
-       }
-
-       cmd->t_data_sg = sgl;
-       cmd->t_data_nents = sgl_count;
-
-       if (sgl_bidi && sgl_bidi_count) {
-               cmd->t_bidi_data_sg = sgl_bidi;
-               cmd->t_bidi_data_nents = sgl_bidi_count;
-       }
-       cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
-       return 0;
 }
-EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
 
 void *transport_kmap_data_sg(struct se_cmd *cmd)
 {
@@ -2268,10 +2005,8 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
 
        /* >1 page. use vmap */
        pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
-       if (!pages) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       if (!pages)
                return NULL;
-       }
 
        /* convert sg[] to pages[] */
        for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
@@ -2280,10 +2015,8 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
 
        cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
        kfree(pages);
-       if (!cmd->t_data_vmap) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       if (!cmd->t_data_vmap)
                return NULL;
-       }
 
        return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
 }
@@ -2349,7 +2082,8 @@ out:
  * might not have the payload yet, so notify the fabric via a call to
  * ->write_pending instead. Otherwise place it on the execution queue.
  */
-int transport_generic_new_cmd(struct se_cmd *cmd)
+sense_reason_t
+transport_generic_new_cmd(struct se_cmd *cmd)
 {
        int ret = 0;
 
@@ -2362,7 +2096,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
            cmd->data_length) {
                ret = transport_generic_get_mem(cmd);
                if (ret < 0)
-                       goto out_fail;
+                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
        atomic_inc(&cmd->t_fe_count);
@@ -2388,14 +2122,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
        if (ret == -EAGAIN || ret == -ENOMEM)
                goto queue_full;
 
-       if (ret < 0)
-               return ret;
-       return 1;
+       /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
+       WARN_ON(ret);
+
+       return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-out_fail:
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       return -EINVAL;
 queue_full:
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
        cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
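
The queue-full leg is unchanged in spirit: when the fabric's write_pending() callback returns -EAGAIN or -ENOMEM, the command parks in TRANSPORT_COMPLETE_QF_WP and target_qf_do_work() retries it later. A sketch of a fabric callback exercising that path; the hardware probe is a placeholder:

	static int my_write_pending(struct se_cmd *se_cmd)
	{
		/* Hypothetical capacity check on the fabric hardware: */
		if (!my_hw_has_room(se_cmd))
			return -EAGAIN;	/* core requeues via the QF worker */

		/* ... otherwise start the data-out transfer ... */
		return 0;
	}
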
@@ -2839,21 +2570,9 @@ static int transport_get_sense_codes(
        return 0;
 }
 
-static int transport_set_sense_codes(
-       struct se_cmd *cmd,
-       u8 asc,
-       u8 ascq)
-{
-       cmd->scsi_asc = asc;
-       cmd->scsi_ascq = ascq;
-
-       return 0;
-}
-
-int transport_send_check_condition_and_sense(
-       struct se_cmd *cmd,
-       u8 reason,
-       int from_transport)
+int
+transport_send_check_condition_and_sense(struct se_cmd *cmd,
+               sense_reason_t reason, int from_transport)
 {
        unsigned char *buffer = cmd->sense_buffer;
        unsigned long flags;
@@ -3044,23 +2763,19 @@ EXPORT_SYMBOL(transport_send_check_condition_and_sense);
 
 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 {
-       int ret = 0;
+       if (!(cmd->transport_state & CMD_T_ABORTED))
+               return 0;
 
-       if (cmd->transport_state & CMD_T_ABORTED) {
-               if (!send_status ||
-                    (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
-                       return 1;
+       if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+               return 1;
 
-               pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
-                       " status for CDB: 0x%02x ITT: 0x%08x\n",
-                       cmd->t_task_cdb[0],
-                       cmd->se_tfo->get_task_tag(cmd));
+       pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
+                cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
 
-               cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
-               cmd->se_tfo->queue_status(cmd);
-               ret = 1;
-       }
-       return ret;
+       cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+       cmd->se_tfo->queue_status(cmd);
+
+       return 1;
 }
 EXPORT_SYMBOL(transport_check_aborted_status);
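
The de-nested transport_check_aborted_status() keeps its contract: 0 when the command is not aborted; 1 when it is, with the delayed SAM_STAT_TASK_ABORTED emitted only when send_status is set and not already sent. The two call modes, sketched:

	/* Probe only: nothing is sent even if the command was aborted. */
	if (transport_check_aborted_status(cmd, 0))
		goto out;

	/* Probe and, if aborted, emit the delayed SAM_STAT_TASK_ABORTED
	 * (the mode used at the top of target_execute_cmd() above). */
	if (transport_check_aborted_status(cmd, 1))
		return;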