/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/version.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libsas.h> /* For TASK_ATTR_* */

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_scdb.h"
#include "target_core_ua.h"
/* #define DEBUG_CDB_HANDLER */
#ifdef DEBUG_CDB_HANDLER
#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
#else
#define DEBUG_CDB_H(x...)
#endif

/* #define DEBUG_CMD_MAP */
#ifdef DEBUG_CMD_MAP
#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
#else
#define DEBUG_CMD_M(x...)
#endif

/* #define DEBUG_MEM_ALLOC */
#ifdef DEBUG_MEM_ALLOC
#define DEBUG_MEM(x...) printk(KERN_INFO x)
#else
#define DEBUG_MEM(x...)
#endif

/* #define DEBUG_MEM2_ALLOC */
#ifdef DEBUG_MEM2_ALLOC
#define DEBUG_MEM2(x...) printk(KERN_INFO x)
#else
#define DEBUG_MEM2(x...)
#endif

/* #define DEBUG_SG_CALC */
#ifdef DEBUG_SG_CALC
#define DEBUG_SC(x...) printk(KERN_INFO x)
#else
#define DEBUG_SC(x...)
#endif

/* #define DEBUG_SE_OBJ */
#ifdef DEBUG_SE_OBJ
#define DEBUG_SO(x...) printk(KERN_INFO x)
#else
#define DEBUG_SO(x...)
#endif

/* #define DEBUG_CMD_VOL */
#ifdef DEBUG_CMD_VOL
#define DEBUG_VOL(x...) printk(KERN_INFO x)
#else
#define DEBUG_VOL(x...)
#endif

/* #define DEBUG_CMD_STOP */
#ifdef DEBUG_CMD_STOP
#define DEBUG_CS(x...) printk(KERN_INFO x)
#else
#define DEBUG_CS(x...)
#endif

/* #define DEBUG_PASSTHROUGH */
#ifdef DEBUG_PASSTHROUGH
#define DEBUG_PT(x...) printk(KERN_INFO x)
#else
#define DEBUG_PT(x...)
#endif

/* #define DEBUG_TASK_STOP */
#ifdef DEBUG_TASK_STOP
#define DEBUG_TS(x...) printk(KERN_INFO x)
#else
#define DEBUG_TS(x...)
#endif

/* #define DEBUG_TRANSPORT_STOP */
#ifdef DEBUG_TRANSPORT_STOP
#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
#else
#define DEBUG_TRANSPORT_S(x...)
#endif

/* #define DEBUG_TASK_FAILURE */
#ifdef DEBUG_TASK_FAILURE
#define DEBUG_TF(x...) printk(KERN_INFO x)
#else
#define DEBUG_TF(x...)
#endif

/* #define DEBUG_DEV_OFFLINE */
#ifdef DEBUG_DEV_OFFLINE
#define DEBUG_DO(x...) printk(KERN_INFO x)
#else
#define DEBUG_DO(x...)
#endif

/* #define DEBUG_TASK_STATE */
#ifdef DEBUG_TASK_STATE
#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
#else
#define DEBUG_TSTATE(x...)
#endif

/* #define DEBUG_STATUS_THR */
#ifdef DEBUG_STATUS_THR
#define DEBUG_ST(x...) printk(KERN_INFO x)
#else
#define DEBUG_ST(x...)
#endif

/* #define DEBUG_TASK_TIMEOUT */
#ifdef DEBUG_TASK_TIMEOUT
#define DEBUG_TT(x...) printk(KERN_INFO x)
#else
#define DEBUG_TT(x...)
#endif

/* #define DEBUG_GENERIC_REQUEST_FAILURE */
#ifdef DEBUG_GENERIC_REQUEST_FAILURE
#define DEBUG_GRF(x...) printk(KERN_INFO x)
#else
#define DEBUG_GRF(x...)
#endif

/* #define DEBUG_SAM_TASK_ATTRS */
#ifdef DEBUG_SAM_TASK_ATTRS
#define DEBUG_STA(x...) printk(KERN_INFO x)
#else
#define DEBUG_STA(x...)
#endif
struct se_global *se_global;

static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *se_mem_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

/* Used for transport_dev_get_map_*() */
typedef int (*map_func_t)(struct se_task *, u32);
static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
		unsigned long long starting_lba, u32 sectors,
		enum dma_data_direction data_direction,
		struct list_head *mem_list, int set_counts);
static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
		u32 dma_size);
static int transport_generic_remove(struct se_cmd *cmd,
		int release_to_pool, int session_reinstatement);
static int transport_get_sectors(struct se_cmd *cmd);
static struct list_head *transport_init_se_mem_list(void);
static int transport_map_sg_to_mem(struct se_cmd *cmd,
		struct list_head *se_mem_list, void *in_mem,
		u32 *se_mem_cnt);
static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
		unsigned char *dst, struct list_head *se_mem_list);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);

int transport_emulate_control_cdb(struct se_task *task);
int init_se_global(void)
{
	struct se_global *global;

	global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
	if (!(global)) {
		printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
		return -1;
	}

	INIT_LIST_HEAD(&global->g_lu_gps_list);
	INIT_LIST_HEAD(&global->g_se_tpg_list);
	INIT_LIST_HEAD(&global->g_hba_list);
	INIT_LIST_HEAD(&global->g_se_dev_list);
	spin_lock_init(&global->g_device_lock);
	spin_lock_init(&global->hba_lock);
	spin_lock_init(&global->se_tpg_lock);
	spin_lock_init(&global->lu_gps_lock);
	spin_lock_init(&global->plugin_class_lock);

	se_cmd_cache = kmem_cache_create("se_cmd_cache",
			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
	if (!(se_cmd_cache)) {
		printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
		goto out;
	}
	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
			0, NULL);
	if (!(se_tmr_req_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
				" failed\n");
		goto out;
	}
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!(se_sess_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!(se_ua_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
		goto out;
	}
	se_mem_cache = kmem_cache_create("se_mem_cache",
			sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
	if (!(se_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
		goto out;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!(t10_pr_reg_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!(t10_alua_lu_gp_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!(t10_alua_lu_gp_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!(t10_alua_tg_pt_gp_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!(t10_alua_tg_pt_gp_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out;
	}

	se_global = global;

	return 0;
out:
	if (se_cmd_cache)
		kmem_cache_destroy(se_cmd_cache);
	if (se_tmr_req_cache)
		kmem_cache_destroy(se_tmr_req_cache);
	if (se_sess_cache)
		kmem_cache_destroy(se_sess_cache);
	if (se_ua_cache)
		kmem_cache_destroy(se_ua_cache);
	if (se_mem_cache)
		kmem_cache_destroy(se_mem_cache);
	if (t10_pr_reg_cache)
		kmem_cache_destroy(t10_pr_reg_cache);
	if (t10_alua_lu_gp_cache)
		kmem_cache_destroy(t10_alua_lu_gp_cache);
	if (t10_alua_lu_gp_mem_cache)
		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	if (t10_alua_tg_pt_gp_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	if (t10_alua_tg_pt_gp_mem_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	kfree(global);
	return -1;
}
void release_se_global(void)
{
	struct se_global *global;

	global = se_global;
	if (!(global))
		return;

	kmem_cache_destroy(se_cmd_cache);
	kmem_cache_destroy(se_tmr_req_cache);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(se_mem_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	kfree(global);

	se_global = NULL;
}
/* SCSI statistics table index */
static struct scsi_index_table scsi_index_table;

/*
 * Initialize the index table for allocating unique row indexes to various mib
 * tables.
 */
void init_scsi_index_table(void)
{
	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
	spin_lock_init(&scsi_index_table.lock);
}
/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
		printk(KERN_ERR "Invalid index type %d\n", type);
		return -EINVAL;
	}

	spin_lock(&scsi_index_table.lock);
	new_index = ++scsi_index_table.scsi_mib_index[type];
	if (new_index == 0)
		new_index = ++scsi_index_table.scsi_mib_index[type];
	spin_unlock(&scsi_index_table.lock);

	return new_index;
}
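
/*
 * Illustrative sketch (not part of the original driver): how a caller
 * allocates a unique statistics row index.  SCSI_DEVICE_INDEX is used this
 * way later in this file by transport_add_device_to_core_hba().
 */
#if 0
static void example_alloc_dev_index(struct se_device *dev)
{
	/* Returns a monotonically increasing, never-zero index */
	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
}
#endif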
void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}
EXPORT_SYMBOL(transport_init_queue_obj);
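
/*
 * Minimal usage sketch (illustrative only): a struct se_queue_obj must be
 * initialized before commands are queued to it, exactly as
 * transport_add_device_to_core_hba() does below for dev->dev_queue_obj.
 */
#if 0
static struct se_queue_obj *example_alloc_queue_obj(void)
{
	struct se_queue_obj *qobj;

	qobj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
	if (!qobj)
		return NULL;
	transport_init_queue_obj(qobj);
	return qobj;
}
#endif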
static int transport_subsystem_reqmods(void)
{
	int ret;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_stgt\n");

	return 0;
}
int transport_subsystem_check_init(void)
{
	if (se_global->g_sub_api_initialized)
		return 0;
	/*
	 * Request the loading of known TCM subsystem plugins..
	 */
	if (transport_subsystem_reqmods() < 0)
		return -1;

	se_global->g_sub_api_initialized = 1;
	return 0;
}
struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!(se_sess)) {
		printk(KERN_ERR "Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
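
/*
 * Illustrative fabric-side sketch: transport_init_session() returns an
 * ERR_PTR() value on failure, so callers are expected to check with
 * IS_ERR() rather than for NULL.  The login context here is hypothetical.
 */
#if 0
static int example_fabric_make_session(void)
{
	struct se_session *se_sess;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);
	/* ... fabric stores se_sess in its per I_T Nexus structure ... */
	return 0;
}
#endif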
/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);
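
/*
 * Illustrative call flow (assumed, based on the API above): once a fabric
 * module has located its struct se_portal_group and struct se_node_acl for
 * an incoming I_T Nexus, it binds the new session with a single call.  The
 * my_tpg/my_nacl/my_conn names are hypothetical.
 */
#if 0
static void example_fabric_login_complete(struct se_portal_group *my_tpg,
	struct se_node_acl *my_nacl, struct se_session *se_sess,
	void *my_conn)
{
	/* Takes se_tpg->session_lock internally */
	transport_register_session(my_tpg, my_nacl, se_sess, my_conn);
}
#endif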
void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;

	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session.
	 */
	se_nacl = se_sess->se_node_acl;
	if ((se_nacl)) {
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);
void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;

	if (!(se_tpg)) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_bh(&se_tpg->session_lock);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if ((se_nacl)) {
		spin_lock_bh(&se_tpg->acl_node_lock);
		if (se_nacl->dynamic_node_acl) {
			if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
					se_tpg))) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_bh(&se_tpg->acl_node_lock);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_bh(&se_tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&se_tpg->acl_node_lock);
	}

	transport_free_session(se_sess);

	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
		TPG_TFO(se_tpg)->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);
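
/*
 * Illustrative teardown sketch: fabric modules are expected to drop the
 * ConfigFS-visible linkage first, then deregister the session (which also
 * frees it once unlinked from the TPG).
 */
#if 0
static void example_fabric_logout(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}
#endif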
/*
 * Called with T_TASK(cmd)->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	if (!T_TASK(cmd))
		return;

	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		dev = task->se_dev;
		if (!(dev))
			continue;

		if (atomic_read(&task->task_active))
			continue;

		if (!(atomic_read(&task->task_state_active)))
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		list_del(&task->t_state_list);
		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
			CMD_TFO(cmd)->get_task_tag(cmd), dev, task);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		atomic_set(&task->task_state_active, 0);
		atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
	}
}
/*	transport_cmd_check_stop():
 *
 *	'transport_off = 1' determines if t_transport_active should be cleared.
 *	'transport_off = 2' determines if task_dev_state should be removed.
 *
 *	A non-zero u8 t_state sets cmd->t_state.
 *	Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			CMD_TFO(cmd)->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&T_TASK(cmd)->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			CMD_TFO(cmd)->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&T_TASK(cmd)->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference and
			 * struct se_cmd now.
			 */
			if (CMD_TFO(cmd)->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);

				CMD_TFO(cmd)->check_stop_free(cmd);
				return 1;
			}
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return 0;
}
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = SE_LUN(cmd);
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		goto check_lun;
	}
	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_free_dev_tasks(cmd);

check_lun:
	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
		list_del(&cmd->se_lun_list);
		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
#if 0
		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n",
			CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
#endif
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
	transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_generic_remove(cmd, 0, 0);
}

void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;

	transport_generic_remove(cmd, 0, 0);
}
static int transport_add_cmd_to_queue(
	struct se_cmd *cmd,
	int t_state)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = dev->dev_queue_obj;
	struct se_queue_req *qr;
	unsigned long flags;

	qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
	if (!(qr)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" struct se_queue_req\n");
		return -1;
	}
	INIT_LIST_HEAD(&qr->qr_list);

	qr->cmd = (void *)cmd;
	qr->state = t_state;

	if (t_state) {
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		cmd->t_state = t_state;
		atomic_set(&T_TASK(cmd)->t_transport_active, 1);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_add_tail(&qr->qr_list, &qobj->qobj_list);
	atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	atomic_inc(&qobj->queue_cnt);
	wake_up_interruptible(&qobj->thread_wq);
	return 0;
}
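
/*
 * Illustrative consumer-side sketch: transport_add_cmd_to_queue() is the
 * producer half of this queue; the device processing thread is the
 * consumer, sleeping on qobj->thread_wq until queue_cnt becomes non-zero.
 * A minimal wait loop under those assumptions looks like:
 */
#if 0
static void example_queue_consumer(struct se_queue_obj *qobj)
{
	struct se_queue_req *qr;
	int ret;

	ret = wait_event_interruptible(qobj->thread_wq,
			atomic_read(&qobj->queue_cnt));
	if (ret < 0)
		return;
	qr = transport_get_qr_from_queue(qobj);
	if (qr) {
		/* ... dispatch qr->cmd based on qr->state ... */
		kfree(qr);
	}
}
#endif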
/*
 * Called with struct se_queue_obj->cmd_queue_lock held.
 */
static struct se_queue_req *
__transport_get_qr_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr = NULL;

	if (list_empty(&qobj->qobj_list))
		return NULL;

	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
		break;

	if (qr->cmd) {
		cmd = (struct se_cmd *)qr->cmd;
		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
	}
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);

	return qr;
}
static struct se_queue_req *
transport_get_qr_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}

	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
		break;

	if (qr->cmd) {
		cmd = (struct se_cmd *)qr->cmd;
		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
	}
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return qr;
}
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj)
{
	struct se_cmd *q_cmd;
	struct se_queue_req *qr = NULL, *qr_p = NULL;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}

	list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
		q_cmd = (struct se_cmd *)qr->cmd;
		if (q_cmd != cmd)
			continue;

		atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
		atomic_dec(&qobj->queue_cnt);
		list_del(&qr->qr_list);
		kfree(qr);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			atomic_read(&T_TASK(cmd)->t_transport_queue_active));
	}
}
/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
		TASK_CMD(task)->transport_error_status =
					PYX_TRANSPORT_ILLEGAL_REQUEST;
	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
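
/*
 * Illustrative backend-plugin sketch: an asynchronous SYNCHRONIZE_CACHE
 * handler in a subsystem plugin reports completion back to the core through
 * the helper above.  The error source shown here is hypothetical.
 */
#if 0
static void example_do_sync_cache_done(struct se_cmd *cmd, int error)
{
	/* good != 0 queues GOOD status, 0 queues ILLEGAL REQUEST */
	transport_complete_sync_cache(cmd, error == 0);
}
#endif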
/*	transport_complete_task():
 *
 *	Called from interrupt and non interrupt context depending
 *	on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct se_device *dev = task->se_dev;
	int t_state;
	unsigned long flags;
#if 0
	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
		T_TASK(cmd)->t_task_cdb[0], dev);
#endif
	if (dev) {
		spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
		atomic_inc(&dev->depth_left);
		atomic_inc(&SE_HBA(dev)->left_queue_depth);
		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
	}

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	atomic_set(&task->task_active, 0);

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_sense = 1;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (atomic_read(&task->task_stop)) {
		/*
		 * Decrement T_TASK(cmd)->t_se_count if this task had
		 * previously thrown its timeout exception handler.
		 */
		if (atomic_read(&task->task_timeout)) {
			atomic_dec(&T_TASK(cmd)->t_se_count);
			atomic_set(&task->task_timeout, 0);
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&task->task_stop_comp);
		return;
	}
	/*
	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
	 * left counter to determine when the struct se_cmd is ready to be queued to
	 * the processing thread.
	 */
	if (atomic_read(&task->task_timeout)) {
		if (!(atomic_dec_and_test(
				&T_TASK(cmd)->t_task_cdbs_timeout_left))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
				flags);
			return;
		}
		t_state = TRANSPORT_COMPLETE_TIMEOUT;
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		transport_add_cmd_to_queue(cmd, t_state);
		return;
	}
	atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
		if (!success)
			T_TASK(cmd)->t_tasks_failed = 1;

		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}

	if (!success || T_TASK(cmd)->t_tasks_failed) {
		t_state = TRANSPORT_COMPLETE_FAILURE;
		if (!task->task_error_status) {
			task->task_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
			cmd->transport_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
	} else {
		atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
		t_state = TRANSPORT_COMPLETE_OK;
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, t_state);
}
EXPORT_SYMBOL(transport_complete_task);
/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of a struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				T_TASK(task->task_se_cmd)->t_task_cdb[0]);
		return 1;
	}
	/*
	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
	 * transitioned from Dormant -> Active state, and are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}
/*	__transport_add_task_to_execute_queue():
 *
 *	Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (atomic_read(&task->task_state_active))
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	atomic_set(&task->task_state_active, 1);

	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
		CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
		task, dev);
}
static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		dev = task->se_dev;

		if (atomic_read(&task->task_state_active))
			continue;

		spin_lock(&dev->execute_task_lock);
		list_add_tail(&task->t_state_list, &dev->state_task_list);
		atomic_set(&task->task_state_active, 1);

		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
			CMD_TFO(task->task_se_cmd)->get_task_tag(
			task->task_se_cmd), task, dev);

		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
}
static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);
	struct se_task *task, *task_prev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		if (atomic_read(&task->task_execute_queue))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		atomic_set(&task->task_execute_queue, 1);
		task_prev = task;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
/*	transport_get_task_from_execute_queue():
 *
 *	Called with dev->execute_task_lock held.
 */
static struct se_task *
transport_get_task_from_execute_queue(struct se_device *dev)
{
	struct se_task *task;

	if (list_empty(&dev->execute_task_list))
		return NULL;

	list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
		break;

	list_del(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);

	return task;
}
/*	transport_remove_task_from_execute_queue():
 *
 *
 */
static void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_del(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}
void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
		dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
	*bl += sprintf(b + *bl, "        ");
}
/*	transport_release_all_cmds():
 *
 *
 */
static void transport_release_all_cmds(struct se_device *dev)
{
	struct se_cmd *cmd = NULL;
	struct se_queue_req *qr = NULL, *qr_p = NULL;
	int bug_out = 0, t_state;
	unsigned long flags;

	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
				qr_list) {

		cmd = (struct se_cmd *)qr->cmd;
		t_state = qr->state;
		list_del(&qr->qr_list);
		kfree(qr);
		spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
				flags);

		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
			" t_state: %u directly\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			CMD_TFO(cmd)->get_cmd_state(cmd), t_state);

		transport_release_fe_cmd(cmd);
		bug_out = 1;

		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
#if 0
	if (bug_out)
		BUG();
#endif
}
void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk(KERN_INFO "%s", buf);
}

int
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}

	return 0;
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);
int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -1;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk(KERN_INFO "%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);
int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -1;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk(KERN_INFO "%s", buf);

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);
int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -1;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk(KERN_INFO "%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
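
/*
 * Illustrative sketch of the intended calling order for the VPD page 0x83
 * helpers above: page_83 points at a single Identification Descriptor, and
 * each transport_set_vpd_*() decodes one field and dumps the result.
 */
#if 0
static void example_parse_vpd_descriptor(struct t10_vpd *vpd,
	unsigned char *page_83)
{
	transport_set_vpd_proto_id(vpd, page_83);
	transport_set_vpd_assoc(vpd, page_83);
	transport_set_vpd_ident_type(vpd, page_83);
	transport_set_vpd_ident(vpd, page_83);
}
#endif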
static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", TRANSPORT(dev)->name,
		TRANSPORT(dev)->get_device_rev(dev));
}
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = DEV_T10_WWN(dev);
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	printk("  Vendor: ");
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			printk("%c", wwn->vendor[i]);
		else
			printk(" ");

	printk("  Model: ");
	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			printk("%c", wwn->model[i]);
		else
			printk(" ");

	printk("  Revision: ");
	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			printk("%c", wwn->revision[i]);
		else
			printk(" ");

	printk("\n");

	device_type = TRANSPORT(dev)->get_device_type(dev);
	printk("  Type:   %s ", scsi_device_type(device_type));
	printk("                 ANSI SCSI revision: %02x\n",
			TRANSPORT(dev)->get_device_rev(dev));
}
struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int ret = 0, force_pt;
	struct se_device *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!(dev)) {
		printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
		return NULL;
	}
	dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
	if (!(dev->dev_queue_obj)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" dev->dev_queue_obj\n");
		kfree(dev);
		return NULL;
	}
	transport_init_queue_obj(dev->dev_queue_obj);

	dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
					GFP_KERNEL);
	if (!(dev->dev_status_queue_obj)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" dev->dev_status_queue_obj\n");
		kfree(dev->dev_queue_obj);
		kfree(dev);
		return NULL;
	}
	transport_init_queue_obj(dev->dev_status_queue_obj);

	dev->dev_flags = device_flags;
	dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr = (void *) transport_dev;
	dev->se_hba = hba;
	dev->se_sub_dev = se_dev;
	dev->transport = transport;
	atomic_set(&dev->active_cmds, 0);
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->ordered_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->ordered_cmd_lock);
	spin_lock_init(&dev->state_task_lock);
	spin_lock_init(&dev->dev_alua_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->dev_status_thr_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);

	dev->queue_depth = dev_limits->queue_depth;
	atomic_set(&dev->depth_left, dev->queue_depth);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", TRANSPORT(dev)->name);
	if (IS_ERR(dev->process_thread)) {
		printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
			TRANSPORT(dev)->name);
		goto out;
	}

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!(inquiry_prod) || !(inquiry_rev)) {
			printk(KERN_ERR "All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
		strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
		strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev->dev_status_queue_obj);
	kfree(dev->dev_queue_obj);
	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);
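
/*
 * Illustrative backend sketch (names hypothetical): a subsystem plugin's
 * create_virtdevice() method fills out struct se_dev_limits and hands its
 * private device pointer to the core with the call above.
 */
#if 0
extern struct se_subsystem_api example_subsystem_api;	/* hypothetical */

static struct se_device *example_create_virtdevice(struct se_hba *hba,
	struct se_subsystem_dev *se_dev, void *example_dev_ptr)
{
	struct se_dev_limits dev_limits;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
	dev_limits.queue_depth = 32;	/* hypothetical depth */

	return transport_add_device_to_core_hba(hba, &example_subsystem_api,
			se_dev, 0, example_dev_ptr, &dev_limits,
			"EXAMPLE", "1.0");
}
#endif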
/*	transport_generic_prepare_cdb():
 *
 *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
 *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 *	The point of this is since we are mapping iSCSI LUNs to
 *	SCSI Target IDs having a non-zero LUN in the CDB will throw the
 *	devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}
static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = SE_DEV(cmd);
	unsigned long flags;

	task = dev->transport->alloc_task(cmd);
	if (!task) {
		printk(KERN_ERR "Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_no = T_TASK(cmd)->t_tasks_no++;
	task->task_se_cmd = cmd;
	task->se_dev = dev;
	task->task_data_direction = data_direction;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return task;
}
static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

void transport_device_setup_cmd(struct se_cmd *cmd)
{
	cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
}
EXPORT_SYMBOL(transport_device_setup_cmd);
/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_list);
	INIT_LIST_HEAD(&cmd->se_delayed_list);
	INIT_LIST_HEAD(&cmd->se_ordered_list);
	/*
	 * Setup t_task pointer to t_task_backstore
	 */
	cmd->t_task = &cmd->t_task_backstore;

	INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
	init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
	init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
	init_completion(&T_TASK(cmd)->t_transport_stop_comp);
	spin_lock_init(&T_TASK(cmd)->t_state_lock);
	atomic_set(&T_TASK(cmd)->transport_dev_active, 1);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
}
EXPORT_SYMBOL(transport_init_se_cmd);
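
/*
 * Illustrative fabric-side sketch: a fabric module embeds struct se_cmd in
 * its per-I/O descriptor and initializes it with the helper above before
 * CDB submission.  struct example_fabric_cmd and example_tfo are
 * hypothetical names.
 */
#if 0
extern struct target_core_fabric_ops example_tfo;	/* hypothetical */

struct example_fabric_cmd {
	struct se_cmd se_cmd;
	unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
};

static void example_init_io(struct example_fabric_cmd *fcmd,
	struct se_session *se_sess, u32 length, int dir)
{
	transport_init_se_cmd(&fcmd->se_cmd, &example_tfo, se_sess,
			length, dir, TASK_ATTR_SIMPLE, &fcmd->sense_buf[0]);
}
#endif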
static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == TASK_ATTR_ACA) {
		DEBUG_STA("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -1;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
	smp_mb__after_atomic_inc();
	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			TRANSPORT(cmd->se_dev)->name);
	return 0;
}
void transport_free_se_cmd(
	struct se_cmd *se_cmd)
{
	if (se_cmd->se_tmr_req)
		core_tmr_release_req(se_cmd->se_tmr_req);
	/*
	 * Check and free any extended CDB buffer that was allocated
	 */
	if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
		kfree(T_TASK(se_cmd)->t_task_cdb);
}
EXPORT_SYMBOL(transport_free_se_cmd);
static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);

/*	transport_generic_allocate_tasks():
 *
 *	Called from fabric RX Thread.
 */
int transport_generic_allocate_tasks(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);

	/*
	 * This is needed for early exceptions.
	 */
	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;

	transport_device_setup_cmd(cmd);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return -1;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
		T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!(T_TASK(cmd)->t_task_cdb)) {
			printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
				" %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
			return -1;
		}
	} else
		T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
	/*
	 * Copy the original CDB into T_TASK(cmd).
	 */
	memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The T_TASK(cmd)->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -2;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);
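
/*
 * Illustrative RX-path sketch tying the pieces above together; negative
 * return values from transport_generic_allocate_tasks() indicate a setup
 * failure, as described in the function above.  The submit helper name is
 * hypothetical.
 */
#if 0
static int example_fabric_submit_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	int ret;

	ret = transport_generic_allocate_tasks(cmd, cdb);
	if (ret < 0)
		return ret;
	/* Hand off to the device processing thread in TRANSPORT_NEW_CMD */
	return transport_generic_handle_cdb(cmd);
}
#endif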
/*
 * Used by fabric module frontends not defining a TFO->new_cmd_map()
 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status
 */
int transport_generic_handle_cdb(
	struct se_cmd *cmd)
{
	if (!SE_LUN(cmd)) {
		dump_stack();
		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
		return -1;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb);

/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!SE_LUN(cmd)) {
		dump_stack();
		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
		return -1;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);
/*	transport_generic_handle_data():
 *
 *
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure.  For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -1;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);
/*	transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	/*
	 * This is needed for early exceptions.
	 */
	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
	transport_device_setup_cmd(cmd);

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
		CMD_TFO(cmd)->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&T_TASK(cmd)->t_task_list, t_list) {
		DEBUG_TS("task_no[%d] - Processing task %p\n",
				task->task_no, task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!atomic_read(&task->task_sent) &&
		    !atomic_read(&task->task_active)) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					task->se_dev);

			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
				task->task_no);
			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			continue;
		}

		/*
		 * If the struct se_task is active, sleep until it is returned
		 * from the plugin.
		 */
		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					flags);

			DEBUG_TS("task_no[%d] - Waiting to complete\n",
				task->task_no);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_TS("task_no[%d] - Stopped successfully\n",
				task->task_no);

			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		} else {
			DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
			ret++;
		}

		__transport_stop_task_timer(task, &flags);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return ret;
}
static void transport_failure_reset_queue_depth(struct se_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
	atomic_inc(&dev->depth_left);
	atomic_inc(&SE_HBA(dev)->left_queue_depth);
	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
}
2142 * Handle SAM-esque emulation for generic transport request failures.
2144 static void transport_generic_request_failure(
2146 struct se_device *dev,
2150 DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
2151 " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
2152 T_TASK(cmd)->t_task_cdb[0]);
2153 DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
2154 " %d/%d transport_error_status: %d\n",
2155 CMD_TFO(cmd)->get_cmd_state(cmd),
2156 cmd->t_state, cmd->deferred_t_state,
2157 cmd->transport_error_status);
2158 DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
2159 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
2160 " t_transport_active: %d t_transport_stop: %d"
2161 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
2162 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
2163 atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
2164 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
2165 atomic_read(&T_TASK(cmd)->t_transport_active),
2166 atomic_read(&T_TASK(cmd)->t_transport_stop),
2167 atomic_read(&T_TASK(cmd)->t_transport_sent));
2169 transport_stop_all_task_timers(cmd);
2172 transport_failure_reset_queue_depth(dev);
2174 * For SAM Task Attribute emulation for failed struct se_cmd
2176 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2177 transport_complete_task_attr(cmd);
2180 transport_direct_request_timeout(cmd);
2181 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
2184 switch (cmd->transport_error_status) {
2185 case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
2186 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2188 case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
2189 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
2191 case PYX_TRANSPORT_INVALID_CDB_FIELD:
2192 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2194 case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
2195 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2197 case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
2199 transport_new_cmd_failure(cmd);
2201 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
2202 * we force this session to fall back to session
2205 CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
2206 CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
2209 case PYX_TRANSPORT_LU_COMM_FAILURE:
2210 case PYX_TRANSPORT_ILLEGAL_REQUEST:
2211 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2213 case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
2214 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
2216 case PYX_TRANSPORT_WRITE_PROTECTED:
2217 cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
2219 case PYX_TRANSPORT_RESERVATION_CONFLICT:
2221 * No SENSE Data payload for this case, set SCSI Status
2222 * and queue the response to $FABRIC_MOD.
2224 * Uses the SAM status code definitions from linux/include/scsi/scsi.h
2226 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2228 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2229 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2230 * CONFLICT STATUS.
2231 *
2232 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2233 */
2234 if (SE_SESS(cmd) &&
2235 DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
2236 core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
2237 cmd->orig_fe_lun, 0x2C,
2238 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2240 CMD_TFO(cmd)->queue_status(cmd);
2242 case PYX_TRANSPORT_USE_SENSE_REASON:
2244 * struct se_cmd->scsi_sense_reason already set
2248 printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
2249 T_TASK(cmd)->t_task_cdb[0],
2250 cmd->transport_error_status);
2251 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2256 transport_new_cmd_failure(cmd);
2258 transport_send_check_condition_and_sense(cmd,
2259 cmd->scsi_sense_reason, 0);
2261 transport_lun_remove_cmd(cmd);
2262 if (!(transport_cmd_check_stop_to_fabric(cmd)))
2266 static void transport_direct_request_timeout(struct se_cmd *cmd)
2268 unsigned long flags;
2270 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2271 if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
2272 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2275 if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
2276 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2280 atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
2281 &T_TASK(cmd)->t_se_count);
2282 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2285 static void transport_generic_request_timeout(struct se_cmd *cmd)
2287 unsigned long flags;
2290 * Reset T_TASK(cmd)->t_se_count so that the final call to
2291 * transport_generic_remove() can free the remaining memory resources.
2293 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2294 if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
2295 int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
2297 atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
2299 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2301 transport_generic_remove(cmd, 0, 0);
2304 static int
2305 transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
2309 buf = kzalloc(data_length, GFP_KERNEL);
2311 printk(KERN_ERR "Unable to allocate memory for buffer\n");
2315 T_TASK(cmd)->t_tasks_se_num = 0;
2316 T_TASK(cmd)->t_task_buf = buf;
2321 static inline u32 transport_lba_21(unsigned char *cdb)
2323 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
2326 static inline u32 transport_lba_32(unsigned char *cdb)
2328 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2331 static inline unsigned long long transport_lba_64(unsigned char *cdb)
2333 unsigned int __v1, __v2;
2335 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2336 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2338 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2342 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
2344 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
2346 unsigned int __v1, __v2;
2348 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
2349 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
2351 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2354 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
2356 unsigned long flags;
2358 spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
2359 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
2360 spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
2364 * Called from interrupt context.
2366 static void transport_task_timeout_handler(unsigned long data)
2368 struct se_task *task = (struct se_task *)data;
2369 struct se_cmd *cmd = TASK_CMD(task);
2370 unsigned long flags;
2372 DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
2374 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2375 if (task->task_flags & TF_STOP) {
2376 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2379 task->task_flags &= ~TF_RUNNING;
2382 * Determine if transport_complete_task() has already been called.
2384 if (!(atomic_read(&task->task_active))) {
2385 DEBUG_TT("transport task: %p cmd: %p timeout task_active"
2386 " == 0\n", task, cmd);
2387 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2391 atomic_inc(&T_TASK(cmd)->t_se_count);
2392 atomic_inc(&T_TASK(cmd)->t_transport_timeout);
2393 T_TASK(cmd)->t_tasks_failed = 1;
2395 atomic_set(&task->task_timeout, 1);
2396 task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
2397 task->task_scsi_status = 1;
2399 if (atomic_read(&task->task_stop)) {
2400 DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
2401 " == 1\n", task, cmd);
2402 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2403 complete(&task->task_stop_comp);
2407 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
2408 DEBUG_TT("transport task: %p cmd: %p timeout non zero"
2409 " t_task_cdbs_left\n", task, cmd);
2410 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2413 DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
2414 task, cmd);
2416 cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
2417 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2419 transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
2423 * Called with T_TASK(cmd)->t_state_lock held.
2425 static void transport_start_task_timer(struct se_task *task)
2427 struct se_device *dev = task->se_dev;
2430 if (task->task_flags & TF_RUNNING)
2433 * If the task_timeout is disabled, exit now.
2435 timeout = DEV_ATTRIB(dev)->task_timeout;
2439 init_timer(&task->task_timer);
2440 task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
2441 task->task_timer.data = (unsigned long) task;
2442 task->task_timer.function = transport_task_timeout_handler;
2444 task->task_flags |= TF_RUNNING;
2445 add_timer(&task->task_timer);
2447 printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
2448 " %d\n", task->task_se_cmd, task, timeout);
2453 * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
2455 void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2457 struct se_cmd *cmd = TASK_CMD(task);
2459 if (!(task->task_flags & TF_RUNNING))
2462 task->task_flags |= TF_STOP;
2463 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
2465 del_timer_sync(&task->task_timer);
2467 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
2468 task->task_flags &= ~TF_RUNNING;
2469 task->task_flags &= ~TF_STOP;
2472 static void transport_stop_all_task_timers(struct se_cmd *cmd)
2474 struct se_task *task = NULL, *task_tmp;
2475 unsigned long flags;
2477 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2478 list_for_each_entry_safe(task, task_tmp,
2479 &T_TASK(cmd)->t_task_list, t_list)
2480 __transport_stop_task_timer(task, &flags);
2481 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2484 static inline int transport_tcq_window_closed(struct se_device *dev)
2486 if (dev->dev_tcq_window_closed++ <
2487 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
2488 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
2490 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
2492 wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
2497 * Called from Fabric Module context from transport_execute_tasks()
2499 * The return of this function determines if the tasks from struct se_cmd
2500 * get added to the execution queue in transport_execute_tasks(),
2501 * or are added to the delayed or ordered lists here.
2503 static inline int transport_execute_task_attr(struct se_cmd *cmd)
2505 if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
2508 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
2509 * to allow the tasks of the passed struct se_cmd to be added to the front of the list.
2511 if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
2512 atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
2513 smp_mb__after_atomic_inc();
2514 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
2515 " 0x%02x, se_ordered_id: %u\n",
2516 T_TASK(cmd)->t_task_cdb[0],
2517 cmd->se_ordered_id);
2519 } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
2520 spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
2521 list_add_tail(&cmd->se_ordered_list,
2522 &SE_DEV(cmd)->ordered_cmd_list);
2523 spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
2525 atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
2526 smp_mb__after_atomic_inc();
2528 DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
2529 " list, se_ordered_id: %u\n",
2530 T_TASK(cmd)->t_task_cdb[0],
2531 cmd->se_ordered_id);
2533 * Add ORDERED command to tail of execution queue if
2534 * no other older commands exist that need to be
2535 * completed first.
2536 */
2537 if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
2541 * For SIMPLE and UNTAGGED Task Attribute commands
2543 atomic_inc(&SE_DEV(cmd)->simple_cmds);
2544 smp_mb__after_atomic_inc();
2547 * Otherwise, if one or more outstanding ORDERED task attributes exist,
2548 * add the dormant task(s) built for the passed struct se_cmd to the
2549 * execution queue and make them Active for this struct se_device.
2551 if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
2553 * Otherwise, add cmd w/ tasks to delayed cmd queue that
2554 * will be drained upon completion of HEAD_OF_QUEUE task.
2556 spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
2557 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
2558 list_add_tail(&cmd->se_delayed_list,
2559 &SE_DEV(cmd)->delayed_cmd_list);
2560 spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
2562 DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
2563 " delayed CMD list, se_ordered_id: %u\n",
2564 T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
2565 cmd->se_ordered_id);
2567 * Return zero to let transport_execute_tasks() know
2568 * not to add the delayed tasks to the execution list.
2573 * Otherwise, no ORDERED task attributes exist.
2579 * Called from fabric module context in transport_generic_new_cmd() and
2580 * transport_generic_process_write()
2582 static int transport_execute_tasks(struct se_cmd *cmd)
2586 if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
2587 if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
2588 cmd->transport_error_status =
2589 PYX_TRANSPORT_LU_COMM_FAILURE;
2590 transport_generic_request_failure(cmd, NULL, 0, 1);
2595 * Call transport_cmd_check_stop() to see if a fabric exception
2596 * has occurred that prevents execution.
2598 if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
2600 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2601 * attribute for the tasks of the received struct se_cmd CDB
2603 add_tasks = transport_execute_task_attr(cmd);
2607 * This calls transport_add_tasks_from_cmd() to handle
2608 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2609 * (if enabled) in __transport_add_task_to_execute_queue() and
2610 * transport_add_task_check_sam_attr().
2612 transport_add_tasks_from_cmd(cmd);
2615 * Kick the execution queue for the cmd associated struct se_device
2619 __transport_execute_tasks(SE_DEV(cmd));
2624 * Called to check the struct se_device tcq depth window, and once open pull a struct se_task
2625 * from struct se_device->execute_task_list and send it to the underlying transport.
2627 * Called from transport_processing_thread()
2629 static int __transport_execute_tasks(struct se_device *dev)
2632 struct se_cmd *cmd = NULL;
2633 struct se_task *task;
2634 unsigned long flags;
2637 * Check if there is enough room in the device and HBA queue to send
2638 * struct se_transport_task's to the selected transport.
2641 spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
2642 if (!(atomic_read(&dev->depth_left)) ||
2643 !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
2644 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2645 return transport_tcq_window_closed(dev);
2647 dev->dev_tcq_window_closed = 0;
2649 spin_lock(&dev->execute_task_lock);
2650 task = transport_get_task_from_execute_queue(dev);
2651 spin_unlock(&dev->execute_task_lock);
2654 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2658 atomic_dec(&dev->depth_left);
2659 atomic_dec(&SE_HBA(dev)->left_queue_depth);
2660 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2662 cmd = TASK_CMD(task);
2664 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2665 atomic_set(&task->task_active, 1);
2666 atomic_set(&task->task_sent, 1);
2667 atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
2669 if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
2670 T_TASK(cmd)->t_task_cdbs)
2671 atomic_set(&cmd->transport_sent, 1);
2673 transport_start_task_timer(task);
2674 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2676 * The struct se_cmd->transport_emulate_cdb() function pointer is used
2677 * to grab REPORT_LUNS CDBs before they hit the
2678 * struct se_subsystem_api->do_task() caller below.
2680 if (cmd->transport_emulate_cdb) {
2681 error = cmd->transport_emulate_cdb(cmd);
2683 cmd->transport_error_status = error;
2684 atomic_set(&task->task_active, 0);
2685 atomic_set(&cmd->transport_sent, 0);
2686 transport_stop_tasks_for_cmd(cmd);
2687 transport_generic_request_failure(cmd, dev, 0, 1);
2691 * Handle the successful completion for transport_emulate_cdb()
2692 * here for the synchronous (!SCF_EMULATE_CDB_ASYNC) operation case.
2693 * Otherwise the caller is expected to complete the task with
2694 * proper status.
2695 */
2696 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
2697 cmd->scsi_status = SAM_STAT_GOOD;
2698 task->task_scsi_status = GOOD;
2699 transport_complete_task(task, 1);
2703 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
2704 * RAMDISK we use the internal transport_emulate_control_cdb() logic
2705 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
2706 * LUN emulation code.
2708 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
2709 * call ->do_task() directly and let the underlying TCM subsystem plugin
2710 * code handle the CDB emulation.
2712 if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
2713 (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
2714 error = transport_emulate_control_cdb(task);
2716 error = TRANSPORT(dev)->do_task(task);
2719 cmd->transport_error_status = error;
2720 atomic_set(&task->task_active, 0);
2721 atomic_set(&cmd->transport_sent, 0);
2722 transport_stop_tasks_for_cmd(cmd);
2723 transport_generic_request_failure(cmd, dev, 0, 1);
2732 void transport_new_cmd_failure(struct se_cmd *se_cmd)
2734 unsigned long flags;
2736 * Any unsolicited data will get dumped for failed command inside of
2737 * the fabric plugin.
2738 */
2739 spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
2740 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2741 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2742 spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
2744 CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
2747 static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
2749 static inline u32 transport_get_sectors_6(
2750 unsigned char *cdb,
2751 struct se_cmd *cmd,
2752 int *ret)
2754 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2757 * Assume TYPE_DISK for non struct se_device objects.
2758 * Use 8-bit sector value.
2764 * Use 24-bit allocation length for TYPE_TAPE.
2766 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
2767 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2770 * Everything else assume TYPE_DISK Sector CDB location.
2771 * Use 8-bit sector value.
2777 static inline u32 transport_get_sectors_10(
2778 unsigned char *cdb,
2779 struct se_cmd *cmd,
2780 int *ret)
2782 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2785 * Assume TYPE_DISK for non struct se_device objects.
2786 * Use 16-bit sector value.
2792 * XXX_10 is not defined in SSC, throw an exception
2794 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2800 * Everything else assume TYPE_DISK Sector CDB location.
2801 * Use 16-bit sector value.
2804 return (u32)(cdb[7] << 8) + cdb[8];
2807 static inline u32 transport_get_sectors_12(
2808 unsigned char *cdb,
2809 struct se_cmd *cmd,
2810 int *ret)
2812 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2815 * Assume TYPE_DISK for non struct se_device objects.
2816 * Use 32-bit sector value.
2822 * XXX_12 is not defined in SSC, throw an exception
2824 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2830 * Everything else assume TYPE_DISK Sector CDB location.
2831 * Use 32-bit sector value.
2834 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2837 static inline u32 transport_get_sectors_16(
2838 unsigned char *cdb,
2839 struct se_cmd *cmd,
2840 int *ret)
2842 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2845 * Assume TYPE_DISK for non struct se_device objects.
2846 * Use 32-bit sector value.
2852 * Use 24-bit allocation length for TYPE_TAPE.
2854 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
2855 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2858 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2859 (cdb[12] << 8) + cdb[13];
2863 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2865 static inline u32 transport_get_sectors_32(
2866 unsigned char *cdb,
2867 struct se_cmd *cmd,
2868 int *ret)
2871 * Assume TYPE_DISK for non struct se_device objects.
2872 * Use 32-bit sector value.
2874 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2875 (cdb[30] << 8) + cdb[31];
2879 static inline u32 transport_get_size(
2880 u32 sectors,
2881 unsigned char *cdb,
2882 struct se_cmd *cmd)
2884 struct se_device *dev = SE_DEV(cmd);
2886 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2887 if (cdb[1] & 1) { /* sectors */
2888 return DEV_ATTRIB(dev)->block_size * sectors;
2893 printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
2894 " %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
2895 DEV_ATTRIB(dev)->block_size * sectors,
2896 TRANSPORT(dev)->name);
2898 return DEV_ATTRIB(dev)->block_size * sectors;
2901 unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
2903 unsigned char result = 0;
2907 if ((val[0] >= 'a') && (val[0] <= 'f'))
2908 result = ((val[0] - 'a' + 10) & 0xf) << 4;
2909 else
2910 if ((val[0] >= 'A') && (val[0] <= 'F'))
2911 result = ((val[0] - 'A' + 10) & 0xf) << 4;
2912 else /* digit */
2913 result = ((val[0] - '0') & 0xf) << 4;
2917 if ((val[1] >= 'a') && (val[1] <= 'f'))
2918 result |= ((val[1] - 'a' + 10) & 0xf);
2919 else
2920 if ((val[1] >= 'A') && (val[1] <= 'F'))
2921 result |= ((val[1] - 'A' + 10) & 0xf);
2922 else /* digit */
2923 result |= ((val[1] - '0') & 0xf);
2927 EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
2929 static void transport_xor_callback(struct se_cmd *cmd)
2931 unsigned char *buf, *addr;
2932 struct se_mem *se_mem;
2933 unsigned int offset;
2936 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2938 * 1) read the specified logical block(s);
2939 * 2) transfer logical blocks from the data-out buffer;
2940 * 3) XOR the logical blocks transferred from the data-out buffer with
2941 * the logical blocks read, storing the resulting XOR data in a buffer;
2942 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2943 * blocks transferred from the data-out buffer; and
2944 * 5) transfer the resulting XOR data to the data-in buffer.
2946 buf = kmalloc(cmd->data_length, GFP_KERNEL);
2948 printk(KERN_ERR "Unable to allocate xor_callback buf\n");
2952 * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
2953 * into the locally allocated *buf
2955 transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
2957 * Now perform the XOR against the BIDI read memory located at
2958 * T_TASK(cmd)->t_mem_bidi_list
2962 list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
2963 addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
2967 for (i = 0; i < se_mem->se_len; i++)
2968 *(addr + se_mem->se_off + i) ^= *(buf + offset + i);
2970 offset += se_mem->se_len;
2971 kunmap_atomic(addr, KM_USER0);
2978 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2980 static int transport_get_sense_data(struct se_cmd *cmd)
2982 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2983 struct se_device *dev;
2984 struct se_task *task = NULL, *task_tmp;
2985 unsigned long flags;
2989 printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
2992 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2993 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2994 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2998 list_for_each_entry_safe(task, task_tmp,
2999 &T_TASK(cmd)->t_task_list, t_list) {
3001 if (!task->task_sense)
3008 if (!TRANSPORT(dev)->get_sense_buffer) {
3009 printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
3010 " is NULL\n");
3011 continue;
3014 sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
3015 if (!(sense_buffer)) {
3016 printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
3017 " sense buffer for task with sense\n",
3018 CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
3021 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3023 offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
3024 TRANSPORT_SENSE_BUFFER);
3026 memcpy((void *)&buffer[offset], (void *)sense_buffer,
3027 TRANSPORT_SENSE_BUFFER);
3028 cmd->scsi_status = task->task_scsi_status;
3029 /* Automatically padded */
3030 cmd->scsi_sense_length =
3031 (TRANSPORT_SENSE_BUFFER + offset);
3033 printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
3034 " and sense\n",
3035 dev->se_hba->hba_id, TRANSPORT(dev)->name,
3036 cmd->scsi_status);
3039 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3044 static int transport_allocate_resources(struct se_cmd *cmd)
3046 u32 length = cmd->data_length;
3048 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3049 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
3050 return transport_generic_get_mem(cmd, length, PAGE_SIZE);
3051 else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
3052 return transport_generic_allocate_buf(cmd, length);
3057 static int
3058 transport_handle_reservation_conflict(struct se_cmd *cmd)
3060 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3061 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3062 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
3063 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
3065 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
3066 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
3067 * CONFLICT STATUS.
3068 *
3069 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
3070 */
3071 if (SE_SESS(cmd) &&
3072 DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
3073 core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
3074 cmd->orig_fe_lun, 0x2C,
3075 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
3079 /* transport_generic_cmd_sequencer():
3081 * Generic Command Sequencer that should work for most DAS transport
3082 * drivers.
3084 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
3087 * FIXME: Need to support other SCSI OPCODES as well.
3089 static int transport_generic_cmd_sequencer(
3090 struct se_cmd *cmd,
3091 unsigned char *cdb)
3093 struct se_device *dev = SE_DEV(cmd);
3094 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
3095 int ret = 0, sector_ret = 0, passthrough;
3096 u32 sectors = 0, size = 0, pr_reg_type = 0;
3100 * Check for an existing UNIT ATTENTION condition
3102 if (core_scsi3_ua_check(cmd, cdb) < 0) {
3103 cmd->transport_wait_for_tasks =
3104 &transport_nop_wait_for_tasks;
3105 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3106 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
3110 * Check status of Asymmetric Logical Unit Assignment port
3112 ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
3114 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3116 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
3117 * the ALUA additional sense code qualifier (ASCQ) is determined
3118 * by the ALUA primary or secondary access state.
3122 printk(KERN_INFO "[%s]: ALUA TG Port not available,"
3123 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
3124 CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
3126 transport_set_sense_codes(cmd, 0x04, alua_ascq);
3127 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3128 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
3131 goto out_invalid_cdb_field;
3134 * Check status for SPC-3 Persistent Reservations
3136 if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
3137 if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
3138 cmd, cdb, pr_reg_type) != 0)
3139 return transport_handle_reservation_conflict(cmd);
3141 * This means the CDB is allowed for the SCSI Initiator port
3142 * when said port is *NOT* holding the legacy SPC-2 or
3143 * SPC-3 Persistent Reservation.
3149 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3151 goto out_unsupported_cdb;
3152 size = transport_get_size(sectors, cdb, cmd);
3153 cmd->transport_split_cdb = &split_cdb_XX_6;
3154 T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
3155 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3158 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3160 goto out_unsupported_cdb;
3161 size = transport_get_size(sectors, cdb, cmd);
3162 cmd->transport_split_cdb = &split_cdb_XX_10;
3163 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3164 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3167 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3169 goto out_unsupported_cdb;
3170 size = transport_get_size(sectors, cdb, cmd);
3171 cmd->transport_split_cdb = &split_cdb_XX_12;
3172 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3173 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3176 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3178 goto out_unsupported_cdb;
3179 size = transport_get_size(sectors, cdb, cmd);
3180 cmd->transport_split_cdb = &split_cdb_XX_16;
3181 T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3182 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3185 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3187 goto out_unsupported_cdb;
3188 size = transport_get_size(sectors, cdb, cmd);
3189 cmd->transport_split_cdb = &split_cdb_XX_6;
3190 T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
3191 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3194 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3196 goto out_unsupported_cdb;
3197 size = transport_get_size(sectors, cdb, cmd);
3198 cmd->transport_split_cdb = &split_cdb_XX_10;
3199 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3200 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3201 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3204 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3206 goto out_unsupported_cdb;
3207 size = transport_get_size(sectors, cdb, cmd);
3208 cmd->transport_split_cdb = &split_cdb_XX_12;
3209 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3210 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3211 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3214 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3216 goto out_unsupported_cdb;
3217 size = transport_get_size(sectors, cdb, cmd);
3218 cmd->transport_split_cdb = &split_cdb_XX_16;
3219 T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3220 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3221 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3223 case XDWRITEREAD_10:
3224 if ((cmd->data_direction != DMA_TO_DEVICE) ||
3225 !(T_TASK(cmd)->t_tasks_bidi))
3226 goto out_invalid_cdb_field;
3227 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3229 goto out_unsupported_cdb;
3230 size = transport_get_size(sectors, cdb, cmd);
3231 cmd->transport_split_cdb = &split_cdb_XX_10;
3232 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3233 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3234 passthrough = (TRANSPORT(dev)->transport_type ==
3235 TRANSPORT_PLUGIN_PHBA_PDEV);
3237 * Skip the remaining assignments for TCM/PSCSI passthrough
3242 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
3244 cmd->transport_complete_callback = &transport_xor_callback;
3245 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3247 case VARIABLE_LENGTH_CMD:
3248 service_action = get_unaligned_be16(&cdb[8]);
3250 * Determine if this is TCM/PSCSI device and we should disable
3251 * internal emulation for this CDB.
3253 passthrough = (TRANSPORT(dev)->transport_type ==
3254 TRANSPORT_PLUGIN_PHBA_PDEV);
3256 switch (service_action) {
3257 case XDWRITEREAD_32:
3258 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3260 goto out_unsupported_cdb;
3261 size = transport_get_size(sectors, cdb, cmd);
3263 * Use WRITE_32 and READ_32 opcodes for the emulated
3264 * XDWRITE_READ_32 logic.
3266 cmd->transport_split_cdb = &split_cdb_XX_32;
3267 T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
3268 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3271 * Skip the remaining assignments for TCM/PSCSI passthrough
3277 * Setup BIDI XOR callback to be run during
3278 * transport_generic_complete_ok()
3280 cmd->transport_complete_callback = &transport_xor_callback;
3281 T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
3284 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3286 goto out_unsupported_cdb;
3287 size = transport_get_size(sectors, cdb, cmd);
3288 T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
3289 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3292 * Skip the remaining assignments for TCM/PSCSI passthrough
3297 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3298 printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
3299 " bits not supported for Block Discard"
3301 goto out_invalid_cdb_field;
3304 * Currently for the emulated case we only accept
3305 * tpws with the UNMAP=1 bit set.
3307 if (!(cdb[10] & 0x08)) {
3308 printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
3309 " supported for Block Discard Emulation\n");
3310 goto out_invalid_cdb_field;
3314 printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
3315 " 0x%04x not supported\n", service_action);
3316 goto out_unsupported_cdb;
3320 if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
3321 /* MAINTENANCE_IN from SCC-2 */
3323 * Check for emulated MI_REPORT_TARGET_PGS.
3325 if (cdb[1] == MI_REPORT_TARGET_PGS) {
3326 cmd->transport_emulate_cdb =
3327 (T10_ALUA(su_dev)->alua_type ==
3328 SPC3_ALUA_EMULATED) ?
3329 &core_emulate_report_target_port_groups :
3330 NULL;
3332 size = (cdb[6] << 24) | (cdb[7] << 16) |
3333 (cdb[8] << 8) | cdb[9];
3335 /* GPCMD_SEND_KEY from multi media commands */
3336 size = (cdb[8] << 8) + cdb[9];
3338 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3342 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3344 case MODE_SELECT_10:
3345 size = (cdb[7] << 8) + cdb[8];
3346 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3350 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3353 case GPCMD_READ_BUFFER_CAPACITY:
3354 case GPCMD_SEND_OPC:
3357 size = (cdb[7] << 8) + cdb[8];
3358 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3360 case READ_BLOCK_LIMITS:
3361 size = READ_BLOCK_LEN;
3362 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3364 case GPCMD_GET_CONFIGURATION:
3365 case GPCMD_READ_FORMAT_CAPACITIES:
3366 case GPCMD_READ_DISC_INFO:
3367 case GPCMD_READ_TRACK_RZONE_INFO:
3368 size = (cdb[7] << 8) + cdb[8];
3369 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3371 case PERSISTENT_RESERVE_IN:
3372 case PERSISTENT_RESERVE_OUT:
3373 cmd->transport_emulate_cdb =
3374 (T10_RES(su_dev)->res_type ==
3375 SPC3_PERSISTENT_RESERVATIONS) ?
3376 &core_scsi3_emulate_pr : NULL;
3377 size = (cdb[7] << 8) + cdb[8];
3378 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3380 case GPCMD_MECHANISM_STATUS:
3381 case GPCMD_READ_DVD_STRUCTURE:
3382 size = (cdb[8] << 8) + cdb[9];
3383 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3386 size = READ_POSITION_LEN;
3387 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3390 if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
3391 /* MAINTENANCE_OUT from SCC-2
3393 * Check for emulated MO_SET_TARGET_PGS.
3395 if (cdb[1] == MO_SET_TARGET_PGS) {
3396 cmd->transport_emulate_cdb =
3397 (T10_ALUA(su_dev)->alua_type ==
3398 SPC3_ALUA_EMULATED) ?
3399 &core_emulate_set_target_port_groups :
3400 NULL;
3403 size = (cdb[6] << 24) | (cdb[7] << 16) |
3404 (cdb[8] << 8) | cdb[9];
3406 /* GPCMD_REPORT_KEY from multi media commands */
3407 size = (cdb[8] << 8) + cdb[9];
3409 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3412 size = (cdb[3] << 8) + cdb[4];
3414 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
3415 * See spc4r17 section 5.3
3417 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3418 cmd->sam_task_attr = TASK_ATTR_HOQ;
3419 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3422 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3423 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3426 size = READ_CAP_LEN;
3427 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3429 case READ_MEDIA_SERIAL_NUMBER:
3430 case SECURITY_PROTOCOL_IN:
3431 case SECURITY_PROTOCOL_OUT:
3432 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3433 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3435 case SERVICE_ACTION_IN:
3436 case ACCESS_CONTROL_IN:
3437 case ACCESS_CONTROL_OUT:
3439 case READ_ATTRIBUTE:
3440 case RECEIVE_COPY_RESULTS:
3441 case WRITE_ATTRIBUTE:
3442 size = (cdb[10] << 24) | (cdb[11] << 16) |
3443 (cdb[12] << 8) | cdb[13];
3444 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3446 case RECEIVE_DIAGNOSTIC:
3447 case SEND_DIAGNOSTIC:
3448 size = (cdb[3] << 8) | cdb[4];
3449 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3451 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
3454 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3455 size = (2336 * sectors);
3456 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3461 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3465 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3467 case READ_ELEMENT_STATUS:
3468 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
3469 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3472 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3473 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3478 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
3479 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3481 if (cdb[0] == RESERVE_10)
3482 size = (cdb[7] << 8) | cdb[8];
3484 size = cmd->data_length;
3487 * Setup the legacy emulated handler for SPC-2 and
3488 * >= SPC-3 compatible reservation handling (CRH=1).
3489 * Otherwise, we assume the underlying SCSI logic
3490 * is running in SPC_PASSTHROUGH, and wants reservation
3491 * emulation disabled.
3493 cmd->transport_emulate_cdb =
3494 (T10_RES(su_dev)->res_type !=
3495 SPC_PASSTHROUGH) ?
3496 &core_scsi2_emulate_crh : NULL;
3497 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3502 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
3503 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3505 if (cdb[0] == RELEASE_10)
3506 size = (cdb[7] << 8) | cdb[8];
3508 size = cmd->data_length;
3510 cmd->transport_emulate_cdb =
3511 (T10_RES(su_dev)->res_type !=
3512 SPC_PASSTHROUGH) ?
3513 &core_scsi2_emulate_crh : NULL;
3514 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3516 case SYNCHRONIZE_CACHE:
3517 case 0x91: /* SYNCHRONIZE_CACHE_16: */
3519 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
3521 if (cdb[0] == SYNCHRONIZE_CACHE) {
3522 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3523 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3525 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3526 T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3529 goto out_unsupported_cdb;
3531 size = transport_get_size(sectors, cdb, cmd);
3532 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3535 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
3537 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
3540 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
3541 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
3543 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3545 * Check to ensure that LBA + Range does not exceed past the end of
3546 * the device.
3547 */
3548 if (transport_get_sectors(cmd) < 0)
3549 goto out_invalid_cdb_field;
3552 size = get_unaligned_be16(&cdb[7]);
3553 passthrough = (TRANSPORT(dev)->transport_type ==
3554 TRANSPORT_PLUGIN_PHBA_PDEV);
3556 * Determine if the received UNMAP is used for direct passthrough
3557 * into Linux/SCSI with struct request via TCM/pSCSI, or if we are
3558 * signaling the use of internal transport_generic_unmap() emulation
3559 * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO
3560 * subsystem plugin backstores.
3563 cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
3565 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3568 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3570 goto out_unsupported_cdb;
3571 size = transport_get_size(sectors, cdb, cmd);
3572 T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]);
3573 passthrough = (TRANSPORT(dev)->transport_type ==
3574 TRANSPORT_PLUGIN_PHBA_PDEV);
3576 * Determine if the received WRITE_SAME_16 is used for direct
3577 * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
3578 * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
3579 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and
3580 * TCM/FILEIO subsystem plugin backstores.
3582 if (!(passthrough)) {
3583 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
3584 printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
3585 " bits not supported for Block Discard"
3587 goto out_invalid_cdb_field;
3590 * Currently for the emulated case we only accept
3591 * tpws with the UNMAP=1 bit set.
3593 if (!(cdb[1] & 0x08)) {
3594 printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
3595 " supported for Block Discard Emulation\n");
3596 goto out_invalid_cdb_field;
3599 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3601 case ALLOW_MEDIUM_REMOVAL:
3602 case GPCMD_CLOSE_TRACK:
3604 case INITIALIZE_ELEMENT_STATUS:
3605 case GPCMD_LOAD_UNLOAD:
3608 case GPCMD_SET_SPEED:
3611 case TEST_UNIT_READY:
3613 case WRITE_FILEMARKS:
3615 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3618 cmd->transport_emulate_cdb =
3619 &transport_core_report_lun_response;
3620 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3622 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3623 * See spc4r17 section 5.3
3625 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3626 cmd->sam_task_attr = TASK_ATTR_HOQ;
3627 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3630 printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
3631 " 0x%02x, sending CHECK_CONDITION.\n",
3632 CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
3633 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3634 goto out_unsupported_cdb;
3637 if (size != cmd->data_length) {
3638 printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
3639 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3640 " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
3641 cmd->data_length, size, cdb[0]);
3643 cmd->cmd_spdtl = size;
3645 if (cmd->data_direction == DMA_TO_DEVICE) {
3646 printk(KERN_ERR "Rejecting underflow/overflow"
3647 " WRITE data\n");
3648 goto out_invalid_cdb_field;
3651 * Reject READ_* or WRITE_* with overflow/underflow for
3652 * type SCF_SCSI_DATA_SG_IO_CDB.
3654 if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) {
3655 printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
3656 " CDB on non 512-byte sector setup subsystem"
3657 " plugin: %s\n", TRANSPORT(dev)->name);
3658 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3659 goto out_invalid_cdb_field;
3662 if (size > cmd->data_length) {
3663 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3664 cmd->residual_count = (size - cmd->data_length);
3666 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3667 cmd->residual_count = (cmd->data_length - size);
3669 cmd->data_length = size;
3672 transport_set_supported_SAM_opcode(cmd);
3675 out_unsupported_cdb:
3676 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3677 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3679 out_invalid_cdb_field:
3680 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3681 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3685 static inline void transport_release_tasks(struct se_cmd *);
3688 * This function will copy a contiguous *src buffer into a destination
3689 * struct scatterlist array.
3691 static void transport_memcpy_write_contig(
3692 struct se_cmd *cmd,
3693 struct scatterlist *sg_d,
3694 unsigned char *src)
3696 u32 i = 0, length = 0, total_length = cmd->data_length;
3699 while (total_length) {
3700 length = sg_d[i].length;
3702 if (length > total_length)
3703 length = total_length;
3705 dst = sg_virt(&sg_d[i]);
3707 memcpy(dst, src, length);
3709 if (!(total_length -= length))
3718 * This function will copy a struct scatterlist array *sg_s into a destination
3719 * contiguous *dst buffer.
3721 static void transport_memcpy_read_contig(
3722 struct se_cmd *cmd,
3723 unsigned char *dst,
3724 struct scatterlist *sg_s)
3726 u32 i = 0, length = 0, total_length = cmd->data_length;
3729 while (total_length) {
3730 length = sg_s[i].length;
3732 if (length > total_length)
3733 length = total_length;
3735 src = sg_virt(&sg_s[i]);
3737 memcpy(dst, src, length);
3739 if (!(total_length -= length))
3747 static void transport_memcpy_se_mem_read_contig(
3748 struct se_cmd *cmd,
3749 unsigned char *dst,
3750 struct list_head *se_mem_list)
3752 struct se_mem *se_mem;
3754 u32 length = 0, total_length = cmd->data_length;
3756 list_for_each_entry(se_mem, se_mem_list, se_list) {
3757 length = se_mem->se_len;
3759 if (length > total_length)
3760 length = total_length;
3762 src = page_address(se_mem->se_page) + se_mem->se_off;
3764 memcpy(dst, src, length);
3766 if (!(total_length -= length))
3774 * Called from transport_generic_complete_ok() and
3775 * transport_generic_request_failure() to determine which dormant/delayed
3776 * and ordered cmds need to have their tasks added to the execution queue.
3778 static void transport_complete_task_attr(struct se_cmd *cmd)
3780 struct se_device *dev = SE_DEV(cmd);
3781 struct se_cmd *cmd_p, *cmd_tmp;
3782 int new_active_tasks = 0;
3784 if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
3785 atomic_dec(&dev->simple_cmds);
3786 smp_mb__after_atomic_dec();
3787 dev->dev_cur_ordered_id++;
3788 DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
3789 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3790 cmd->se_ordered_id);
3791 } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
3792 atomic_dec(&dev->dev_hoq_count);
3793 smp_mb__after_atomic_dec();
3794 dev->dev_cur_ordered_id++;
3795 DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
3796 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3797 cmd->se_ordered_id);
3798 } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
3799 spin_lock(&dev->ordered_cmd_lock);
3800 list_del(&cmd->se_ordered_list);
3801 atomic_dec(&dev->dev_ordered_sync);
3802 smp_mb__after_atomic_dec();
3803 spin_unlock(&dev->ordered_cmd_lock);
3805 dev->dev_cur_ordered_id++;
3806 DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
3807 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3810 * Process all commands up to the last received
3811 * ORDERED task attribute which requires another blocking
3812 * boundary.
3813 */
3814 spin_lock(&dev->delayed_cmd_lock);
3815 list_for_each_entry_safe(cmd_p, cmd_tmp,
3816 &dev->delayed_cmd_list, se_delayed_list) {
3818 list_del(&cmd_p->se_delayed_list);
3819 spin_unlock(&dev->delayed_cmd_lock);
3821 DEBUG_STA("Calling add_tasks() for"
3822 " cmd_p: 0x%02x Task Attr: 0x%02x"
3823 " Dormant -> Active, se_ordered_id: %u\n",
3824 T_TASK(cmd_p)->t_task_cdb[0],
3825 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3827 transport_add_tasks_from_cmd(cmd_p);
3830 spin_lock(&dev->delayed_cmd_lock);
3831 if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
3834 spin_unlock(&dev->delayed_cmd_lock);
3836 * If new tasks have become active, wake up the transport thread
3837 * to do the processing of the Active tasks.
3839 if (new_active_tasks != 0)
3840 wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
3843 static void transport_generic_complete_ok(struct se_cmd *cmd)
3847 * Check if we need to move delayed/dormant tasks from cmds on the
3848 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3849 * Attribute has completed.
3850 */
3851 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3852 transport_complete_task_attr(cmd);
3854 * Check if we need to retrieve a sense buffer from
3855 * the struct se_cmd in question.
3857 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3858 if (transport_get_sense_data(cmd) < 0)
3859 reason = TCM_NON_EXISTENT_LUN;
3862 * Only set when a struct se_task->task_scsi_status returned
3863 * a non GOOD status.
3865 if (cmd->scsi_status) {
3866 transport_send_check_condition_and_sense(
3867 cmd, reason, 1);
3868 transport_lun_remove_cmd(cmd);
3869 transport_cmd_check_stop_to_fabric(cmd);
3874 * Check for a callback, used amongst other things by
3875 * XDWRITE_READ_10 emulation.
3877 if (cmd->transport_complete_callback)
3878 cmd->transport_complete_callback(cmd);
3880 switch (cmd->data_direction) {
3881 case DMA_FROM_DEVICE:
3882 spin_lock(&cmd->se_lun->lun_sep_lock);
3883 if (SE_LUN(cmd)->lun_sep) {
3884 SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
3885 cmd->data_length;
3887 spin_unlock(&cmd->se_lun->lun_sep_lock);
3889 * If enabled by the TCM fabric module's pre-registered SGL
3890 * memory, perform the memcpy() from the TCM internal
3891 * contiguous buffer back to the original SGL.
3893 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
3894 transport_memcpy_write_contig(cmd,
3895 T_TASK(cmd)->t_task_pt_sgl,
3896 T_TASK(cmd)->t_task_buf);
3898 CMD_TFO(cmd)->queue_data_in(cmd);
3901 spin_lock(&cmd->se_lun->lun_sep_lock);
3902 if (SE_LUN(cmd)->lun_sep) {
3903 SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
3904 cmd->data_length;
3906 spin_unlock(&cmd->se_lun->lun_sep_lock);
3908 * Check if we need to send READ payload for BIDI-COMMAND
3910 if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
3911 spin_lock(&cmd->se_lun->lun_sep_lock);
3912 if (SE_LUN(cmd)->lun_sep) {
3913 SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
3914 cmd->data_length;
3916 spin_unlock(&cmd->se_lun->lun_sep_lock);
3917 CMD_TFO(cmd)->queue_data_in(cmd);
3920 /* Fall through for DMA_TO_DEVICE */
3922 CMD_TFO(cmd)->queue_status(cmd);
3928 transport_lun_remove_cmd(cmd);
3929 transport_cmd_check_stop_to_fabric(cmd);
3932 static void transport_free_dev_tasks(struct se_cmd *cmd)
3934 struct se_task *task, *task_tmp;
3935 unsigned long flags;
3937 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
3938 list_for_each_entry_safe(task, task_tmp,
3939 &T_TASK(cmd)->t_task_list, t_list) {
3940 if (atomic_read(&task->task_active))
3943 kfree(task->task_sg_bidi);
3944 kfree(task->task_sg);
3946 list_del(&task->t_list);
3948 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3950 TRANSPORT(task->se_dev)->free_task(task);
3952 printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
3953 task->task_no);
3954 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
3956 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3959 static inline void transport_free_pages(struct se_cmd *cmd)
3961 struct se_mem *se_mem, *se_mem_tmp;
3964 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3966 if (cmd->se_dev->transport->do_se_mem_map)
3969 if (T_TASK(cmd)->t_task_buf) {
3970 kfree(T_TASK(cmd)->t_task_buf);
3971 T_TASK(cmd)->t_task_buf = NULL;
3976 * Caller will handle releasing of struct se_mem.
3978 if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
3981 if (!(T_TASK(cmd)->t_tasks_se_num))
3984 list_for_each_entry_safe(se_mem, se_mem_tmp,
3985 T_TASK(cmd)->t_mem_list, se_list) {
3987 * We only call __free_page(struct se_mem->se_page) when
3988 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
3991 __free_page(se_mem->se_page);
3993 list_del(&se_mem->se_list);
3994 kmem_cache_free(se_mem_cache, se_mem);
3997 if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
3998 list_for_each_entry_safe(se_mem, se_mem_tmp,
3999 T_TASK(cmd)->t_mem_bidi_list, se_list) {
4001 * We only call __free_page(struct se_mem->se_page) when
4002 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
4005 __free_page(se_mem->se_page);
4007 list_del(&se_mem->se_list);
4008 kmem_cache_free(se_mem_cache, se_mem);
4012 kfree(T_TASK(cmd)->t_mem_bidi_list);
4013 T_TASK(cmd)->t_mem_bidi_list = NULL;
4014 kfree(T_TASK(cmd)->t_mem_list);
4015 T_TASK(cmd)->t_mem_list = NULL;
4016 T_TASK(cmd)->t_tasks_se_num = 0;
4019 static inline void transport_release_tasks(struct se_cmd *cmd)
4021 transport_free_dev_tasks(cmd);
4024 static inline int transport_dec_and_check(struct se_cmd *cmd)
4026 unsigned long flags;
4028 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4029 if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
4030 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
4031 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4032 flags);
4037 if (atomic_read(&T_TASK(cmd)->t_se_count)) {
4038 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
4039 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4040 flags);
4044 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4049 static void transport_release_fe_cmd(struct se_cmd *cmd)
4051 unsigned long flags;
4053 if (transport_dec_and_check(cmd))
4056 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4057 if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
4058 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4061 atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
4062 transport_all_task_dev_remove_state(cmd);
4063 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4065 transport_release_tasks(cmd);
4067 transport_free_pages(cmd);
4068 transport_free_se_cmd(cmd);
4069 CMD_TFO(cmd)->release_cmd_direct(cmd);
4072 static int transport_generic_remove(
4073 struct se_cmd *cmd,
4074 int release_to_pool,
4075 int session_reinstatement)
4077 unsigned long flags;
4082 if (transport_dec_and_check(cmd)) {
4083 if (session_reinstatement) {
4084 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4085 transport_all_task_dev_remove_state(cmd);
4086 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4087 flags);
4092 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4093 if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
4094 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4097 atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
4098 transport_all_task_dev_remove_state(cmd);
4099 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4101 transport_release_tasks(cmd);
4103 transport_free_pages(cmd);
4106 if (release_to_pool) {
4107 transport_release_cmd_to_pool(cmd);
4109 transport_free_se_cmd(cmd);
4110 CMD_TFO(cmd)->release_cmd_direct(cmd);
4117 * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
4118 * @cmd: Associated se_cmd descriptor
4119 * @mem: SGL style memory for TCM WRITE / READ
4120 * @sg_mem_num: Number of SGL elements
4121 * @mem_bidi_in: SGL style memory for TCM BIDI READ
4122 * @sg_mem_bidi_num: Number of BIDI READ SGL elements
4124 * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
4127 int transport_generic_map_mem_to_cmd(
4128 struct se_cmd *cmd,
4129 struct scatterlist *mem,
4130 u32 sg_mem_num,
4131 struct scatterlist *mem_bidi_in,
4132 u32 sg_mem_bidi_num)
4134 u32 se_mem_cnt_out = 0;
4137 if (!(mem) || !(sg_mem_num))
4140 * Passed *mem will contain a list_head containing preformatted
4141 * struct se_mem elements...
4143 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
4144 if ((mem_bidi_in) || (sg_mem_bidi_num)) {
4145 printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
4146 " with BIDI-COMMAND\n");
4150 T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
4151 T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
4152 cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
4156 * Otherwise, assume the caller is passing a struct scatterlist
4157 * array from include/linux/scatterlist.h
4159 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
4160 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
4162 * For CDB using TCM struct se_mem linked list scatterlist memory
4163 * processed into a TCM struct se_subsystem_dev, we do the mapping
4164 * from the passed physical memory to struct se_mem->se_page here.
4166 T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
4167 if (!(T_TASK(cmd)->t_mem_list))
4170 ret = transport_map_sg_to_mem(cmd,
4171 T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
4175 T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
4177 * Setup BIDI READ list of struct se_mem elements
4179 if ((mem_bidi_in) && (sg_mem_bidi_num)) {
4180 T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
4181 if (!(T_TASK(cmd)->t_mem_bidi_list)) {
4182 kfree(T_TASK(cmd)->t_mem_list);
4187 ret = transport_map_sg_to_mem(cmd,
4188 T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
4189 &se_mem_cnt_out);
4191 kfree(T_TASK(cmd)->t_mem_list);
4195 T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
4197 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
4199 } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
4200 if (mem_bidi_in || sg_mem_bidi_num) {
4201 printk(KERN_ERR "BIDI-Commands not supported using "
4202 "SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
4206 * For incoming CDBs using a contiguous buffer internally with TCM,
4207 * save the passed struct scatterlist memory. After TCM storage object
4208 * processing has completed for this struct se_cmd, TCM core will call
4209 * transport_memcpy_[write,read]_contig() as necessary from
4210 * transport_generic_complete_ok() and transport_write_pending() in order
4211 * to copy the TCM buffer to/from the original passed *mem in SGL ->
4212 * struct scatterlist format.
4214 cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
4215 T_TASK(cmd)->t_task_pt_sgl = mem;
4220 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
4223 static inline long long transport_dev_end_lba(struct se_device *dev)
4225 return dev->transport->get_blocks(dev) + 1;
4228 static int transport_get_sectors(struct se_cmd *cmd)
4230 struct se_device *dev = SE_DEV(cmd);
4232 T_TASK(cmd)->t_tasks_sectors =
4233 (cmd->data_length / DEV_ATTRIB(dev)->block_size);
4234 if (!(T_TASK(cmd)->t_tasks_sectors))
4235 T_TASK(cmd)->t_tasks_sectors = 1;
4237 if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
4240 if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
4241 transport_dev_end_lba(dev)) {
4242 printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
4243 " transport_dev_end_lba(): %llu\n",
4244 T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
4245 transport_dev_end_lba(dev));
4246 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4247 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
4248 return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
4254 static int transport_new_cmd_obj(struct se_cmd *cmd)
4256 struct se_device *dev = SE_DEV(cmd);
4257 u32 task_cdbs = 0, rc;
4259 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
4261 T_TASK(cmd)->t_task_cdbs++;
4266 * Setup any BIDI READ tasks and memory from
4267 * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
4268 * are queued first for the non pSCSI passthrough case.
4270 if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
4271 (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
4272 rc = transport_generic_get_cdb_count(cmd,
4273 T_TASK(cmd)->t_task_lba,
4274 T_TASK(cmd)->t_tasks_sectors,
4275 DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
4276 set_counts);
4278 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4279 cmd->scsi_sense_reason =
4280 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4281 return PYX_TRANSPORT_LU_COMM_FAILURE;
4286 * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
4287 * Note for BIDI transfers this will contain the WRITE payload
4289 task_cdbs = transport_generic_get_cdb_count(cmd,
4290 T_TASK(cmd)->t_task_lba,
4291 T_TASK(cmd)->t_tasks_sectors,
4292 cmd->data_direction, T_TASK(cmd)->t_mem_list,
4293 set_counts);
4295 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4296 cmd->scsi_sense_reason =
4297 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4298 return PYX_TRANSPORT_LU_COMM_FAILURE;
4300 T_TASK(cmd)->t_task_cdbs += task_cdbs;
4303 printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
4304 " %u, t_task_cdbs: %u\n", cmd->data_length,
4305 T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
4306 T_TASK(cmd)->t_task_cdbs);
4310 atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
4311 atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
4312 atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
4316 static struct list_head *transport_init_se_mem_list(void)
4318 struct list_head *se_mem_list;
4320 se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
4321 if (!(se_mem_list)) {
4322 printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
4325 INIT_LIST_HEAD(se_mem_list);
4330 static int
4331 transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
4334 struct se_mem *se_mem;
4336 T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
4337 if (!(T_TASK(cmd)->t_mem_list))
4341 * If the device uses memory mapping this is enough.
4343 if (cmd->se_dev->transport->do_se_mem_map)
4347 * Setup BIDI-COMMAND READ list of struct se_mem elements
4349 if (T_TASK(cmd)->t_tasks_bidi) {
4350 T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
4351 if (!(T_TASK(cmd)->t_mem_bidi_list)) {
4352 kfree(T_TASK(cmd)->t_mem_list);
4358 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
4360 printk(KERN_ERR "Unable to allocate struct se_mem\n");
4363 INIT_LIST_HEAD(&se_mem->se_list);
4364 se_mem->se_len = (length > dma_size) ? dma_size : length;
4366 /* #warning FIXME Allocate contigous pages for struct se_mem elements */
4367 se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0);
4368 if (!(se_mem->se_page)) {
4369 printk(KERN_ERR "alloc_pages() failed\n");
4373 buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
4375 printk(KERN_ERR "kmap_atomic() failed\n");
4378 memset(buf, 0, se_mem->se_len);
4379 kunmap_atomic(buf, KM_IRQ0);
4381 list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
4382 T_TASK(cmd)->t_tasks_se_num++;
4384 DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
4385 " Offset(%u)\n", se_mem->se_page, se_mem->se_len,
4388 length -= se_mem->se_len;
4391 DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
4392 T_TASK(cmd)->t_tasks_se_num);
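/*
 * Walk the command's se_mem list to count how many scatterlist entries
 * this task needs for task->task_size bytes, then allocate and initialize
 * task->task_sg[] (and task->task_sg_bidi[] for pSCSI BIDI passthrough).
 * Returns the unpadded entry count, or 0 on allocation failure.
 */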
u32 transport_calc_sg_num(
        struct se_task *task,
        struct se_mem *in_se_mem,
        u32 task_offset)
{
        struct se_cmd *se_cmd = task->task_se_cmd;
        struct se_device *se_dev = SE_DEV(se_cmd);
        struct se_mem *se_mem = in_se_mem;
        struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
        u32 sg_length, task_size = task->task_size, task_sg_num_padded;

        while (task_size != 0) {
                DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
                        " se_mem->se_off(%u) task_offset(%u)\n",
                        se_mem->se_page, se_mem->se_len,
                        se_mem->se_off, task_offset);

                if (task_offset == 0) {
                        if (task_size >= se_mem->se_len) {
                                sg_length = se_mem->se_len;

                                if (!(list_is_last(&se_mem->se_list,
                                                T_TASK(se_cmd)->t_mem_list)))
                                        se_mem = list_entry(se_mem->se_list.next,
                                                        struct se_mem, se_list);
                        } else {
                                sg_length = task_size;
                                task_size -= sg_length;
                                goto next;
                        }

                        DEBUG_SC("sg_length(%u) task_size(%u)\n",
                                        sg_length, task_size);
                } else {
                        if ((se_mem->se_len - task_offset) > task_size) {
                                sg_length = task_size;
                                task_size -= sg_length;
                                goto next;
                        } else {
                                sg_length = (se_mem->se_len - task_offset);

                                if (!(list_is_last(&se_mem->se_list,
                                                T_TASK(se_cmd)->t_mem_list)))
                                        se_mem = list_entry(se_mem->se_list.next,
                                                        struct se_mem, se_list);
                        }

                        DEBUG_SC("sg_length(%u) task_size(%u)\n",
                                        sg_length, task_size);

                        task_offset = 0;
                }
                task_size -= sg_length;
next:
                DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
                        task->task_no, task_size);

                task->task_sg_num++;
        }
        /*
         * Check if the fabric module driver is requesting that all
         * struct se_task->task_sg[] be chained together..  If so,
         * then allocate an extra padding SG entry for linking and
         * marking the end of the chained SGL.
         */
        if (tfo->task_sg_chaining) {
                task_sg_num_padded = (task->task_sg_num + 1);
                task->task_padded_sg = 1;
        } else
                task_sg_num_padded = task->task_sg_num;

        task->task_sg = kzalloc(task_sg_num_padded *
                        sizeof(struct scatterlist), GFP_KERNEL);
        if (!(task->task_sg)) {
                printk(KERN_ERR "Unable to allocate memory for"
                                " task->task_sg\n");
                return 0;
        }
        sg_init_table(&task->task_sg[0], task_sg_num_padded);
        /*
         * Setup task->task_sg_bidi for SCSI READ payload for
         * TCM/pSCSI passthrough if present for BIDI-COMMAND
         */
        if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
            (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
                task->task_sg_bidi = kzalloc(task_sg_num_padded *
                                sizeof(struct scatterlist), GFP_KERNEL);
                if (!(task->task_sg_bidi)) {
                        printk(KERN_ERR "Unable to allocate memory for"
                                " task->task_sg_bidi\n");
                        return 0;
                }
                sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
        }
        /*
         * For the chaining case, setup the proper end of SGL for the
         * initial submission struct task into struct se_subsystem_api.
         * This will be cleared later by transport_do_task_sg_chain()
         */
        if (task->task_padded_sg) {
                sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
                /*
                 * Added the 'if' check before marking end of bi-directional
                 * scatterlist (which gets created only in case of request
                 * (RD + WR).
                 */
                if (task->task_sg_bidi)
                        sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
        }

        DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
                " task_sg_num_padded(%u)\n", task->task_sg_num,
                task_sg_num_padded);

        return task->task_sg_num;
}

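/*
 * Clamp a TYPE_DISK task to the device's end LBA and max_sectors limit,
 * e.g. sectors = 1024 with DEV_ATTRIB(dev)->max_sectors = 256 sets
 * task->task_sectors = 256 and *max_sectors_set = 1, signalling the
 * caller to keep splitting the remainder into further tasks.
 */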
static inline int transport_set_tasks_sectors_disk(
        struct se_task *task,
        struct se_device *dev,
        unsigned long long lba,
        u32 sectors,
        int *max_sectors_set)
{
        if ((lba + sectors) > transport_dev_end_lba(dev)) {
                task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);

                if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
                        task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
                        *max_sectors_set = 1;
                }
        } else {
                if (sectors > DEV_ATTRIB(dev)->max_sectors) {
                        task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
                        *max_sectors_set = 1;
                } else
                        task->task_sectors = sectors;
        }

        return 0;
}

static inline int transport_set_tasks_sectors_non_disk(
        struct se_task *task,
        struct se_device *dev,
        unsigned long long lba,
        u32 sectors,
        int *max_sectors_set)
{
        if (sectors > DEV_ATTRIB(dev)->max_sectors) {
                task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
                *max_sectors_set = 1;
        } else
                task->task_sectors = sectors;

        return 0;
}

static inline int transport_set_tasks_sectors(
        struct se_task *task,
        struct se_device *dev,
        unsigned long long lba,
        u32 sectors,
        int *max_sectors_set)
{
        return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
                transport_set_tasks_sectors_disk(task, dev, lba, sectors,
                                max_sectors_set) :
                transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
                                max_sectors_set);
}

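/*
 * Build a struct se_mem list from a fabric supplied scatterlist, copying
 * page/offset/length from each SG entry until cmd->data_length bytes are
 * covered; the final entry is trimmed to the remaining byte count.
 */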
static int transport_map_sg_to_mem(
        struct se_cmd *cmd,
        struct list_head *se_mem_list,
        void *in_mem,
        u32 *se_mem_cnt)
{
        struct se_mem *se_mem;
        struct scatterlist *sg;
        u32 sg_count = 1, cmd_size = cmd->data_length;

        if (!in_mem) {
                printk(KERN_ERR "No source scatterlist\n");
                return -EINVAL;
        }
        sg = (struct scatterlist *)in_mem;

        while (cmd_size) {
                se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
                if (!(se_mem)) {
                        printk(KERN_ERR "Unable to allocate struct se_mem\n");
                        return -ENOMEM;
                }
                INIT_LIST_HEAD(&se_mem->se_list);
                DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
                        " sg_page: %p offset: %d length: %d\n", cmd_size,
                        sg_page(sg), sg->offset, sg->length);

                se_mem->se_page = sg_page(sg);
                se_mem->se_off = sg->offset;

                if (cmd_size > sg->length) {
                        se_mem->se_len = sg->length;
                        sg = sg_next(sg);
                        sg_count++;
                } else
                        se_mem->se_len = cmd_size;

                cmd_size -= se_mem->se_len;

                DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
                                *se_mem_cnt, cmd_size);
                DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
                                se_mem->se_page, se_mem->se_off, se_mem->se_len);

                list_add_tail(&se_mem->se_list, se_mem_list);
                (*se_mem_cnt)++;
        }

        DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
                " struct se_mem\n", sg_count, *se_mem_cnt);

        if (sg_count != *se_mem_cnt)
                BUG();

        return 0;
}

/*      transport_map_mem_to_sg():
 *
 *
 */
int transport_map_mem_to_sg(
        struct se_task *task,
        struct list_head *se_mem_list,
        void *in_mem,
        struct se_mem *in_se_mem,
        struct se_mem **out_se_mem,
        u32 *se_mem_cnt,
        u32 *task_offset)
{
        struct se_cmd *se_cmd = task->task_se_cmd;
        struct se_mem *se_mem = in_se_mem;
        struct scatterlist *sg = (struct scatterlist *)in_mem;
        u32 task_size = task->task_size, sg_no = 0;

        if (!sg) {
                printk(KERN_ERR "Unable to locate valid struct"
                                " scatterlist pointer\n");
                return -EINVAL;
        }

        while (task_size != 0) {
                /*
                 * Setup the contiguous array of scatterlists for
                 * this struct se_task.
                 */
                sg_assign_page(sg, se_mem->se_page);

                if (*task_offset == 0) {
                        sg->offset = se_mem->se_off;

                        if (task_size >= se_mem->se_len) {
                                sg->length = se_mem->se_len;

                                if (!(list_is_last(&se_mem->se_list,
                                                T_TASK(se_cmd)->t_mem_list))) {
                                        se_mem = list_entry(se_mem->se_list.next,
                                                        struct se_mem, se_list);
                                        (*se_mem_cnt)++;
                                }
                        } else {
                                sg->length = task_size;
                                /*
                                 * Determine if we need to calculate an offset
                                 * into the struct se_mem on the next go around..
                                 */
                                task_size -= sg->length;
                                if (!(task_size))
                                        *task_offset = sg->length;

                                goto next;
                        }

                } else {
                        sg->offset = (*task_offset + se_mem->se_off);

                        if ((se_mem->se_len - *task_offset) > task_size) {
                                sg->length = task_size;
                                /*
                                 * Determine if we need to calculate an offset
                                 * into the struct se_mem on the next go around..
                                 */
                                task_size -= sg->length;
                                if (!(task_size))
                                        *task_offset += sg->length;

                                goto next;
                        } else {
                                sg->length = (se_mem->se_len - *task_offset);

                                if (!(list_is_last(&se_mem->se_list,
                                                T_TASK(se_cmd)->t_mem_list))) {
                                        se_mem = list_entry(se_mem->se_list.next,
                                                        struct se_mem, se_list);
                                        (*se_mem_cnt)++;
                                }
                        }

                        *task_offset = 0;
                }
                task_size -= sg->length;
next:
                DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
                        " task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
                        sg_page(sg), sg->length, sg->offset, task_size, *task_offset);

                sg_no++;
                if (!(task_size))
                        break;

                sg = sg_next(sg);

                if (task_size > se_cmd->data_length)
                        BUG();
        }
        *out_se_mem = se_mem;

        DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
                " SGs\n", task->task_no, *se_mem_cnt, sg_no);

        return 0;
}

/*
 * This function can be used by HW target mode drivers to create a linked
 * scatterlist from all contiguously allocated struct se_task->task_sg[].
 * This is intended to be called during the completion path by TCM Core
 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
 */
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
        struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
        struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
        struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
        struct se_task *task;
        struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
        u32 task_sg_num = 0, sg_count = 0;
        int i;

        if (tfo->task_sg_chaining == 0) {
                printk(KERN_ERR "task_sg_chaining is disabled for fabric module:"
                                " %s\n", tfo->get_fabric_name());
                dump_stack();
                return;
        }
        /*
         * Walk the struct se_task list and setup scatterlist chains
         * for each contiguously allocated struct se_task->task_sg[].
         */
        list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
                if (!(task->task_sg) || !(task->task_padded_sg))
                        continue;

                if (sg_head && sg_link) {
                        sg_head_cur = &task->task_sg[0];
                        sg_link_cur = &task->task_sg[task->task_sg_num];
                        /*
                         * Either add chain or mark end of scatterlist
                         */
                        if (!(list_is_last(&task->t_list,
                                        &T_TASK(cmd)->t_task_list))) {
                                /*
                                 * Clear existing SGL termination bit set in
                                 * transport_calc_sg_num(), see sg_mark_end()
                                 */
                                sg_end_cur = &task->task_sg[task->task_sg_num - 1];
                                sg_end_cur->page_link &= ~0x02;

                                sg_chain(sg_head, task_sg_num, sg_head_cur);
                                sg_count += (task->task_sg_num + 1);
                        } else
                                sg_count += task->task_sg_num;

                        sg_head = sg_head_cur;
                        sg_link = sg_link_cur;
                        task_sg_num = task->task_sg_num;
                        continue;
                }
                sg_head = sg_first = &task->task_sg[0];
                sg_link = &task->task_sg[task->task_sg_num];
                task_sg_num = task->task_sg_num;
                /*
                 * Check for single task..
                 */
                if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
                        /*
                         * Clear existing SGL termination bit set in
                         * transport_calc_sg_num(), see sg_mark_end()
                         */
                        sg_end = &task->task_sg[task->task_sg_num - 1];
                        sg_end->page_link &= ~0x02;
                        sg_count += (task->task_sg_num + 1);
                } else
                        sg_count += task->task_sg_num;
        }
        /*
         * Setup the starting pointer and total t_tasks_sg_linked_no including
         * padding SGs for linking and to mark the end.
         */
        T_TASK(cmd)->t_tasks_sg_chained = sg_first;
        T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;

        DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
                " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
                T_TASK(cmd)->t_tasks_sg_chained_no);

        for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
                        T_TASK(cmd)->t_tasks_sg_chained_no, i) {

                DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
                        sg, sg_page(sg), sg->length, sg->offset);
                if (sg_is_chain(sg))
                        DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
                if (sg_is_last(sg))
                        DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
        }
}
EXPORT_SYMBOL(transport_do_task_sg_chain);

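/*
 * A minimal usage sketch from a HW target mode driver's completion path
 * (assuming a hypothetical fabric module "foo" that sets
 * tfo->task_sg_chaining and provides its own foo_queue_dma() helper):
 *
 *      transport_do_task_sg_chain(se_cmd);
 *      foo_queue_dma(foo_hw_ctx, T_TASK(se_cmd)->t_tasks_sg_chained,
 *                    T_TASK(se_cmd)->t_tasks_sg_chained_no);
 *
 * after which the chained SGL can be walked with for_each_sg().
 */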
static int transport_do_se_mem_map(
        struct se_device *dev,
        struct se_task *task,
        struct list_head *se_mem_list,
        void *in_mem,
        struct se_mem *in_se_mem,
        struct se_mem **out_se_mem,
        u32 *se_mem_cnt,
        u32 *task_offset_in)
{
        u32 task_offset = *task_offset_in;
        int ret = 0;
        /*
         * se_subsystem_api_t->do_se_mem_map is used when internal allocation
         * has been done by the transport plugin.
         */
        if (TRANSPORT(dev)->do_se_mem_map) {
                ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
                                in_mem, in_se_mem, out_se_mem, se_mem_cnt,
                                task_offset_in);
                if (ret == 0)
                        T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;

                return ret;
        }

        BUG_ON(list_empty(se_mem_list));
        /*
         * This is the normal path for all normal non BIDI and BIDI-COMMAND
         * WRITE payloads..  If we need to do BIDI READ passthrough for
         * TCM/pSCSI the first call to transport_do_se_mem_map ->
         * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
         * allocation for task->task_sg_bidi, and the subsequent call to
         * transport_do_se_mem_map() from transport_generic_get_cdb_count()
         */
        if (!(task->task_sg_bidi)) {
                /*
                 * Assume default that transport plugin speaks preallocated
                 * scatterlists.
                 */
                if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
                        return -ENOMEM;
                /*
                 * struct se_task->task_sg now contains the struct scatterlist array.
                 */
                return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
                                        in_se_mem, out_se_mem, se_mem_cnt,
                                        task_offset_in);
        }
        /*
         * Handle the se_mem_list -> struct task->task_sg_bidi
         * memory map for the extra BIDI READ payload
         */
        return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
                                in_se_mem, out_se_mem, se_mem_cnt,
                                task_offset_in);
}

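/*
 * Split [lba, lba + sectors) into one or more struct se_task descriptors,
 * e.g. a 1024 sector WRITE against a device with max_sectors = 256 becomes
 * four tasks of 256 sectors each, every task carrying its own split CDB
 * and scatterlist mapping.  Returns the number of tasks built, 0 on error.
 */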
static u32 transport_generic_get_cdb_count(
        struct se_cmd *cmd,
        unsigned long long lba,
        u32 sectors,
        enum dma_data_direction data_direction,
        struct list_head *mem_list,
        int set_counts)
{
        unsigned char *cdb = NULL;
        struct se_task *task;
        struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
        struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
        struct se_device *dev = SE_DEV(cmd);
        int max_sectors_set = 0, ret;
        u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;

        if (!mem_list) {
                printk(KERN_ERR "mem_list is NULL in transport_generic_get"
                                "_cdb_count()\n");
                return 0;
        }
        /*
         * While using RAMDISK_DR backstores is the only case where
         * mem_list will ever be empty at this point.
         */
        if (!(list_empty(mem_list)))
                se_mem = list_entry(mem_list->next, struct se_mem, se_list);
        /*
         * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
         * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
         */
        if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
            !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
            (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
                se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
                                        struct se_mem, se_list);

        while (sectors) {
                DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
                        CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
                        transport_dev_end_lba(dev));

                task = transport_generic_get_task(cmd, data_direction);
                if (!(task))
                        goto out;

                transport_set_tasks_sectors(task, dev, lba, sectors,
                                &max_sectors_set);

                task->task_lba = lba;
                lba += task->task_sectors;
                sectors -= task->task_sectors;
                task->task_size = (task->task_sectors *
                                   DEV_ATTRIB(dev)->block_size);

                cdb = TRANSPORT(dev)->get_cdb(task);
                if (cdb) {
                        memcpy(cdb, T_TASK(cmd)->t_task_cdb,
                                scsi_command_size(T_TASK(cmd)->t_task_cdb));
                        cmd->transport_split_cdb(task->task_lba,
                                        &task->task_sectors, cdb);
                }

                /*
                 * Perform the SE OBJ plugin and/or Transport plugin specific
                 * mapping for T_TASK(cmd)->t_mem_list. And setup the
                 * task->task_sg and if necessary task->task_sg_bidi
                 */
                ret = transport_do_se_mem_map(dev, task, mem_list,
                                NULL, se_mem, &se_mem_lout, &se_mem_cnt,
                                &task_offset_in);
                if (ret < 0)
                        goto out;

                se_mem = se_mem_lout;
                /*
                 * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
                 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
                 *
                 * Note that the first call to transport_do_se_mem_map() above will
                 * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
                 * -> transport_calc_sg_num(), and the second here will do the
                 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
                 */
                if (task->task_sg_bidi != NULL) {
                        ret = transport_do_se_mem_map(dev, task,
                                T_TASK(cmd)->t_mem_bidi_list, NULL,
                                se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
                                &task_offset_in);
                        if (ret < 0)
                                goto out;

                        se_mem_bidi = se_mem_bidi_lout;
                }
                task_cdbs++;

                DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
                                task_cdbs, task->task_sg_num);

                if (max_sectors_set) {
                        max_sectors_set = 0;
                        continue;
                }

                if (!sectors)
                        break;
        }

        if (set_counts) {
                atomic_inc(&T_TASK(cmd)->t_fe_count);
                atomic_inc(&T_TASK(cmd)->t_se_count);
        }

        DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
                CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
                ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);

        return task_cdbs;
out:
        return 0;
}

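/*
 * Map a control CDB (non SCF_SCSI_DATA_SG_IO_CDB) to a single struct
 * se_task and hand it to the backend via map_task_SG(), map_task_non_SG()
 * or cdb_none(), depending on whether the payload uses a scatterlist, a
 * contiguous buffer, or carries no data at all.
 */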
static int
transport_map_control_cmd_to_task(struct se_cmd *cmd)
{
        struct se_device *dev = SE_DEV(cmd);
        unsigned char *cdb;
        struct se_task *task;
        int ret;

        task = transport_generic_get_task(cmd, cmd->data_direction);
        if (!task)
                return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

        cdb = TRANSPORT(dev)->get_cdb(task);
        if (cdb)
                memcpy(cdb, cmd->t_task->t_task_cdb,
                        scsi_command_size(cmd->t_task->t_task_cdb));

        task->task_size = cmd->data_length;
        task->task_sg_num =
                (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;

        atomic_inc(&cmd->t_task->t_fe_count);
        atomic_inc(&cmd->t_task->t_se_count);

        if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
                struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
                u32 se_mem_cnt = 0, task_offset = 0;

                if (!list_empty(T_TASK(cmd)->t_mem_list))
                        se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
                                        struct se_mem, se_list);

                ret = transport_do_se_mem_map(dev, task,
                                cmd->t_task->t_mem_list, NULL, se_mem,
                                &se_mem_lout, &se_mem_cnt, &task_offset);
                if (ret < 0)
                        return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

                if (dev->transport->map_task_SG)
                        return dev->transport->map_task_SG(task);
                return 0;
        } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
                if (dev->transport->map_task_non_SG)
                        return dev->transport->map_task_non_SG(task);
                return 0;
        } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
                if (dev->transport->cdb_none)
                        return dev->transport->cdb_none(task);
                return 0;
        } else {
                BUG();
                return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
        }
}

/*      transport_generic_new_cmd(): Called from transport_processing_thread()
 *
 *      Allocate storage transport resources from a set of values predefined
 *      by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
 *      Any non zero return here is treated as an "out of resource" op, and
 *      this function generates struct se_task(s) and/or their payloads for
 *      this CDB.
 */
static int transport_generic_new_cmd(struct se_cmd *cmd)
{
        struct se_portal_group *se_tpg;
        struct se_task *task;
        struct se_device *dev = SE_DEV(cmd);
        int ret = 0;

        /*
         * Determine if the TCM fabric module has already allocated physical
         * memory, and is directly calling transport_generic_map_mem_to_cmd()
         * to setup beforehand the linked list of physical memory at
         * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
         */
        if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
                ret = transport_allocate_resources(cmd);
                if (ret < 0)
                        return ret;
        }

        ret = transport_get_sectors(cmd);
        if (ret < 0)
                return ret;

        ret = transport_new_cmd_obj(cmd);
        if (ret < 0)
                return ret;

        /*
         * Determine if the calling TCM fabric module is talking to
         * Linux/NET via kernel sockets and needs to allocate a
         * struct iovec array to complete the struct se_cmd
         */
        se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
        if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
                ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
                if (ret < 0)
                        return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
        }

        if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
                list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
                        if (atomic_read(&task->task_sent))
                                continue;
                        if (!dev->transport->map_task_SG)
                                continue;

                        ret = dev->transport->map_task_SG(task);
                        if (ret < 0)
                                return ret;
                }
        } else {
                ret = transport_map_control_cmd_to_task(cmd);
                if (ret < 0)
                        return ret;
        }

        /*
         * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready..
         * This WRITE struct se_cmd (and all of its associated struct se_task's)
         * will be added to the struct se_device execution queue after its WRITE
         * data has arrived. (ie: It gets handled by the transport processing
         * thread a second time)
         */
        if (cmd->data_direction == DMA_TO_DEVICE) {
                transport_add_tasks_to_state_queue(cmd);
                return transport_generic_write_pending(cmd);
        }
        /*
         * Everything else but a WRITE, add the struct se_cmd's struct se_task's
         * to the execution queue.
         */
        transport_execute_tasks(cmd);
        return 0;
}

/*      transport_generic_process_write():
 *
 *
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
#if 0
        /*
         * Copy SCSI Presented DTL sector(s) from received buffers allocated to
         * original EDTL
         */
        if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
                if (!T_TASK(cmd)->t_tasks_se_num) {
                        unsigned char *dst, *buf =
                                (unsigned char *)T_TASK(cmd)->t_task_buf;

                        dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
                        if (!(dst)) {
                                printk(KERN_ERR "Unable to allocate memory for"
                                                " WRITE underflow\n");
                                transport_generic_request_failure(cmd, NULL,
                                        PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
                                return;
                        }
                        memcpy(dst, buf, cmd->cmd_spdtl);

                        kfree(T_TASK(cmd)->t_task_buf);
                        T_TASK(cmd)->t_task_buf = dst;
                } else {
                        struct scatterlist *sg =
                                (struct scatterlist *)T_TASK(cmd)->t_task_buf;
                        struct scatterlist *orig_sg;

                        orig_sg = kzalloc(sizeof(struct scatterlist) *
                                        T_TASK(cmd)->t_tasks_se_num,
                                        GFP_KERNEL);
                        if (!(orig_sg)) {
                                printk(KERN_ERR "Unable to allocate memory"
                                                " for WRITE underflow\n");
                                transport_generic_request_failure(cmd, NULL,
                                        PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
                                return;
                        }

                        memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
                                        sizeof(struct scatterlist) *
                                        T_TASK(cmd)->t_tasks_se_num);

                        cmd->data_length = cmd->cmd_spdtl;
                        /*
                         * FIXME, clear out original struct se_task and state
                         * information.
                         */
                        if (transport_generic_new_cmd(cmd) < 0) {
                                transport_generic_request_failure(cmd, NULL,
                                        PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
                                kfree(orig_sg);
                                return;
                        }

                        transport_memcpy_write_sg(cmd, orig_sg);
                }
        }
#endif
        transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);

/*      transport_generic_write_pending():
 *
 *
 */
static int transport_generic_write_pending(struct se_cmd *cmd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
        cmd->t_state = TRANSPORT_WRITE_PENDING;
        spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
        /*
         * For the TCM control CDBs using a contiguous buffer, do the memcpy
         * from the passed Linux/SCSI struct scatterlist located at
         * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at
         * T_TASK(se_cmd)->t_task_buf.
         */
        if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
                transport_memcpy_read_contig(cmd,
                                T_TASK(cmd)->t_task_buf,
                                T_TASK(cmd)->t_task_pt_sgl);
        /*
         * Clear the se_cmd for WRITE_PENDING status in order to set
         * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
         * can be called from HW target mode interrupt code.  This is safe
         * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
         * because the se_cmd->se_lun pointer is not being cleared.
         */
        transport_cmd_check_stop(cmd, 1, 0);

        /*
         * Call the fabric write_pending function here to let the
         * frontend know that WRITE buffers are ready.
         */
        ret = CMD_TFO(cmd)->write_pending(cmd);
        if (ret < 0)
                return ret;

        return PYX_TRANSPORT_WRITE_PENDING;
}

/*      transport_release_cmd_to_pool():
 *
 *
 */
void transport_release_cmd_to_pool(struct se_cmd *cmd)
{
        BUG_ON(!T_TASK(cmd));
        BUG_ON(!CMD_TFO(cmd));

        transport_free_se_cmd(cmd);
        CMD_TFO(cmd)->release_cmd_to_pool(cmd);
}
EXPORT_SYMBOL(transport_release_cmd_to_pool);

/*      transport_generic_free_cmd():
 *
 *      Called from processing frontend to release storage engine resources
 */
void transport_generic_free_cmd(
        struct se_cmd *cmd,
        int wait_for_tasks,
        int release_to_pool,
        int session_reinstatement)
{
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
                transport_release_cmd_to_pool(cmd);
        else {
                core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

                if (SE_LUN(cmd)) {
#if 0
                        printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
                                " SE_LUN(cmd)\n", cmd,
                                CMD_TFO(cmd)->get_task_tag(cmd));
#endif
                        transport_lun_remove_cmd(cmd);
                }

                if (wait_for_tasks && cmd->transport_wait_for_tasks)
                        cmd->transport_wait_for_tasks(cmd, 0, 0);

                transport_generic_remove(cmd, release_to_pool,
                                session_reinstatement);
        }
}
EXPORT_SYMBOL(transport_generic_free_cmd);

static void transport_nop_wait_for_tasks(
        struct se_cmd *cmd,
        int remove_cmd,
        int session_reinstatement)
{
        return;
}

/*      transport_lun_wait_for_tasks():
 *
 *      Called from ConfigFS context to stop the passed struct se_cmd to allow
 *      a struct se_lun to be successfully shutdown.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
        unsigned long flags;
        int ret;
        /*
         * If the frontend has already requested this struct se_cmd to
         * be stopped, we can safely ignore this struct se_cmd.
         */
        spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
        if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
                atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
                DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
                        " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
                spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
                transport_cmd_check_stop(cmd, 1, 0);
                return -1;
        }
        atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
        spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

        wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);

        ret = transport_stop_tasks_for_cmd(cmd);

        DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
                        " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
        if (!ret) {
                DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
                                CMD_TFO(cmd)->get_task_tag(cmd));
                wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
                DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
                                CMD_TFO(cmd)->get_task_tag(cmd));
        }
        transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);

        return 0;
}

/* #define DEBUG_CLEAR_LUN */
#ifdef DEBUG_CLEAR_LUN
#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
#else
#define DEBUG_CLEAR_L(x...)
#endif

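/*
 * Walk lun->lun_cmd_list and force each outstanding command off the LUN:
 * flag transport_lun_stop, wait for its tasks to quiesce via
 * transport_lun_wait_for_tasks(), release device resources, and finish
 * with a TCM_NON_EXISTENT_LUN CHECK_CONDITION back to the initiator.
 */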
static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
        struct se_cmd *cmd = NULL;
        unsigned long lun_flags, cmd_flags;
        /*
         * Do exception processing and return CHECK_CONDITION status to the
         * Initiator Port.
         */
        spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
        while (!list_empty_careful(&lun->lun_cmd_list)) {
                cmd = list_entry(lun->lun_cmd_list.next,
                        struct se_cmd, se_lun_list);
                list_del(&cmd->se_lun_list);

                if (!(T_TASK(cmd))) {
                        printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
                                " [i,t]_state: %u/%u\n",
                                CMD_TFO(cmd)->get_task_tag(cmd),
                                CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
                        BUG();
                }
                atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
                /*
                 * This will notify iscsi_target_transport.c:
                 * transport_cmd_check_stop() that a LUN shutdown is in
                 * progress for the iscsi_cmd_t.
                 */
                spin_lock(&T_TASK(cmd)->t_state_lock);
                DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
                        "_lun_stop for ITT: 0x%08x\n",
                        SE_LUN(cmd)->unpacked_lun,
                        CMD_TFO(cmd)->get_task_tag(cmd));
                atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
                spin_unlock(&T_TASK(cmd)->t_state_lock);

                spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

                if (!(SE_LUN(cmd))) {
                        printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
                                CMD_TFO(cmd)->get_task_tag(cmd),
                                CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
                        BUG();
                }
                /*
                 * If the Storage engine still owns the iscsi_cmd_t, determine
                 * and/or stop its context.
                 */
                DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
                        "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
                        CMD_TFO(cmd)->get_task_tag(cmd));

                if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
                        spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
                        continue;
                }

                DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
                        "_wait_for_tasks(): SUCCESS\n",
                        SE_LUN(cmd)->unpacked_lun,
                        CMD_TFO(cmd)->get_task_tag(cmd));

                spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
                if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
                        spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
                        goto check_cond;
                }
                atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
                transport_all_task_dev_remove_state(cmd);
                spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);

                transport_free_dev_tasks(cmd);
                /*
                 * The Storage engine stopped this struct se_cmd before it was
                 * sent to the fabric frontend for delivery back to the
                 * Initiator Node.  Return this SCSI CDB back with a
                 * CHECK_CONDITION status.
                 */
check_cond:
                transport_send_check_condition_and_sense(cmd,
                                TCM_NON_EXISTENT_LUN, 0);
                /*
                 * If the fabric frontend is waiting for this iscsi_cmd_t to
                 * be released, notify the waiting thread now that LU has
                 * finished accessing it.
                 */
                spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
                if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
                        DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
                                " struct se_cmd: %p ITT: 0x%08x\n",
                                lun->unpacked_lun,
                                cmd, CMD_TFO(cmd)->get_task_tag(cmd));

                        spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
                                        cmd_flags);
                        transport_cmd_check_stop(cmd, 1, 0);
                        complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
                        spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
                        continue;
                }
                DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
                        lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));

                spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
                spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
        }
        spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
        struct se_lun *lun = (struct se_lun *)p;

        __transport_clear_lun_from_sessions(lun);
        complete(&lun->lun_shutdown_comp);

        return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
        struct task_struct *kt;

        kt = kthread_run(transport_clear_lun_thread, (void *)lun,
                        "tcm_cl_%u", lun->unpacked_lun);
        if (IS_ERR(kt)) {
                printk(KERN_ERR "Unable to start clear_lun thread\n");
                return -1;
        }
        wait_for_completion(&lun->lun_shutdown_comp);

        return 0;
}

/*      transport_generic_wait_for_tasks():
 *
 *      Called from frontend or passthrough context to wait for storage engine
 *      to pause and/or release frontend generated struct se_cmd.
 */
static void transport_generic_wait_for_tasks(
        struct se_cmd *cmd,
        int remove_cmd,
        int session_reinstatement)
{
        unsigned long flags;

        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
                return;

        spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
        /*
         * If we are already stopped due to an external event (ie: LUN shutdown)
         * sleep until the connection can have the passed struct se_cmd back.
         * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by
         * transport_clear_lun_from_sessions() once the ConfigFS context caller
         * has completed its operation on the struct se_cmd.
         */
        if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {

                DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
                        " wait_for_completion(&T_TASK(cmd)transport_lun_fe"
                        "_stop_comp); for ITT: 0x%08x\n",
                        CMD_TFO(cmd)->get_task_tag(cmd));
                /*
                 * There is a special case for WRITES where a FE exception +
                 * LUN shutdown means ConfigFS context is still sleeping on
                 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
                 * We go ahead and up transport_lun_stop_comp just to be sure
                 * here.
                 */
                spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
                complete(&T_TASK(cmd)->transport_lun_stop_comp);
                wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
                spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);

                transport_all_task_dev_remove_state(cmd);
                /*
                 * At this point, the frontend who was the originator of this
                 * struct se_cmd, now owns the structure and can be released through
                 * normal means below.
                 */
                DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
                        " wait_for_completion(&T_TASK(cmd)transport_lun_fe_"
                        "stop_comp); for ITT: 0x%08x\n",
                        CMD_TFO(cmd)->get_task_tag(cmd));

                atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
        }
        if (!atomic_read(&T_TASK(cmd)->t_transport_active))
                goto remove;

        atomic_set(&T_TASK(cmd)->t_transport_stop, 1);

        DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
                " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
                " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
                CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
                cmd->deferred_t_state);

        spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

        wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);

        wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);

        spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
        atomic_set(&T_TASK(cmd)->t_transport_active, 0);
        atomic_set(&T_TASK(cmd)->t_transport_stop, 0);

        DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion("
                "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
                CMD_TFO(cmd)->get_task_tag(cmd));
remove:
        spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
        if (!remove_cmd)
                return;

        transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
}

static int transport_get_sense_codes(
        struct se_cmd *cmd,
        u8 *asc,
        u8 *ascq)
{
        *asc = cmd->scsi_asc;
        *ascq = cmd->scsi_ascq;

        return 0;
}

static int transport_set_sense_codes(
        struct se_cmd *cmd,
        u8 asc,
        u8 ascq)
{
        cmd->scsi_asc = asc;
        cmd->scsi_ascq = ascq;

        return 0;
}

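/*
 * Translate a TCM_* sense reason into fixed format sense data (response
 * code 0x70) at the fabric's sense offset, e.g. TCM_WRITE_PROTECTED
 * becomes sense key DATA_PROTECT with ASC 0x27, then queue the
 * CHECK_CONDITION status through CMD_TFO(cmd)->queue_status().
 */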
int transport_send_check_condition_and_sense(
        struct se_cmd *cmd,
        u8 reason,
        int from_transport)
{
        unsigned char *buffer = cmd->sense_buffer;
        unsigned long flags;
        int offset;
        u8 asc = 0, ascq = 0;

        spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
                spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
                return 0;
        }
        cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
        spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

        if (!reason && from_transport)
                goto after_reason;

        if (!from_transport)
                cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
        /*
         * Data Segment and SenseLength of the fabric response PDU.
         *
         * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
         * from include/scsi/scsi_cmnd.h
         */
        offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
                                TRANSPORT_SENSE_BUFFER);
        /*
         * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
         * SENSE KEY values from include/scsi/scsi.h
         */
        switch (reason) {
        case TCM_NON_EXISTENT_LUN:
        case TCM_UNSUPPORTED_SCSI_OPCODE:
        case TCM_SECTOR_COUNT_TOO_MANY:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID COMMAND OPERATION CODE */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
                break;
        case TCM_UNKNOWN_MODE_PAGE:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN CDB */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
                break;
        case TCM_CHECK_CONDITION_ABORT_CMD:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* BUS DEVICE RESET FUNCTION OCCURRED */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
                break;
        case TCM_INCORRECT_AMOUNT_OF_DATA:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* WRITE ERROR */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
                /* NOT ENOUGH UNSOLICITED DATA */
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
                break;
        case TCM_INVALID_CDB_FIELD:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* INVALID FIELD IN CDB */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
                break;
        case TCM_INVALID_PARAMETER_LIST:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* INVALID FIELD IN PARAMETER LIST */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
                break;
        case TCM_UNEXPECTED_UNSOLICITED_DATA:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* WRITE ERROR */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
                /* UNEXPECTED_UNSOLICITED_DATA */
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
                break;
        case TCM_SERVICE_CRC_ERROR:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* PROTOCOL SERVICE CRC ERROR */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
                /* N/A */
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
                break;
        case TCM_SNACK_REJECTED:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* READ ERROR */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
                /* FAILED RETRANSMISSION REQUEST */
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
                break;
        case TCM_WRITE_PROTECTED:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* DATA PROTECT */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
                /* WRITE PROTECTED */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
                break;
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* UNIT ATTENTION */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
                core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
                buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
                break;
        case TCM_CHECK_CONDITION_NOT_READY:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* NOT READY */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
                transport_get_sense_codes(cmd, &asc, &ascq);
                buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
                break;
        case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
        default:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* LOGICAL UNIT COMMUNICATION FAILURE */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
                break;
        }
        /*
         * This code uses linux/include/scsi/scsi.h SAM status codes!
         */
        cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
        /*
         * Automatically padded, this value is encoded in the fabric's
         * data_length response PDU containing the SCSI defined sense data.
         */
        cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
        CMD_TFO(cmd)->queue_status(cmd);
        return 0;
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
        int ret = 0;

        if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
                if (!(send_status) ||
                     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
                        return 1;
#if 0
                printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
                        " status for CDB: 0x%02x ITT: 0x%08x\n",
                        T_TASK(cmd)->t_task_cdb[0],
                        CMD_TFO(cmd)->get_task_tag(cmd));
#endif
                cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
                CMD_TFO(cmd)->queue_status(cmd);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
        /*
         * If there are still expected incoming fabric WRITEs, we wait
         * until they have completed before sending a TASK_ABORTED
         * response.  This response with TASK_ABORTED status will be
         * queued back to fabric module by transport_check_aborted_status().
         */
        if (cmd->data_direction == DMA_TO_DEVICE) {
                if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
                        atomic_inc(&T_TASK(cmd)->t_transport_aborted);
                        smp_mb__after_atomic_inc();
                        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
                        transport_new_cmd_failure(cmd);
                        return;
                }
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
        printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
                " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
                CMD_TFO(cmd)->get_task_tag(cmd));
#endif
        CMD_TFO(cmd)->queue_status(cmd);
}

/*      transport_generic_do_tmr():
 *
 *
 */
int transport_generic_do_tmr(struct se_cmd *cmd)
{
        struct se_cmd *ref_cmd;
        struct se_device *dev = SE_DEV(cmd);
        struct se_tmr_req *tmr = cmd->se_tmr_req;
        int ret;

        switch (tmr->function) {
        case ABORT_TASK:
                ref_cmd = tmr->ref_cmd;
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        case ABORT_TASK_SET:
        case CLEAR_ACA:
        case CLEAR_TASK_SET:
                tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
                break;
        case LUN_RESET:
                ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
                tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
                                         TMR_FUNCTION_REJECTED;
                break;
#if 0
        case TARGET_WARM_RESET:
                transport_generic_host_reset(dev->se_hba);
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        case TARGET_COLD_RESET:
                transport_generic_host_reset(dev->se_hba);
                transport_generic_cold_reset(dev->se_hba);
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
#endif
        default:
                printk(KERN_ERR "Unknown TMR function: 0x%02x.\n",
                                tmr->function);
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        }

        cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
        CMD_TFO(cmd)->queue_tm_rsp(cmd);

        transport_cmd_check_stop(cmd, 2, 0);
        return 0;
}

/*
 *      Called with spin_lock_irq(&dev->execute_task_lock); held
 *
 */
static struct se_task *
transport_get_task_from_state_list(struct se_device *dev)
{
        struct se_task *task;

        if (list_empty(&dev->state_task_list))
                return NULL;

        list_for_each_entry(task, &dev->state_task_list, t_state_list)
                break;

        list_del(&task->t_state_list);
        atomic_set(&task->task_state_active, 0);

        return task;
}

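/*
 * Called when dev_status carries TRANSPORT_DEVICE_SHUTDOWN: drain the
 * device's task state list and command queue, waiting for in flight tasks
 * to stop and failing queued commands with LOGICAL UNIT COMMUNICATION
 * FAILURE where a frontend reference (t_fe_count) still exists.
 */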
static void transport_processing_shutdown(struct se_device *dev)
{
        struct se_cmd *cmd;
        struct se_queue_req *qr;
        struct se_task *task;
        u8 state;
        unsigned long flags;
        /*
         * Empty the struct se_device's struct se_task state list.
         */
        spin_lock_irqsave(&dev->execute_task_lock, flags);
        while ((task = transport_get_task_from_state_list(dev))) {
                if (!(TASK_CMD(task))) {
                        printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
                        continue;
                }
                cmd = TASK_CMD(task);

                if (!T_TASK(cmd)) {
                        printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
                                " %p ITT: 0x%08x\n", task, cmd,
                                CMD_TFO(cmd)->get_task_tag(cmd));
                        continue;
                }
                spin_unlock_irqrestore(&dev->execute_task_lock, flags);

                spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);

                DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
                        " i_state/def_i_state: %d/%d, t_state/def_t_state:"
                        " %d/%d cdb: 0x%02x\n", cmd, task,
                        CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
                        CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
                        cmd->t_state, cmd->deferred_t_state,
                        T_TASK(cmd)->t_task_cdb[0]);
                DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
                        " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
                        " t_transport_stop: %d t_transport_sent: %d\n",
                        CMD_TFO(cmd)->get_task_tag(cmd),
                        T_TASK(cmd)->t_task_cdbs,
                        atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
                        atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
                        atomic_read(&T_TASK(cmd)->t_transport_active),
                        atomic_read(&T_TASK(cmd)->t_transport_stop),
                        atomic_read(&T_TASK(cmd)->t_transport_sent));

                if (atomic_read(&task->task_active)) {
                        atomic_set(&task->task_stop, 1);
                        spin_unlock_irqrestore(
                                &T_TASK(cmd)->t_state_lock, flags);

                        DEBUG_DO("Waiting for task: %p to shutdown for dev:"
                                " %p\n", task, dev);
                        wait_for_completion(&task->task_stop_comp);
                        DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
                                task, dev);

                        spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
                        atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);

                        atomic_set(&task->task_active, 0);
                        atomic_set(&task->task_stop, 0);
                }
                __transport_stop_task_timer(task, &flags);

                if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
                        spin_unlock_irqrestore(
                                        &T_TASK(cmd)->t_state_lock, flags);

                        DEBUG_DO("Skipping task: %p, dev: %p for"
                                " t_task_cdbs_ex_left: %d\n", task, dev,
                                atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));

                        spin_lock_irqsave(&dev->execute_task_lock, flags);
                        continue;
                }

                if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
                        DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
                                        " %p\n", task, dev);

                        if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
                                spin_unlock_irqrestore(
                                        &T_TASK(cmd)->t_state_lock, flags);
                                transport_send_check_condition_and_sense(
                                        cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
                                        0);
                                transport_remove_cmd_from_queue(cmd,
                                        SE_DEV(cmd)->dev_queue_obj);

                                transport_lun_remove_cmd(cmd);
                                transport_cmd_check_stop(cmd, 1, 0);
                        } else {
                                spin_unlock_irqrestore(
                                        &T_TASK(cmd)->t_state_lock, flags);

                                transport_remove_cmd_from_queue(cmd,
                                        SE_DEV(cmd)->dev_queue_obj);

                                transport_lun_remove_cmd(cmd);

                                if (transport_cmd_check_stop(cmd, 1, 0))
                                        transport_generic_remove(cmd, 0, 0);
                        }

                        spin_lock_irqsave(&dev->execute_task_lock, flags);
                        continue;
                }
                DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
                                task, dev);

                if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
                        spin_unlock_irqrestore(
                                &T_TASK(cmd)->t_state_lock, flags);
                        transport_send_check_condition_and_sense(cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                        transport_remove_cmd_from_queue(cmd,
                                SE_DEV(cmd)->dev_queue_obj);

                        transport_lun_remove_cmd(cmd);
                        transport_cmd_check_stop(cmd, 1, 0);
                } else {
                        spin_unlock_irqrestore(
                                &T_TASK(cmd)->t_state_lock, flags);

                        transport_remove_cmd_from_queue(cmd,
                                SE_DEV(cmd)->dev_queue_obj);
                        transport_lun_remove_cmd(cmd);

                        if (transport_cmd_check_stop(cmd, 1, 0))
                                transport_generic_remove(cmd, 0, 0);
                }

                spin_lock_irqsave(&dev->execute_task_lock, flags);
        }
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
        /*
         * Empty the struct se_device's struct se_cmd list.
         */
        spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
        while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
                spin_unlock_irqrestore(
                                &dev->dev_queue_obj->cmd_queue_lock, flags);
                cmd = (struct se_cmd *)qr->cmd;
                state = qr->state;
                kfree(qr);

                DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
                                cmd, state);

                if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
                        transport_send_check_condition_and_sense(cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);

                        transport_lun_remove_cmd(cmd);
                        transport_cmd_check_stop(cmd, 1, 0);
                } else {
                        transport_lun_remove_cmd(cmd);
                        if (transport_cmd_check_stop(cmd, 1, 0))
                                transport_generic_remove(cmd, 0, 0);
                }
                spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
        }
        spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
}

/*      transport_processing_thread():
 *
 *
 */
static int transport_processing_thread(void *param)
{
        int ret, t_state;
        struct se_cmd *cmd;
        struct se_device *dev = (struct se_device *) param;
        struct se_queue_req *qr;

        set_user_nice(current, -20);

        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
                                atomic_read(&dev->dev_queue_obj->queue_cnt) ||
                                kthread_should_stop());
                if (ret < 0)
                        goto out;

                spin_lock_irq(&dev->dev_status_lock);
                if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
                        spin_unlock_irq(&dev->dev_status_lock);
                        transport_processing_shutdown(dev);
                        continue;
                }
                spin_unlock_irq(&dev->dev_status_lock);

get_cmd:
                __transport_execute_tasks(dev);

                qr = transport_get_qr_from_queue(dev->dev_queue_obj);
                if (!(qr))
                        continue;

                cmd = (struct se_cmd *)qr->cmd;
                t_state = qr->state;
                kfree(qr);

                switch (t_state) {
                case TRANSPORT_NEW_CMD_MAP:
                        if (!(CMD_TFO(cmd)->new_cmd_map)) {
                                printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
                                        " NULL for TRANSPORT_NEW_CMD_MAP\n");
                                BUG();
                        }
                        ret = CMD_TFO(cmd)->new_cmd_map(cmd);
                        if (ret < 0) {
                                cmd->transport_error_status = ret;
                                transport_generic_request_failure(cmd, NULL,
                                                0, (cmd->data_direction !=
                                                    DMA_TO_DEVICE));
                                break;
                        }
                        /* Fall through */
                case TRANSPORT_NEW_CMD:
                        ret = transport_generic_new_cmd(cmd);
                        if (ret < 0) {
                                cmd->transport_error_status = ret;
                                transport_generic_request_failure(cmd, NULL,
                                        0, (cmd->data_direction !=
                                            DMA_TO_DEVICE));
                        }
                        break;
                case TRANSPORT_PROCESS_WRITE:
                        transport_generic_process_write(cmd);
                        break;
                case TRANSPORT_COMPLETE_OK:
                        transport_stop_all_task_timers(cmd);
                        transport_generic_complete_ok(cmd);
                        break;
                case TRANSPORT_REMOVE:
                        transport_generic_remove(cmd, 1, 0);
                        break;
                case TRANSPORT_PROCESS_TMR:
                        transport_generic_do_tmr(cmd);
                        break;
                case TRANSPORT_COMPLETE_FAILURE:
                        transport_generic_request_failure(cmd, NULL, 1, 1);
                        break;
                case TRANSPORT_COMPLETE_TIMEOUT:
                        transport_stop_all_task_timers(cmd);
                        transport_generic_request_timeout(cmd);
                        break;
                default:
                        printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
                                " %d for ITT: 0x%08x i_state: %d on SE LUN:"
                                " %u\n", t_state, cmd->deferred_t_state,
                                CMD_TFO(cmd)->get_task_tag(cmd),
                                CMD_TFO(cmd)->get_cmd_state(cmd),
                                SE_LUN(cmd)->unpacked_lun);
                        BUG();
                }

                goto get_cmd;
        }

out:
        transport_release_all_cmds(dev);
        dev->process_thread = NULL;
        return 0;
}