/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"

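/*
 * READ CAPACITY (10) emulation: returns an 8-byte parameter block with the
 * last addressable LBA in bytes 0-3 and the logical block length in bytes
 * 4-7, both big-endian. The LBA is clamped to 0xffffffff so an initiator
 * knows to retry with READ CAPACITY (16) on larger devices.
 */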
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

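/*
 * READ CAPACITY (16) emulation: 32-byte parameter block with the last LBA in
 * bytes 0-7, the logical block length in bytes 8-11, and the TPE bit
 * (byte 14, bit 7) advertising thin provisioning when UNMAP/WRITE SAME
 * w/UNMAP emulation is enabled.
 */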
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set the Thin Provisioning Enable bit in READ CAPACITY (16) byte 14,
	 * per sbc3r22, if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] = 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

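/*
 * Decode the NUMBER OF LOGICAL BLOCKS field from a WRITE SAME (10/16/32) CDB.
 * A value of zero means "from the starting LBA through the last LBA on the
 * medium", which is why the remaining capacity is returned in that case.
 */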
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied, otherwise
	 * calculate the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

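/*
 * Used by the SYNCHRONIZE_CACHE parsing below: derives a sector count from
 * cmd->data_length and checks that t_task_lba plus that count does not run
 * past the last LBA reported by the backend; returns -EINVAL on overflow.
 */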
static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

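/*
 * CDB decode helpers: TRANSFER LENGTH and LOGICAL BLOCK ADDRESS extraction
 * for the 6/10/12/16/32-byte read/write CDB formats. For example, a READ(10)
 * carries a 16-bit transfer length in bytes 7-8, so 0x00 0x08 decodes to 8
 * logical blocks.
 */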
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
	       (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
	       (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

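/*
 * 'flags' points at byte 1 of the WRITE SAME CDB (byte 10 for WRITE_SAME_32):
 * bit 3 (0x08) is UNMAP, bit 2 (0x04) is PBDATA and bit 1 (0x02) is LBDATA.
 * Only the UNMAP variant and plain WRITE SAME are supported here.
 */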
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

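/*
 * Completion callback for XDWRITEREAD_10/32: invoked after the backend has
 * finished the bidirectional I/O, it XORs the data-out payload into the
 * BIDI read buffer so the XOR result is returned in the data-in phase.
 */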
static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */
	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

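/*
 * Runs as ->transport_complete_callback once the initial READ for
 * COMPARE AND WRITE finishes: the read data (t_bidi_data_sg) is compared
 * against the verify payload at the start of the data-out buffer; on a match
 * a new single-entry SGL pointing at the write payload is installed and the
 * command is re-dispatched through sbc_execute_rw(), otherwise MISCOMPARE is
 * returned and dev->caw_sem is released.
 */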
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

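/*
 * ->execute_cmd entry point for COMPARE AND WRITE: serializes against other
 * COMPARE AND WRITE commands via dev->caw_sem, shrinks cmd->data_length to a
 * single instance of NoLB blocks, and submits the READ portion; the compare
 * and the WRITE are driven from compare_and_write_callback() above.
 */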
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of the WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

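/*
 * Main SBC CDB parser, called by block-backed backends (e.g. IBLOCK and
 * FILEIO) from their ->parse_cdb() hooks: decodes LBA and sector counts,
 * wires up ->execute_cmd for the opcode, and sanity checks the transfer
 * against fabric_max_sectors, hw_max_sectors and the device capacity before
 * handing the expected size to target_cmd_size_check().
 */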
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not extend past the
		 * end of device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of the original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

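/*
 * A backend hands its I/O callbacks to sbc_parse_cdb() through struct sbc_ops
 * from its ->parse_cdb() hook. A minimal sketch (the names below are
 * illustrative, not taken from this file):
 *
 *	static struct sbc_ops my_sbc_ops = {
 *		.execute_rw		= my_execute_rw,
 *		.execute_sync_cache	= my_execute_sync_cache,
 *		.execute_unmap		= my_execute_unmap,
 *	};
 *
 *	static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &my_sbc_ops);
 *	}
 */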
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

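/*
 * Shared UNMAP parameter list walker: the payload is an 8-byte header
 * (bytes 0-1 DATA LENGTH, bytes 2-3 BLOCK DESCRIPTOR DATA LENGTH) followed by
 * 16-byte descriptors holding an 8-byte LBA and a 4-byte block count; each
 * descriptor is validated against the max_unmap_* attributes and passed to
 * the backend's do_unmap_fn.
 */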
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);