2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
/* Driver version string and release date (reported via sysfs/procfs). */
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
68 #define UNRECOVERED_READ_ERR 0x11
69 #define PARAMETER_LIST_LENGTH_ERR 0x1a
70 #define INVALID_OPCODE 0x20
71 #define ADDR_OUT_OF_RANGE 0x21
/* NOTE(review): INVALID_COMMAND_OPCODE deliberately aliases
 * INVALID_OPCODE (both 0x20, the T10-assigned ASC). */
72 #define INVALID_COMMAND_OPCODE 0x20
73 #define INVALID_FIELD_IN_CDB 0x24
74 #define INVALID_FIELD_IN_PARAM_LIST 0x26
75 #define POWERON_RESET 0x29
76 #define SAVING_PARAMS_UNSUP 0x39
77 #define TRANSPORT_PROBLEM 0x4b
78 #define THRESHOLD_EXCEEDED 0x5d
79 #define LOW_POWER_COND_ON 0x5e
81 /* Additional Sense Code Qualifier (ASCQ) */
82 #define ACK_NAK_TO 0x3
84 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
86 /* Default values for driver parameters */
87 #define DEF_NUM_HOST 1
88 #define DEF_NUM_TGTS 1
89 #define DEF_MAX_LUNS 1
90 /* With these defaults, this driver will make 1 host with 1 target
91 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
95 #define DEF_DEV_SIZE_MB 8
99 #define DEF_EVERY_NTH 0
100 #define DEF_FAKE_RW 0
104 #define DEF_LBPWS10 0
106 #define DEF_LOWEST_ALIGNED 0
107 #define DEF_NO_LUN_0 0
108 #define DEF_NUM_PARTS 0
110 #define DEF_OPT_BLKS 64
111 #define DEF_PHYSBLK_EXP 0
113 #define DEF_REMOVABLE false
114 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
115 #define DEF_SECTOR_SIZE 512
116 #define DEF_UNMAP_ALIGNMENT 0
117 #define DEF_UNMAP_GRANULARITY 1
118 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
119 #define DEF_UNMAP_MAX_DESC 256
120 #define DEF_VIRTUAL_GB 0
121 #define DEF_VPD_USE_HOSTNO 1
122 #define DEF_WRITESAME_LENGTH 0xFFFF
/* Fault-injection / tracing flags OR-ed into the "opts" module
 * parameter; tested throughout as (SCSI_DEBUG_OPT_x & scsi_debug_opts). */
124 /* bit mask values for scsi_debug_opts */
125 #define SCSI_DEBUG_OPT_NOISE 1
126 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
127 #define SCSI_DEBUG_OPT_TIMEOUT 4
128 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
129 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
130 #define SCSI_DEBUG_OPT_DIF_ERR 32
131 #define SCSI_DEBUG_OPT_DIX_ERR 64
132 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
133 #define SCSI_DEBUG_OPT_SHORT_TRANSFER 256
134 /* When "every_nth" > 0 then modulo "every_nth" commands:
135 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
136 * - a RECOVERED_ERROR is simulated on successful read and write
137 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
138 * - a TRANSPORT_ERROR is simulated on successful read and write
139 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
141 * When "every_nth" < 0 then after "- every_nth" commands:
142 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
143 * - a RECOVERED_ERROR is simulated on successful read and write
144 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
145 * - a TRANSPORT_ERROR is simulated on successful read and write
146 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
147 * This will continue until some other action occurs (e.g. the user
148 * writing a new value (other than -1 or 1) to every_nth via sysfs).
151 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
152 * sector on read commands: */
153 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
154 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
156 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
157 * or "peripheral device" addressing (value 0) */
158 #define SAM2_LUN_ADDRESS_METHOD 0
159 #define SAM2_WLUN_REPORT_LUNS 0xc101
161 /* Can queue up to this number of commands. Typically commands
162 * that have a non-zero delay are queued. */
163 #define SCSI_DEBUG_CANQUEUE 255
/* Run-time copies of the module parameters (set at load time and,
 * for most of them, writable afterwards via sysfs). */
165 static int scsi_debug_add_host = DEF_NUM_HOST;
166 static int scsi_debug_ato = DEF_ATO;
167 static int scsi_debug_delay = DEF_DELAY;
168 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
169 static int scsi_debug_dif = DEF_DIF;
170 static int scsi_debug_dix = DEF_DIX;
171 static int scsi_debug_dsense = DEF_D_SENSE;
172 static int scsi_debug_every_nth = DEF_EVERY_NTH;
173 static int scsi_debug_fake_rw = DEF_FAKE_RW;
174 static unsigned int scsi_debug_guard = DEF_GUARD;
175 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
176 static int scsi_debug_max_luns = DEF_MAX_LUNS;
177 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
178 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
179 static int scsi_debug_no_uld = 0;
180 static int scsi_debug_num_parts = DEF_NUM_PARTS;
181 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
182 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
183 static int scsi_debug_opts = DEF_OPTS;
184 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
185 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
186 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
187 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
188 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
189 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* Logical block provisioning (thin provisioning) knobs, see
 * scsi_debug_lbp() below. */
190 static unsigned int scsi_debug_lbpu = DEF_LBPU;
191 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
192 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
193 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
194 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
195 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
196 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
197 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
198 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
199 static bool scsi_debug_removable = DEF_REMOVABLE;
200 static bool scsi_debug_clustering;
202 static int scsi_debug_cmnd_count = 0;
/* All simulated targets are writable in this driver. */
204 #define DEV_READONLY(TGT) (0)
/* Size of the shared backing store (sectors) and reported capacity. */
206 static unsigned int sdebug_store_sectors;
207 static sector_t sdebug_capacity; /* in sectors */
209 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
210 may still need them */
211 static int sdebug_heads; /* heads per disk */
212 static int sdebug_cylinders_per; /* cylinders per surface */
213 static int sdebug_sectors_per; /* sectors per cylinder */
215 #define SDEBUG_MAX_PARTS 4
217 #define SDEBUG_SENSE_LEN 32
219 #define SCSI_DEBUG_MAX_CMD_LEN 32
221 static unsigned int scsi_debug_lbp(void)
223 return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
/* Per-logical-unit state.
 * NOTE(review): this listing has gaps -- members such as lun, target,
 * wlun, stopped, reset and used are referenced by code below but
 * their declarations are elided here; confirm against the full file. */
226 struct sdebug_dev_info {
227 struct list_head dev_list;
228 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
229 unsigned int channel;
232 struct sdebug_host_info *sdbg_host;
/* Per simulated-HBA state: linked on sdebug_host_list, owns a list
 * of its sdebug_dev_info instances. */
239 struct sdebug_host_info {
240 struct list_head host_list;
241 struct Scsi_Host *shost;
243 struct list_head dev_info_list;
/* Map an embedded struct device back to its sdebug_host_info. */
246 #define to_sdebug_host(d) \
247 container_of(d, struct sdebug_host_info, dev)
/* Global list of simulated hosts, protected by sdebug_host_list_lock. */
249 static LIST_HEAD(sdebug_host_list);
250 static DEFINE_SPINLOCK(sdebug_host_list_lock);
/* Completion callback invoked when a (possibly delayed) command finishes. */
252 typedef void (* done_funct_t) (struct scsi_cmnd *);
/* One slot per queued (delayed-response) command; the timer fires
 * when the simulated delay expires. */
254 struct sdebug_queued_cmd {
256 struct timer_list cmnd_timer;
257 done_funct_t done_funct;
258 struct scsi_cmnd * a_cmnd;
261 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
/* Backing stores shared by all simulated LUNs. */
263 static unsigned char * fake_storep; /* ramdisk storage */
264 static struct sd_dif_tuple *dif_storep; /* protection info */
265 static void *map_storep; /* provisioning map */
267 static unsigned long map_size;
/* Statistics exported via sysfs/procfs. */
268 static int num_aborts = 0;
269 static int num_dev_resets = 0;
270 static int num_bus_resets = 0;
271 static int num_host_resets = 0;
272 static int dix_writes;
273 static int dix_reads;
274 static int dif_errors;
/* queued_arr_lock protects queued_arr; atomic_rw serialises access
 * to the shared ramdisk / protection store. */
276 static DEFINE_SPINLOCK(queued_arr_lock);
277 static DEFINE_RWLOCK(atomic_rw);
279 static char sdebug_proc_name[] = "scsi_debug";
281 static struct bus_type pseudo_lld_bus;
283 static struct device_driver sdebug_driverfs_driver = {
284 .name = sdebug_proc_name,
285 .bus = &pseudo_lld_bus,
/* Canned scsi_cmnd result values for CHECK CONDITION responses. */
288 static const int check_condition_result =
289 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
291 static const int illegal_condition_result =
292 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
/* Control and informational-exceptions mode page defaults.
 * NOTE(review): only the leading bytes are visible in this listing. */
294 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
296 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
299 static void *fake_store(unsigned long long lba)
301 lba = do_div(lba, sdebug_store_sectors);
303 return fake_storep + lba * scsi_debug_sector_size;
306 static struct sd_dif_tuple *dif_store(sector_t sector)
308 sector = do_div(sector, sdebug_store_sectors);
310 return dif_storep + sector;
/* Forward declarations: add/remove one simulated adapter (host). */
313 static int sdebug_add_adapter(void);
314 static void sdebug_remove_adapter(void);
/* Propagate the current num_tgts/max_luns module parameters to every
 * registered pseudo host (called after a sysfs parameter change). */
316 static void sdebug_max_tgts_luns(void)
318 struct sdebug_host_info *sdbg_host;
319 struct Scsi_Host *hpnt;
321 spin_lock(&sdebug_host_list_lock);
322 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
323 hpnt = sdbg_host->shost;
/* Leave room for the host's own id within the target id range. */
324 if ((hpnt->this_id >= 0) &&
325 (scsi_debug_num_tgts > hpnt->this_id))
326 hpnt->max_id = scsi_debug_num_tgts + 1;
328 hpnt->max_id = scsi_debug_num_tgts;
329 /* scsi_debug_max_luns; */
/* Large max_lun so the REPORT LUNS well-known LUN stays addressable. */
330 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
332 spin_unlock(&sdebug_host_list_lock);
/* Record sense data (fixed or descriptor format, chosen by the
 * dsense parameter) into the device's per-LUN sense buffer.
 * NOTE(review): the asc/asq parameter declarations are elided in
 * this listing. */
335 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
338 unsigned char *sbuff;
340 sbuff = devip->sense_buff;
341 memset(sbuff, 0, SDEBUG_SENSE_LEN);
343 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
345 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
346 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
347 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
/* Decode the starting LBA, the transfer length in blocks, and (for
 * 32-byte CDBs) the expected initial logical block reference tag
 * from the CDB. Big-endian CDB fields are assembled byte-by-byte.
 * Handles the 32/16/12/10/6-byte read/write CDB formats. */
350 static void get_data_transfer_info(unsigned char *cmd,
351 unsigned long long *lba, unsigned int *num,
/* 32-byte variable-length CDB: 8-byte LBA at bytes 12-19, 4-byte
 * expected-initial-LBA tag at bytes 20-23, count at bytes 28-31. */
357 case VARIABLE_LENGTH_CMD:
358 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
359 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
360 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
361 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
363 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
364 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
366 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
/* 16-byte CDB: 8-byte LBA at bytes 2-9, count at bytes 10-13. */
373 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
374 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
375 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
376 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
378 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
/* 12-byte CDB: 4-byte LBA, 4-byte count. */
383 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
386 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
/* 10-byte CDB: 4-byte LBA, 2-byte count. */
393 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
396 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
/* 6-byte CDB: 21-bit LBA; a count of 0 means 256 blocks. */
400 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
401 (u32)(cmd[1] & 0x1f) << 16;
402 *num = (0 == cmd[4]) ? 256 : cmd[4];
/* ioctl entry point: only logs the command number when OPT_NOISE is
 * set; no ioctls are actually implemented (the comment below records
 * why -ENOTTY is not returned). */
409 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
411 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
412 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
415 /* return -ENOTTY; // correct return but upsets fdisk */
/* Report a POWERON_RESET unit attention if a reset is pending, or a
 * NOT READY (initializing command required) condition when the LUN
 * has been stopped and reset_only is 0. Returns 0 when the device
 * is ready, else check_condition_result. */
418 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
419 struct sdebug_dev_info * devip)
422 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
423 printk(KERN_INFO "scsi_debug: Reporting Unit "
424 "attention: power on reset\n")
426 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
427 return check_condition_result;
429 if ((0 == reset_only) && devip->stopped) {
430 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
431 printk(KERN_INFO "scsi_debug: Reporting Not "
432 "ready: initializing command required\n");
433 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
435 return check_condition_result;
440 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
441 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
/* Copy 'arr' into the command's data-in scatter/gather list and
 * record the residual (untransferred) byte count. */
445 struct scsi_data_buffer *sdb = scsi_in(scp);
/* Only legal for bidirectional or DMA_FROM_DEVICE commands. */
449 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
450 return (DID_ERROR << 16);
452 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
454 sdb->resid = scsi_bufflen(scp) - act_len;
459 /* Returns number of bytes fetched into 'arr' or -1 if error. */
460 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
/* Pull data-out payload from the command's scatter/gather list. */
463 if (!scsi_bufflen(scp))
/* Only legal for bidirectional or DMA_TO_DEVICE commands. */
465 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
468 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
/* Fixed-width INQUIRY identification strings, space padded per SPC
 * (8-byte vendor, 16-byte product, 4-byte revision). */
472 static const char * inq_vendor_id = "Linux ";
473 static const char * inq_product_id = "scsi_debug ";
474 static const char * inq_product_rev = "0004";
/* Build the Device Identification VPD page (0x83) designator list:
 * T10 vendor id, NAA-5 logical unit id, relative target port,
 * NAA-5 target port / target port group / target device ids, and a
 * SCSI name string -- all with faked company ids. Returns the number
 * of bytes written (page payload length). */
476 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
477 int target_dev_id, int dev_id_num,
478 const char * dev_id_str,
484 port_a = target_dev_id + 1;
485 /* T10 vendor identifier field format (faked) */
486 arr[0] = 0x2; /* ASCII */
489 memcpy(&arr[4], inq_vendor_id, 8);
490 memcpy(&arr[12], inq_product_id, 16);
491 memcpy(&arr[28], dev_id_str, dev_id_str_len);
492 num = 8 + 16 + dev_id_str_len;
/* dev_id_num < 0 flags a well-known LUN: skip the LU designator. */
495 if (dev_id_num >= 0) {
496 /* NAA-5, Logical unit identifier (binary) */
497 arr[num++] = 0x1; /* binary (not necessarily sas) */
498 arr[num++] = 0x3; /* PIV=0, lu, naa */
501 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
505 arr[num++] = (dev_id_num >> 24);
506 arr[num++] = (dev_id_num >> 16) & 0xff;
507 arr[num++] = (dev_id_num >> 8) & 0xff;
508 arr[num++] = dev_id_num & 0xff;
509 /* Target relative port number */
510 arr[num++] = 0x61; /* proto=sas, binary */
511 arr[num++] = 0x94; /* PIV=1, target port, rel port */
512 arr[num++] = 0x0; /* reserved */
513 arr[num++] = 0x4; /* length */
514 arr[num++] = 0x0; /* reserved */
515 arr[num++] = 0x0; /* reserved */
517 arr[num++] = 0x1; /* relative port A */
519 /* NAA-5, Target port identifier */
520 arr[num++] = 0x61; /* proto=sas, binary */
521 arr[num++] = 0x93; /* piv=1, target port, naa */
524 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
528 arr[num++] = (port_a >> 24);
529 arr[num++] = (port_a >> 16) & 0xff;
530 arr[num++] = (port_a >> 8) & 0xff;
531 arr[num++] = port_a & 0xff;
532 /* NAA-5, Target port group identifier */
533 arr[num++] = 0x61; /* proto=sas, binary */
534 arr[num++] = 0x95; /* piv=1, target port group id */
539 arr[num++] = (port_group_id >> 8) & 0xff;
540 arr[num++] = port_group_id & 0xff;
541 /* NAA-5, Target device identifier */
542 arr[num++] = 0x61; /* proto=sas, binary */
543 arr[num++] = 0xa3; /* piv=1, target device, naa */
546 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
550 arr[num++] = (target_dev_id >> 24);
551 arr[num++] = (target_dev_id >> 16) & 0xff;
552 arr[num++] = (target_dev_id >> 8) & 0xff;
553 arr[num++] = target_dev_id & 0xff;
554 /* SCSI name string: Target device identifier */
555 arr[num++] = 0x63; /* proto=sas, UTF-8 */
556 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
559 memcpy(arr + num, "naa.52222220", 12);
561 snprintf(b, sizeof(b), "%08X", target_dev_id);
562 memcpy(arr + num, b, 8);
564 memset(arr + num, 0, 4);
/* Software interface identification VPD page (0x84) payload, from
 * the page's 4th byte onward: three fake EUI-48-style identifiers
 * (company id 0x222222, vendor id 0xbb, instances 0/1/2). */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
		    0x22,0x22,0x22,0x0,0xbb,0x1,
		    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Copy the canned 0x84 page payload into 'arr'; returns its length. */
static int inquiry_evpd_84(unsigned char * arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
/*
 * Management network addresses VPD page (0x85). Emits two network
 * service descriptors (storage configuration service, then logging
 * service) whose URL payloads are NUL terminated and padded to a
 * multiple of 4 bytes. Returns the number of bytes written from the
 * page's 4th byte onward.
 */
static int inquiry_evpd_85(unsigned char * arr)
{
	int num = 0;
	const char * na1 = "https://www.kernel.org/config";
	const char * na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;	/* +1 for the NUL terminator */
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
616 /* SCSI ports VPD page */
/* Advertises two relative target ports, each with a 12-byte NAA-5
 * target port descriptor (fake SAS company id 0x222222). Port B is
 * the fake secondary port with nothing attached.
 * NOTE(review): num/port_b initialisation and the company-id data
 * bytes are elided in this listing. */
617 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
622 port_a = target_dev_id + 1;
624 arr[num++] = 0x0; /* reserved */
625 arr[num++] = 0x0; /* reserved */
627 arr[num++] = 0x1; /* relative port 1 (primary) */
628 memset(arr + num, 0, 6);
631 arr[num++] = 12; /* length tp descriptor */
632 /* naa-5 target port identifier (A) */
633 arr[num++] = 0x61; /* proto=sas, binary */
634 arr[num++] = 0x93; /* PIV=1, target port, NAA */
635 arr[num++] = 0x0; /* reserved */
636 arr[num++] = 0x8; /* length */
637 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
641 arr[num++] = (port_a >> 24);
642 arr[num++] = (port_a >> 16) & 0xff;
643 arr[num++] = (port_a >> 8) & 0xff;
644 arr[num++] = port_a & 0xff;
646 arr[num++] = 0x0; /* reserved */
647 arr[num++] = 0x0; /* reserved */
649 arr[num++] = 0x2; /* relative port 2 (secondary) */
650 memset(arr + num, 0, 6);
653 arr[num++] = 12; /* length tp descriptor */
654 /* naa-5 target port identifier (B) */
655 arr[num++] = 0x61; /* proto=sas, binary */
656 arr[num++] = 0x93; /* PIV=1, target port, NAA */
657 arr[num++] = 0x0; /* reserved */
658 arr[num++] = 0x8; /* length */
659 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
663 arr[num++] = (port_b >> 24);
664 arr[num++] = (port_b >> 16) & 0xff;
665 arr[num++] = (port_b >> 8) & 0xff;
666 arr[num++] = port_b & 0xff;
/* Canned payload for the ATA information VPD page (0x89): fake SAT
 * layer identity plus ATA IDENTIFY DEVICE data, starting at the
 * page's 4th byte. NOTE(review): some data lines are elided in this
 * listing. */
672 static unsigned char vpd89_data[] = {
673 /* from 4th byte */ 0,0,0,0,
674 'l','i','n','u','x',' ',' ',' ',
675 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
677 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
679 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
680 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
681 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
682 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
684 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
686 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
688 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
689 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
690 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
691 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
692 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
693 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
694 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
698 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
699 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
700 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
701 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
705 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
706 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
707 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
708 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
709 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
710 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
711 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
712 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
713 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
716 static int inquiry_evpd_89(unsigned char * arr)
718 memcpy(arr, vpd89_data, sizeof(vpd89_data));
719 return sizeof(vpd89_data);
723 /* Block limits VPD page (SBC-3) */
/* Default 0xb0 page payload (from the page's 4th byte); the leading
 * fields are overwritten at run time by inquiry_evpd_b0(). */
724 static unsigned char vpdb0_data[] = {
725 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
726 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
727 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
728 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
/* Build the Block Limits VPD page (0xb0): transfer length limits,
 * optimal transfer length, and -- when logical block provisioning is
 * enabled -- the UNMAP/WRITE SAME limits. Returns the page payload
 * length. */
731 static int inquiry_evpd_b0(unsigned char * arr)
735 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
737 /* Optimal transfer length granularity */
738 gran = 1 << scsi_debug_physblk_exp;
739 arr[2] = (gran >> 8) & 0xff;
740 arr[3] = gran & 0xff;
742 /* Maximum Transfer Length */
743 if (sdebug_store_sectors > 0x400) {
744 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
745 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
746 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
747 arr[7] = sdebug_store_sectors & 0xff;
750 /* Optimal Transfer Length */
751 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
753 if (scsi_debug_lbpu) {
754 /* Maximum Unmap LBA Count */
755 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
757 /* Maximum Unmap Block Descriptor Count */
758 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
761 /* Unmap Granularity Alignment */
762 if (scsi_debug_unmap_alignment) {
763 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
764 arr[28] |= 0x80; /* UGAVALID */
767 /* Optimal Unmap Granularity */
768 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
770 /* Maximum WRITE SAME Length */
771 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
773 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
775 return sizeof(vpdb0_data);
/* Block device characteristics VPD page (SBC-3, 0xb1): reports a
 * non-rotating (solid state) medium in a sub-1.8" form factor.
 * Fills 0x3c bytes of 'arr' and returns that length. */
static int inquiry_evpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */

	return 0x3c;
}
790 /* Logical block provisioning VPD page (SBC-3) */
/* Advertises the enabled provisioning mechanisms (LBPU/LBPWS/
 * LBPWS10) and the LBPRZ read-zeros guarantee.
 * NOTE(review): set-up, bit assignments and return are elided in
 * this listing. */
791 static int inquiry_evpd_b2(unsigned char *arr)
794 arr[0] = 0; /* threshold exponent */
799 if (scsi_debug_lbpws)
802 if (scsi_debug_lbpws10)
805 if (scsi_debug_lbprz)
811 #define SDEBUG_LONG_INQ_SZ 96
812 #define SDEBUG_MAX_INQ_ARR_SZ 584
/* Respond to the INQUIRY command: dispatches EVPD page requests to
 * the inquiry_evpd_* helpers above, otherwise builds the standard
 * INQUIRY response. Returns 0, a check condition, or DID_REQUEUE on
 * allocation failure. */
814 static int resp_inquiry(struct scsi_cmnd * scp, int target,
815 struct sdebug_dev_info * devip)
817 unsigned char pq_pdt;
819 unsigned char *cmd = (unsigned char *)scp->cmnd;
820 int alloc_len, n, ret;
822 alloc_len = (cmd[3] << 8) + cmd[4];
823 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
825 return DID_REQUEUE << 16;
/* Peripheral qualifier/device type byte. */
827 pq_pdt = 0x1e; /* present, wlun */
828 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
829 pq_pdt = 0x7f; /* not present, no device type */
831 pq_pdt = (scsi_debug_ptype & 0x1f);
833 if (0x2 & cmd[1]) { /* CMDDT bit set */
834 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
837 return check_condition_result;
838 } else if (0x1 & cmd[1]) { /* EVPD bit set */
839 int lu_id_num, port_group_id, target_dev_id, len;
841 int host_no = devip->sdbg_host->shost->host_no;
/* Synthesise stable-looking ids from host/channel/target/lun. */
843 port_group_id = (((host_no + 1) & 0x7f) << 8) +
844 (devip->channel & 0x7f);
845 if (0 == scsi_debug_vpd_use_hostno)
847 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
848 (devip->target * 1000) + devip->lun);
849 target_dev_id = ((host_no + 1) * 2000) +
850 (devip->target * 1000) - 3;
851 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
852 if (0 == cmd[2]) { /* supported vital product data pages */
853 arr[1] = cmd[2]; /*sanity */
855 arr[n++] = 0x0; /* this page */
856 arr[n++] = 0x80; /* unit serial number */
857 arr[n++] = 0x83; /* device identification */
858 arr[n++] = 0x84; /* software interface ident. */
859 arr[n++] = 0x85; /* management network addresses */
860 arr[n++] = 0x86; /* extended inquiry */
861 arr[n++] = 0x87; /* mode page policy */
862 arr[n++] = 0x88; /* SCSI ports */
863 arr[n++] = 0x89; /* ATA information */
864 arr[n++] = 0xb0; /* Block limits (SBC) */
865 arr[n++] = 0xb1; /* Block characteristics (SBC) */
866 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
868 arr[3] = n - 4; /* number of supported VPD pages */
869 } else if (0x80 == cmd[2]) { /* unit serial number */
870 arr[1] = cmd[2]; /*sanity */
872 memcpy(&arr[4], lu_id_str, len);
873 } else if (0x83 == cmd[2]) { /* device identification */
874 arr[1] = cmd[2]; /*sanity */
875 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
876 target_dev_id, lu_id_num,
878 } else if (0x84 == cmd[2]) { /* Software interface ident. */
879 arr[1] = cmd[2]; /*sanity */
880 arr[3] = inquiry_evpd_84(&arr[4]);
881 } else if (0x85 == cmd[2]) { /* Management network addresses */
882 arr[1] = cmd[2]; /*sanity */
883 arr[3] = inquiry_evpd_85(&arr[4]);
884 } else if (0x86 == cmd[2]) { /* extended inquiry */
885 arr[1] = cmd[2]; /*sanity */
886 arr[3] = 0x3c; /* number of following entries */
887 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
888 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
889 else if (scsi_debug_dif)
890 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
892 arr[4] = 0x0; /* no protection stuff */
893 arr[5] = 0x7; /* head of q, ordered + simple q's */
894 } else if (0x87 == cmd[2]) { /* mode page policy */
895 arr[1] = cmd[2]; /*sanity */
896 arr[3] = 0x8; /* number of following entries */
897 arr[4] = 0x2; /* disconnect-reconnect mp */
898 arr[6] = 0x80; /* mlus, shared */
899 arr[8] = 0x18; /* protocol specific lu */
900 arr[10] = 0x82; /* mlus, per initiator port */
901 } else if (0x88 == cmd[2]) { /* SCSI Ports */
902 arr[1] = cmd[2]; /*sanity */
903 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
904 } else if (0x89 == cmd[2]) { /* ATA information */
905 arr[1] = cmd[2]; /*sanity */
906 n = inquiry_evpd_89(&arr[4]);
909 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
910 arr[1] = cmd[2]; /*sanity */
911 arr[3] = inquiry_evpd_b0(&arr[4]);
912 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
913 arr[1] = cmd[2]; /*sanity */
914 arr[3] = inquiry_evpd_b1(&arr[4]);
915 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
916 arr[1] = cmd[2]; /*sanity */
917 arr[3] = inquiry_evpd_b2(&arr[4]);
919 /* Illegal request, invalid field in cdb */
920 mk_sense_buffer(devip, ILLEGAL_REQUEST,
921 INVALID_FIELD_IN_CDB, 0);
923 return check_condition_result;
925 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
926 ret = fill_from_dev_buffer(scp, arr,
927 min(len, SDEBUG_MAX_INQ_ARR_SZ));
931 /* drops through here for a standard inquiry */
932 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
933 arr[2] = scsi_debug_scsi_level;
934 arr[3] = 2; /* response_data_format==2 */
935 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
936 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
937 if (0 == scsi_debug_vpd_use_hostno)
/* NOTE(review): this assignment overwrites (rather than ORs into)
 * the PROTECT bit set just above -- looks unintended; verify. */
938 arr[5] = 0x10; /* claim: implicit TGPS */
939 arr[6] = 0x10; /* claim: MultiP */
940 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
941 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
942 memcpy(&arr[8], inq_vendor_id, 8);
943 memcpy(&arr[16], inq_product_id, 16);
944 memcpy(&arr[32], inq_product_rev, 4);
945 /* version descriptors (2 bytes each) follow */
946 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
947 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
949 if (scsi_debug_ptype == 0) {
950 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
951 } else if (scsi_debug_ptype == 1) {
952 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
954 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
955 ret = fill_from_dev_buffer(scp, arr,
956 min(alloc_len, SDEBUG_LONG_INQ_SZ));
/* Respond to REQUEST SENSE: returns the pending sense data (or a
 * synthesised THRESHOLD EXCEEDED report when the informational
 * exceptions mode page has TEST set with MRIE==6), honouring the
 * DESC bit for descriptor vs fixed format, then clears the per-LUN
 * sense buffer. */
961 static int resp_requests(struct scsi_cmnd * scp,
962 struct sdebug_dev_info * devip)
964 unsigned char * sbuff;
965 unsigned char *cmd = (unsigned char *)scp->cmnd;
966 unsigned char arr[SDEBUG_SENSE_LEN];
970 memset(arr, 0, sizeof(arr));
971 if (devip->reset == 1)
972 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
973 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
974 sbuff = devip->sense_buff;
/* Informational exceptions: TEST bit set and MRIE==6 */
975 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
978 arr[1] = 0x0; /* NO_SENSE in sense_key */
979 arr[2] = THRESHOLD_EXCEEDED;
980 arr[3] = 0xff; /* TEST set and MRIE==6 */
983 arr[2] = 0x0; /* NO_SENSE in sense_key */
984 arr[7] = 0xa; /* 18 byte sense buffer */
985 arr[12] = THRESHOLD_EXCEEDED;
986 arr[13] = 0xff; /* TEST set and MRIE==6 */
989 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
990 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
991 /* DESC bit set and sense_buff in fixed format */
992 memset(arr, 0, sizeof(arr));
/* Convert fixed-format sense to descriptor format. */
994 arr[1] = sbuff[2]; /* sense key */
995 arr[2] = sbuff[12]; /* asc */
996 arr[3] = sbuff[13]; /* ascq */
/* Sense data is consumed once reported. */
1000 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
1001 return fill_from_dev_buffer(scp, arr, len);
/* Respond to START STOP UNIT: validates the power condition field
 * and toggles the per-LUN stopped state per the START bit. */
1004 static int resp_start_stop(struct scsi_cmnd * scp,
1005 struct sdebug_dev_info * devip)
1007 unsigned char *cmd = (unsigned char *)scp->cmnd;
1008 int power_cond, errsts, start;
1010 if ((errsts = check_readiness(scp, 1, devip)))
1012 power_cond = (cmd[4] & 0xf0) >> 4;
/* A non-zero power condition is not supported here. */
1014 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1016 return check_condition_result;
1019 if (start == devip->stopped)
1020 devip->stopped = !start;
1024 static sector_t get_sdebug_capacity(void)
1026 if (scsi_debug_virtual_gb > 0)
1027 return (sector_t)scsi_debug_virtual_gb *
1028 (1073741824 / scsi_debug_sector_size);
1030 return sdebug_store_sectors;
1033 #define SDEBUG_READCAP_ARR_SZ 8
/* Respond to READ CAPACITY(10): last LBA (big-endian, saturating
 * behaviour for >32-bit capacities handled in elided lines) plus the
 * logical block size. */
1034 static int resp_readcap(struct scsi_cmnd * scp,
1035 struct sdebug_dev_info * devip)
1037 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1041 if ((errsts = check_readiness(scp, 1, devip)))
1043 /* following just in case virtual_gb changed */
1044 sdebug_capacity = get_sdebug_capacity();
1045 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1046 if (sdebug_capacity < 0xffffffff) {
1047 capac = (unsigned int)sdebug_capacity - 1;
1048 arr[0] = (capac >> 24);
1049 arr[1] = (capac >> 16) & 0xff;
1050 arr[2] = (capac >> 8) & 0xff;
1051 arr[3] = capac & 0xff;
1058 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1059 arr[7] = scsi_debug_sector_size & 0xff;
1060 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1063 #define SDEBUG_READCAP16_ARR_SZ 32
/* Respond to READ CAPACITY(16): 64-bit last LBA, block size,
 * physical-block exponent, lowest aligned LBA, provisioning (LBPME/
 * LBPRZ) and protection (P_TYPE/PROT_EN) fields. */
1064 static int resp_readcap16(struct scsi_cmnd * scp,
1065 struct sdebug_dev_info * devip)
1067 unsigned char *cmd = (unsigned char *)scp->cmnd;
1068 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1069 unsigned long long capac;
1070 int errsts, k, alloc_len;
1072 if ((errsts = check_readiness(scp, 1, devip)))
1074 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1076 /* following just in case virtual_gb changed */
1077 sdebug_capacity = get_sdebug_capacity();
1078 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ)
1079 capac = sdebug_capacity - 1;
/* 64-bit last LBA, big-endian, bytes 0-7. */
1080 for (k = 0; k < 8; ++k, capac >>= 8)
1081 arr[7 - k] = capac & 0xff;
1082 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1083 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1084 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1085 arr[11] = scsi_debug_sector_size & 0xff;
1086 arr[13] = scsi_debug_physblk_exp & 0xf;
1087 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1089 if (scsi_debug_lbp()) {
1090 arr[14] |= 0x80; /* LBPME */
1091 if (scsi_debug_lbprz)
1092 arr[14] |= 0x40; /* LBPRZ */
1095 arr[15] = scsi_debug_lowest_aligned & 0xff;
1097 if (scsi_debug_dif) {
1098 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1099 arr[12] |= 1; /* PROT_EN */
1102 return fill_from_dev_buffer(scp, arr,
1103 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1106 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1108 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1109 struct sdebug_dev_info * devip)
1111 unsigned char *cmd = (unsigned char *)scp->cmnd;
1112 unsigned char * arr;
1113 int host_no = devip->sdbg_host->shost->host_no;
1114 int n, ret, alen, rlen;
1115 int port_group_a, port_group_b, port_a, port_b;
1117 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1120 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1122 return DID_REQUEUE << 16;
1124 * EVPD page 0x88 states we have two ports, one
1125 * real and a fake port with no device connected.
1126 * So we create two port groups with one port each
1127 * and set the group with port B to unavailable.
1129 port_a = 0x1; /* relative port A */
1130 port_b = 0x2; /* relative port B */
1131 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1132 (devip->channel & 0x7f);
1133 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1134 (devip->channel & 0x7f) + 0x80;
1137 * The asymmetric access state is cycled according to the host_id.
1140 if (0 == scsi_debug_vpd_use_hostno) {
1141 arr[n++] = host_no % 3; /* Asymm access state */
1142 arr[n++] = 0x0F; /* claim: all states are supported */
1144 arr[n++] = 0x0; /* Active/Optimized path */
1145 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1147 arr[n++] = (port_group_a >> 8) & 0xff;
1148 arr[n++] = port_group_a & 0xff;
1149 arr[n++] = 0; /* Reserved */
1150 arr[n++] = 0; /* Status code */
1151 arr[n++] = 0; /* Vendor unique */
1152 arr[n++] = 0x1; /* One port per group */
1153 arr[n++] = 0; /* Reserved */
1154 arr[n++] = 0; /* Reserved */
1155 arr[n++] = (port_a >> 8) & 0xff;
1156 arr[n++] = port_a & 0xff;
1157 arr[n++] = 3; /* Port unavailable */
1158 arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1159 arr[n++] = (port_group_b >> 8) & 0xff;
1160 arr[n++] = port_group_b & 0xff;
1161 arr[n++] = 0; /* Reserved */
1162 arr[n++] = 0; /* Status code */
1163 arr[n++] = 0; /* Vendor unique */
1164 arr[n++] = 0x1; /* One port per group */
1165 arr[n++] = 0; /* Reserved */
1166 arr[n++] = 0; /* Reserved */
1167 arr[n++] = (port_b >> 8) & 0xff;
1168 arr[n++] = port_b & 0xff;
1171 arr[0] = (rlen >> 24) & 0xff;
1172 arr[1] = (rlen >> 16) & 0xff;
1173 arr[2] = (rlen >> 8) & 0xff;
1174 arr[3] = rlen & 0xff;
1177 * Return the smallest value of either
1178 * - The allocated length
1179 * - The constructed command length
1180 * - The maximum array size
1183 ret = fill_from_dev_buffer(scp, arr,
1184 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
/* <<Following mode page info copied from ST318451LW>> */

/*
 * Mode page 0x01 (Read-Write Error Recovery).  pcontrol==1 requests
 * changeable values: everything after the header reads back as zero
 * (nothing is changeable).
 */
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
/*
 * Mode page 0x02 (Disconnect-Reconnect).  No field is changeable, so
 * pcontrol==1 zeroes everything after the 2-byte page header.
 */
static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1213 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1214 { /* Format device page for mode_sense */
1215 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1216 0, 0, 0, 0, 0, 0, 0, 0,
1217 0, 0, 0, 0, 0x40, 0, 0, 0};
1219 memcpy(p, format_pg, sizeof(format_pg));
1220 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1221 p[11] = sdebug_sectors_per & 0xff;
1222 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1223 p[13] = scsi_debug_sector_size & 0xff;
1224 if (scsi_debug_removable)
1225 p[20] |= 0x20; /* should agree with INQUIRY */
1227 memset(p + 2, 0, sizeof(format_pg) - 2);
1228 return sizeof(format_pg);
/*
 * Mode page 0x08 (Caching).  Static contents; nothing changeable so
 * pcontrol==1 zeroes the body after the page header.
 */
static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
{	/* Caching page for mode_sense */
	unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};

	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(caching_pg) - 2);
	return sizeof(caching_pg);
}
1242 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1243 { /* Control mode page for mode_sense */
1244 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1246 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1249 if (scsi_debug_dsense)
1250 ctrl_m_pg[2] |= 0x4;
1252 ctrl_m_pg[2] &= ~0x4;
1255 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1257 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1259 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1260 else if (2 == pcontrol)
1261 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1262 return sizeof(ctrl_m_pg);
1266 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1267 { /* Informational Exceptions control mode page for mode_sense */
1268 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1270 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1273 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1275 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1276 else if (2 == pcontrol)
1277 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1278 return sizeof(iec_m_pg);
/*
 * Mode page 0x19, subpage 0 (SAS SSP, short format).  Static contents;
 * nothing changeable.
 */
static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
/*
 * Mode page 0x19, subpage 0x01 (SAS Phy Control And Discover).
 * Patches the two phys' SAS addresses (bytes 20-23 of each 48-byte phy
 * descriptor) from target_dev_id.  pcontrol==1: nothing changeable.
 */
static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	p[20] = (port_a >> 24);
	p[21] = (port_a >> 16) & 0xff;
	p[22] = (port_a >> 8) & 0xff;
	p[23] = port_a & 0xff;
	p[48 + 20] = (port_b >> 24);
	p[48 + 21] = (port_b >> 16) & 0xff;
	p[48 + 22] = (port_b >> 8) & 0xff;
	p[48 + 23] = port_b & 0xff;
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
/*
 * Mode page 0x19, subpage 0x02 (SAS SSP shared port).  Static
 * contents; nothing changeable.
 */
static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
1340 #define SDEBUG_MAX_MSENSE_SZ 256
1342 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1343 struct sdebug_dev_info * devip)
1345 unsigned char dbd, llbaa;
1346 int pcontrol, pcode, subpcode, bd_len;
1347 unsigned char dev_spec;
1348 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1350 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1351 unsigned char *cmd = (unsigned char *)scp->cmnd;
1353 if ((errsts = check_readiness(scp, 1, devip)))
1355 dbd = !!(cmd[1] & 0x8);
1356 pcontrol = (cmd[2] & 0xc0) >> 6;
1357 pcode = cmd[2] & 0x3f;
1359 msense_6 = (MODE_SENSE == cmd[0]);
1360 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1361 if ((0 == scsi_debug_ptype) && (0 == dbd))
1362 bd_len = llbaa ? 16 : 8;
1365 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1366 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1367 if (0x3 == pcontrol) { /* Saving values not supported */
1368 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1370 return check_condition_result;
1372 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1373 (devip->target * 1000) - 3;
1374 /* set DPOFUA bit for disks */
1375 if (0 == scsi_debug_ptype)
1376 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1386 arr[4] = 0x1; /* set LONGLBA bit */
1387 arr[7] = bd_len; /* assume 255 or less */
1391 if ((bd_len > 0) && (!sdebug_capacity))
1392 sdebug_capacity = get_sdebug_capacity();
1395 if (sdebug_capacity > 0xfffffffe) {
1401 ap[0] = (sdebug_capacity >> 24) & 0xff;
1402 ap[1] = (sdebug_capacity >> 16) & 0xff;
1403 ap[2] = (sdebug_capacity >> 8) & 0xff;
1404 ap[3] = sdebug_capacity & 0xff;
1406 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1407 ap[7] = scsi_debug_sector_size & 0xff;
1410 } else if (16 == bd_len) {
1411 unsigned long long capac = sdebug_capacity;
1413 for (k = 0; k < 8; ++k, capac >>= 8)
1414 ap[7 - k] = capac & 0xff;
1415 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1416 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1417 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1418 ap[15] = scsi_debug_sector_size & 0xff;
1423 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1424 /* TODO: Control Extension page */
1425 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1427 return check_condition_result;
1430 case 0x1: /* Read-Write error recovery page, direct access */
1431 len = resp_err_recov_pg(ap, pcontrol, target);
1434 case 0x2: /* Disconnect-Reconnect page, all devices */
1435 len = resp_disconnect_pg(ap, pcontrol, target);
1438 case 0x3: /* Format device page, direct access */
1439 len = resp_format_pg(ap, pcontrol, target);
1442 case 0x8: /* Caching page, direct access */
1443 len = resp_caching_pg(ap, pcontrol, target);
1446 case 0xa: /* Control Mode page, all devices */
1447 len = resp_ctrl_m_pg(ap, pcontrol, target);
1450 case 0x19: /* if spc==1 then sas phy, control+discover */
1451 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1452 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1453 INVALID_FIELD_IN_CDB, 0);
1454 return check_condition_result;
1457 if ((0x0 == subpcode) || (0xff == subpcode))
1458 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1459 if ((0x1 == subpcode) || (0xff == subpcode))
1460 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1462 if ((0x2 == subpcode) || (0xff == subpcode))
1463 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1466 case 0x1c: /* Informational Exceptions Mode page, all devices */
1467 len = resp_iec_m_pg(ap, pcontrol, target);
1470 case 0x3f: /* Read all Mode pages */
1471 if ((0 == subpcode) || (0xff == subpcode)) {
1472 len = resp_err_recov_pg(ap, pcontrol, target);
1473 len += resp_disconnect_pg(ap + len, pcontrol, target);
1474 len += resp_format_pg(ap + len, pcontrol, target);
1475 len += resp_caching_pg(ap + len, pcontrol, target);
1476 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1477 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1478 if (0xff == subpcode) {
1479 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1480 target, target_dev_id);
1481 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1483 len += resp_iec_m_pg(ap + len, pcontrol, target);
1485 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1486 INVALID_FIELD_IN_CDB, 0);
1487 return check_condition_result;
1492 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1494 return check_condition_result;
1497 arr[0] = offset - 1;
1499 arr[0] = ((offset - 2) >> 8) & 0xff;
1500 arr[1] = (offset - 2) & 0xff;
1502 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1505 #define SDEBUG_MAX_MSELECT_SZ 512
1507 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1508 struct sdebug_dev_info * devip)
1510 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1511 int param_len, res, errsts, mpage;
1512 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1513 unsigned char *cmd = (unsigned char *)scp->cmnd;
1515 if ((errsts = check_readiness(scp, 1, devip)))
1517 memset(arr, 0, sizeof(arr));
1520 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1521 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1522 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1523 INVALID_FIELD_IN_CDB, 0);
1524 return check_condition_result;
1526 res = fetch_to_dev_buffer(scp, arr, param_len);
1528 return (DID_ERROR << 16);
1529 else if ((res < param_len) &&
1530 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1531 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1532 " IO sent=%d bytes\n", param_len, res);
1533 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1534 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1536 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1537 INVALID_FIELD_IN_PARAM_LIST, 0);
1538 return check_condition_result;
1540 off = bd_len + (mselect6 ? 4 : 8);
1541 mpage = arr[off] & 0x3f;
1542 ps = !!(arr[off] & 0x80);
1544 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1545 INVALID_FIELD_IN_PARAM_LIST, 0);
1546 return check_condition_result;
1548 spf = !!(arr[off] & 0x40);
1549 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1551 if ((pg_len + off) > param_len) {
1552 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1553 PARAMETER_LIST_LENGTH_ERR, 0);
1554 return check_condition_result;
1557 case 0xa: /* Control Mode page */
1558 if (ctrl_m_pg[1] == arr[off + 1]) {
1559 memcpy(ctrl_m_pg + 2, arr + off + 2,
1560 sizeof(ctrl_m_pg) - 2);
1561 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1565 case 0x1c: /* Informational Exceptions Mode page */
1566 if (iec_m_pg[1] == arr[off + 1]) {
1567 memcpy(iec_m_pg + 2, arr + off + 2,
1568 sizeof(iec_m_pg) - 2);
1575 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1576 INVALID_FIELD_IN_PARAM_LIST, 0);
1577 return check_condition_result;
/*
 * Log page 0x0d (Temperature): two fixed parameters — current
 * temperature (38 C) and reference temperature (65 C).
 */
static int resp_temp_l_pg(unsigned char * arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
1590 static int resp_ie_l_pg(unsigned char * arr)
1592 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1595 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1596 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1597 arr[4] = THRESHOLD_EXCEEDED;
1600 return sizeof(ie_l_pg);
1603 #define SDEBUG_MAX_LSENSE_SZ 512
1605 static int resp_log_sense(struct scsi_cmnd * scp,
1606 struct sdebug_dev_info * devip)
1608 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1609 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1610 unsigned char *cmd = (unsigned char *)scp->cmnd;
1612 if ((errsts = check_readiness(scp, 1, devip)))
1614 memset(arr, 0, sizeof(arr));
1618 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1619 INVALID_FIELD_IN_CDB, 0);
1620 return check_condition_result;
1622 pcontrol = (cmd[2] & 0xc0) >> 6;
1623 pcode = cmd[2] & 0x3f;
1624 subpcode = cmd[3] & 0xff;
1625 alloc_len = (cmd[7] << 8) + cmd[8];
1627 if (0 == subpcode) {
1629 case 0x0: /* Supported log pages log page */
1631 arr[n++] = 0x0; /* this page */
1632 arr[n++] = 0xd; /* Temperature */
1633 arr[n++] = 0x2f; /* Informational exceptions */
1636 case 0xd: /* Temperature log page */
1637 arr[3] = resp_temp_l_pg(arr + 4);
1639 case 0x2f: /* Informational exceptions log page */
1640 arr[3] = resp_ie_l_pg(arr + 4);
1643 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1644 INVALID_FIELD_IN_CDB, 0);
1645 return check_condition_result;
1647 } else if (0xff == subpcode) {
1651 case 0x0: /* Supported log pages and subpages log page */
1654 arr[n++] = 0x0; /* 0,0 page */
1656 arr[n++] = 0xff; /* this page */
1658 arr[n++] = 0x0; /* Temperature */
1660 arr[n++] = 0x0; /* Informational exceptions */
1663 case 0xd: /* Temperature subpages */
1666 arr[n++] = 0x0; /* Temperature */
1669 case 0x2f: /* Informational exceptions subpages */
1672 arr[n++] = 0x0; /* Informational exceptions */
1676 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1677 INVALID_FIELD_IN_CDB, 0);
1678 return check_condition_result;
1681 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1682 INVALID_FIELD_IN_CDB, 0);
1683 return check_condition_result;
1685 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1686 return fill_from_dev_buffer(scp, arr,
1687 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1690 static int check_device_access_params(struct sdebug_dev_info *devi,
1691 unsigned long long lba, unsigned int num)
1693 if (lba + num > sdebug_capacity) {
1694 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1695 return check_condition_result;
1697 /* transfer length excessive (tie in to block limits VPD page) */
1698 if (num > sdebug_store_sectors) {
1699 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1700 return check_condition_result;
1705 /* Returns number of bytes copied or -1 if error. */
1706 static int do_device_access(struct scsi_cmnd *scmd,
1707 struct sdebug_dev_info *devi,
1708 unsigned long long lba, unsigned int num, int write)
1711 unsigned long long block, rest = 0;
1712 struct scsi_data_buffer *sdb;
1713 enum dma_data_direction dir;
1714 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1718 sdb = scsi_out(scmd);
1719 dir = DMA_TO_DEVICE;
1720 func = sg_pcopy_to_buffer;
1722 sdb = scsi_in(scmd);
1723 dir = DMA_FROM_DEVICE;
1724 func = sg_pcopy_from_buffer;
1729 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1732 block = do_div(lba, sdebug_store_sectors);
1733 if (block + num > sdebug_store_sectors)
1734 rest = block + num - sdebug_store_sectors;
1736 ret = func(sdb->table.sgl, sdb->table.nents,
1737 fake_storep + (block * scsi_debug_sector_size),
1738 (num - rest) * scsi_debug_sector_size, 0);
1739 if (ret != (num - rest) * scsi_debug_sector_size)
1743 ret += func(sdb->table.sgl, sdb->table.nents,
1744 fake_storep, rest * scsi_debug_sector_size,
1745 (num - rest) * scsi_debug_sector_size);
1751 static __be16 dif_compute_csum(const void *buf, int len)
1755 if (scsi_debug_guard)
1756 csum = (__force __be16)ip_compute_csum(buf, len);
1758 csum = cpu_to_be16(crc_t10dif(buf, len));
1763 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1764 sector_t sector, u32 ei_lba)
1766 __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1768 if (sdt->guard_tag != csum) {
1769 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1771 (unsigned long)sector,
1772 be16_to_cpu(sdt->guard_tag),
1776 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1777 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1778 pr_err("%s: REF check failed on sector %lu\n",
1779 __func__, (unsigned long)sector);
1782 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1783 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1784 pr_err("%s: REF check failed on sector %lu\n",
1785 __func__, (unsigned long)sector);
1791 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
1792 unsigned int sectors, bool read)
1796 const void *dif_store_end = dif_storep + sdebug_store_sectors;
1797 struct sg_mapping_iter miter;
1799 /* Bytes of protection data to copy into sgl */
1800 resid = sectors * sizeof(*dif_storep);
1802 sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
1803 scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
1804 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
1806 while (sg_miter_next(&miter) && resid > 0) {
1807 size_t len = min(miter.length, resid);
1808 void *start = dif_store(sector);
1811 if (dif_store_end < start + len)
1812 rest = start + len - dif_store_end;
1817 memcpy(paddr, start, len - rest);
1819 memcpy(start, paddr, len - rest);
1823 memcpy(paddr + len - rest, dif_storep, rest);
1825 memcpy(dif_storep, paddr + len - rest, rest);
1828 sector += len / sizeof(*dif_storep);
1831 sg_miter_stop(&miter);
1834 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1835 unsigned int sectors, u32 ei_lba)
1838 struct sd_dif_tuple *sdt;
1841 for (i = 0; i < sectors; i++, ei_lba++) {
1844 sector = start_sec + i;
1845 sdt = dif_store(sector);
1847 if (sdt->app_tag == cpu_to_be16(0xffff))
1850 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
1857 dif_copy_prot(SCpnt, start_sec, sectors, true);
1863 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1864 unsigned int num, struct sdebug_dev_info *devip,
1867 unsigned long iflags;
1870 ret = check_device_access_params(devip, lba, num);
1874 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1875 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1876 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1877 /* claim unrecoverable read error */
1878 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1879 /* set info field and valid bit for fixed descriptor */
1880 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1881 devip->sense_buff[0] |= 0x80; /* Valid bit */
1882 ret = (lba < OPT_MEDIUM_ERR_ADDR)
1883 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1884 devip->sense_buff[3] = (ret >> 24) & 0xff;
1885 devip->sense_buff[4] = (ret >> 16) & 0xff;
1886 devip->sense_buff[5] = (ret >> 8) & 0xff;
1887 devip->sense_buff[6] = ret & 0xff;
1889 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1890 return check_condition_result;
1893 read_lock_irqsave(&atomic_rw, iflags);
1896 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1897 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1900 read_unlock_irqrestore(&atomic_rw, iflags);
1901 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1902 return illegal_condition_result;
1906 ret = do_device_access(SCpnt, devip, lba, num, 0);
1907 read_unlock_irqrestore(&atomic_rw, iflags);
1909 return DID_ERROR << 16;
1911 scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
1916 void dump_sector(unsigned char *buf, int len)
1920 printk(KERN_ERR ">>> Sector Dump <<<\n");
1922 for (i = 0 ; i < len ; i += 16) {
1923 printk(KERN_ERR "%04d: ", i);
1925 for (j = 0 ; j < 16 ; j++) {
1926 unsigned char c = buf[i+j];
1927 if (c >= 0x20 && c < 0x7e)
1928 printk(" %c ", buf[i+j]);
1930 printk("%02x ", buf[i+j]);
1937 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1938 unsigned int sectors, u32 ei_lba)
1941 struct sd_dif_tuple *sdt;
1943 sector_t sector = start_sec;
1946 struct sg_mapping_iter diter;
1947 struct sg_mapping_iter piter;
1949 BUG_ON(scsi_sg_count(SCpnt) == 0);
1950 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1952 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
1953 scsi_prot_sg_count(SCpnt),
1954 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
1955 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
1956 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
1958 /* For each protection page */
1959 while (sg_miter_next(&piter)) {
1961 if (WARN_ON(!sg_miter_next(&diter))) {
1966 for (ppage_offset = 0; ppage_offset < piter.length;
1967 ppage_offset += sizeof(struct sd_dif_tuple)) {
1968 /* If we're at the end of the current
1969 * data page advance to the next one
1971 if (dpage_offset >= diter.length) {
1972 if (WARN_ON(!sg_miter_next(&diter))) {
1979 sdt = piter.addr + ppage_offset;
1980 daddr = diter.addr + dpage_offset;
1982 ret = dif_verify(sdt, daddr, sector, ei_lba);
1984 dump_sector(daddr, scsi_debug_sector_size);
1990 dpage_offset += scsi_debug_sector_size;
1992 diter.consumed = dpage_offset;
1993 sg_miter_stop(&diter);
1995 sg_miter_stop(&piter);
1997 dif_copy_prot(SCpnt, start_sec, sectors, false);
2004 sg_miter_stop(&diter);
2005 sg_miter_stop(&piter);
2009 static unsigned long lba_to_map_index(sector_t lba)
2011 if (scsi_debug_unmap_alignment) {
2012 lba += scsi_debug_unmap_granularity -
2013 scsi_debug_unmap_alignment;
2015 do_div(lba, scsi_debug_unmap_granularity);
2020 static sector_t map_index_to_lba(unsigned long index)
2022 sector_t lba = index * scsi_debug_unmap_granularity;
2024 if (scsi_debug_unmap_alignment) {
2025 lba -= scsi_debug_unmap_granularity -
2026 scsi_debug_unmap_alignment;
2032 static unsigned int map_state(sector_t lba, unsigned int *num)
2035 unsigned int mapped;
2036 unsigned long index;
2039 index = lba_to_map_index(lba);
2040 mapped = test_bit(index, map_storep);
2043 next = find_next_zero_bit(map_storep, map_size, index);
2045 next = find_next_bit(map_storep, map_size, index);
2047 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2053 static void map_region(sector_t lba, unsigned int len)
2055 sector_t end = lba + len;
2058 unsigned long index = lba_to_map_index(lba);
2060 if (index < map_size)
2061 set_bit(index, map_storep);
2063 lba = map_index_to_lba(index + 1);
2067 static void unmap_region(sector_t lba, unsigned int len)
2069 sector_t end = lba + len;
2072 unsigned long index = lba_to_map_index(lba);
2074 if (lba == map_index_to_lba(index) &&
2075 lba + scsi_debug_unmap_granularity <= end &&
2077 clear_bit(index, map_storep);
2078 if (scsi_debug_lbprz) {
2079 memset(fake_storep +
2080 lba * scsi_debug_sector_size, 0,
2081 scsi_debug_sector_size *
2082 scsi_debug_unmap_granularity);
2085 memset(dif_storep + lba, 0xff,
2086 sizeof(*dif_storep) *
2087 scsi_debug_unmap_granularity);
2090 lba = map_index_to_lba(index + 1);
2094 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2095 unsigned int num, struct sdebug_dev_info *devip,
2098 unsigned long iflags;
2101 ret = check_device_access_params(devip, lba, num);
2105 write_lock_irqsave(&atomic_rw, iflags);
2108 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2109 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2112 write_unlock_irqrestore(&atomic_rw, iflags);
2113 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2114 return illegal_condition_result;
2118 ret = do_device_access(SCpnt, devip, lba, num, 1);
2119 if (scsi_debug_lbp())
2120 map_region(lba, num);
2121 write_unlock_irqrestore(&atomic_rw, iflags);
2123 return (DID_ERROR << 16);
2124 else if ((ret < (num * scsi_debug_sector_size)) &&
2125 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2126 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2127 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2132 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2133 unsigned int num, struct sdebug_dev_info *devip,
2134 u32 ei_lba, unsigned int unmap)
2136 unsigned long iflags;
2137 unsigned long long i;
2140 ret = check_device_access_params(devip, lba, num);
2144 if (num > scsi_debug_write_same_length) {
2145 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2147 return check_condition_result;
2150 write_lock_irqsave(&atomic_rw, iflags);
2152 if (unmap && scsi_debug_lbp()) {
2153 unmap_region(lba, num);
2157 /* Else fetch one logical block */
2158 ret = fetch_to_dev_buffer(scmd,
2159 fake_storep + (lba * scsi_debug_sector_size),
2160 scsi_debug_sector_size);
2163 write_unlock_irqrestore(&atomic_rw, iflags);
2164 return (DID_ERROR << 16);
2165 } else if ((ret < (num * scsi_debug_sector_size)) &&
2166 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2167 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2168 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2170 /* Copy first sector to remaining blocks */
2171 for (i = 1 ; i < num ; i++)
2172 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2173 fake_storep + (lba * scsi_debug_sector_size),
2174 scsi_debug_sector_size);
2176 if (scsi_debug_lbp())
2177 map_region(lba, num);
2179 write_unlock_irqrestore(&atomic_rw, iflags);
2184 struct unmap_block_desc {
2190 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2193 struct unmap_block_desc *desc;
2194 unsigned int i, payload_len, descriptors;
2196 unsigned long iflags;
2198 ret = check_readiness(scmd, 1, devip);
2202 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2203 BUG_ON(scsi_bufflen(scmd) != payload_len);
2205 descriptors = (payload_len - 8) / 16;
2207 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2209 return check_condition_result;
2211 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2213 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2214 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2216 desc = (void *)&buf[8];
2218 write_lock_irqsave(&atomic_rw, iflags);
2220 for (i = 0 ; i < descriptors ; i++) {
2221 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2222 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2224 ret = check_device_access_params(devip, lba, num);
2228 unmap_region(lba, num);
2234 write_unlock_irqrestore(&atomic_rw, iflags);
2240 #define SDEBUG_GET_LBA_STATUS_LEN 32
2242 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2243 struct sdebug_dev_info * devip)
2245 unsigned long long lba;
2246 unsigned int alloc_len, mapped, num;
2247 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2250 ret = check_readiness(scmd, 1, devip);
2254 lba = get_unaligned_be64(&scmd->cmnd[2]);
2255 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2260 ret = check_device_access_params(devip, lba, 1);
2264 mapped = map_state(lba, &num);
2266 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2267 put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */
2268 put_unaligned_be64(lba, &arr[8]); /* LBA */
2269 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2270 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2272 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2275 #define SDEBUG_RLUN_ARR_SZ 256
2277 static int resp_report_luns(struct scsi_cmnd * scp,
2278 struct sdebug_dev_info * devip)
2280 unsigned int alloc_len;
2281 int lun_cnt, i, upper, num, n, wlun, lun;
2282 unsigned char *cmd = (unsigned char *)scp->cmnd;
2283 int select_report = (int)cmd[2];
2284 struct scsi_lun *one_lun;
2285 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2286 unsigned char * max_addr;
2288 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2289 if ((alloc_len < 4) || (select_report > 2)) {
2290 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2292 return check_condition_result;
2294 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2295 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2296 lun_cnt = scsi_debug_max_luns;
2297 if (1 == select_report)
2299 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2301 wlun = (select_report > 0) ? 1 : 0;
2302 num = lun_cnt + wlun;
2303 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2304 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2305 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2306 sizeof(struct scsi_lun)), num);
2311 one_lun = (struct scsi_lun *) &arr[8];
2312 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2313 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2314 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2316 upper = (lun >> 8) & 0x3f;
2318 one_lun[i].scsi_lun[0] =
2319 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2320 one_lun[i].scsi_lun[1] = lun & 0xff;
2323 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2324 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2327 alloc_len = (unsigned char *)(one_lun + i) - arr;
2328 return fill_from_dev_buffer(scp, arr,
2329 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
/*
 * Respond to XDWRITEREAD(10): XOR the just-received data-out payload with
 * the data held in the data-in scatter-gather list, writing the XOR result
 * back through that list.  Returns 0 on success, or check_condition_result
 * (NOT_READY / LOGICAL_UNIT_COMMUNICATION_FAILURE sense) when the
 * temporary buffer cannot be allocated.
 * NOTE(review): several interior source lines are elided in this excerpt.
 */
2332 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2333 unsigned int num, struct sdebug_dev_info *devip)
2336 unsigned char *kaddr, *buf;
2337 unsigned int offset;
2338 struct scsi_data_buffer *sdb = scsi_in(scp);
2339 struct sg_mapping_iter miter;
2341 /* better not to use temporary buffer. */
/* GFP_ATOMIC: this path may run in non-sleepable context */
2342 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2344 mk_sense_buffer(devip, NOT_READY,
2345 LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
2346 return check_condition_result;
/* snapshot the data-out payload before XOR-ing into the data-in list */
2349 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
/* SG_MITER_ATOMIC => kmap_atomic mappings; SG_MITER_TO_SG => we write */
2352 sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
2353 SG_MITER_ATOMIC | SG_MITER_TO_SG);
2355 while (sg_miter_next(&miter)) {
2357 for (j = 0; j < miter.length; j++)
2358 *(kaddr + j) ^= *(buf + offset + j);
2360 offset += miter.length;
2362 sg_miter_stop(&miter);
2368 /* When timer goes off this function is called. */
/*
 * Delayed-response timer callback.  'indx' is the queued_arr slot of the
 * command whose timer expired: under queued_arr_lock, validate the slot,
 * then complete the command by invoking the saved mid-level done callback
 * with the stored SCSI result, and free the slot for reuse.
 * NOTE(review): early-return paths for the error printks are elided here.
 */
2369 static void timer_intr_handler(unsigned long indx)
2371 struct sdebug_queued_cmd * sqcp;
2372 unsigned long iflags;
/* sanity: index must be inside the active part of queued_arr */
2374 if (indx >= scsi_debug_max_queue) {
2375 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2379 spin_lock_irqsave(&queued_arr_lock, iflags);
2380 sqcp = &queued_arr[(int)indx];
2381 if (! sqcp->in_use) {
2382 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2384 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2388 if (sqcp->done_funct) {
2389 sqcp->a_cmnd->result = sqcp->scsi_result;
2390 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
/* clear so a late/duplicate firing cannot complete the cmnd twice */
2392 sqcp->done_funct = NULL;
2393 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * Allocate (zeroed) a per-device info structure, bind it to the owning
 * pseudo host and append it to that host's dev_info_list.
 * Returns the new structure, or NULL on allocation failure
 * (the NULL-check branch is elided in this excerpt).
 */
2397 static struct sdebug_dev_info *
2398 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2400 struct sdebug_dev_info *devip;
2402 devip = kzalloc(sizeof(*devip), flags);
2404 devip->sdbg_host = sdbg_host;
2405 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/*
 * Find (or lazily create) the sdebug_dev_info for a scsi_device.
 * Search order: exact in-use match on <channel, target, lun>; otherwise
 * reuse the first unused entry; otherwise allocate a fresh one with
 * GFP_ATOMIC.  A (re)claimed entry is fully re-initialised, including a
 * fixed (0x70) or descriptor (0x72) sense header per scsi_debug_dsense.
 * Returns NULL on allocation failure (elided branches in this excerpt).
 */
2410 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2412 struct sdebug_host_info * sdbg_host;
2413 struct sdebug_dev_info * open_devip = NULL;
2414 struct sdebug_dev_info * devip =
2415 (struct sdebug_dev_info *)sdev->hostdata;
/* host private data holds a pointer to our sdebug_host_info */
2419 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2421 printk(KERN_ERR "Host info NULL\n");
2424 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2425 if ((devip->used) && (devip->channel == sdev->channel) &&
2426 (devip->target == sdev->id) &&
2427 (devip->lun == sdev->lun))
/* remember the first free slot as a fallback */
2430 if ((!devip->used) && (!open_devip))
2434 if (!open_devip) { /* try and make a new one */
2435 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2437 printk(KERN_ERR "%s: out of memory at line %d\n",
2438 __func__, __LINE__);
2443 open_devip->channel = sdev->channel;
2444 open_devip->target = sdev->id;
2445 open_devip->lun = sdev->lun;
2446 open_devip->sdbg_host = sdbg_host;
2447 open_devip->reset = 1;
2448 open_devip->used = 1;
2449 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
/* 0x72 = descriptor sense format, 0x70 = fixed format */
2450 if (scsi_debug_dsense)
2451 open_devip->sense_buff[0] = 0x72;
2453 open_devip->sense_buff[0] = 0x70;
2454 open_devip->sense_buff[7] = 0xa;
/* mark the well-known REPORT LUNS lun specially */
2456 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2457 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
/*
 * Mid-level slave_alloc hook: optionally log the device address and mark
 * the request queue as supporting bidirectional (BIDI) commands, which
 * resp_xdwriteread() relies on.
 */
2462 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2464 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2465 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2466 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2467 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
/*
 * Mid-level slave_configure hook: bind a sdebug_dev_info to the device,
 * raise the host's max CDB length to this driver's maximum, set queue
 * depth/segment limits and honour the no_uld option.
 * Returns 1 (device marked offline) when no dev_info can be obtained.
 */
2471 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2473 struct sdebug_dev_info *devip;
2475 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2476 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2477 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2478 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2479 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2480 devip = devInfoReg(sdp);
2482 return 1; /* no resources, will be marked offline */
2483 sdp->hostdata = devip;
2484 if (sdp->host->cmd_per_lun)
2485 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2486 sdp->host->cmd_per_lun);
/* cap individual sg segments at 256 KiB */
2487 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
/* no_uld=1: stop upper-level drivers (e.g. sd) from attaching */
2488 if (scsi_debug_no_uld)
2489 sdp->no_uld_attach = 1;
/*
 * Mid-level slave_destroy hook: release the device's sdebug_dev_info slot
 * for reuse and clear the hostdata back-pointer.
 */
2493 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2495 struct sdebug_dev_info *devip =
2496 (struct sdebug_dev_info *)sdp->hostdata;
2498 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2499 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2500 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2502 /* make this slot available for re-use */
2504 sdp->hostdata = NULL;
2508 /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
/*
 * Scan queued_arr under queued_arr_lock for the slot holding 'cmnd';
 * if found, cancel its delayed-response timer and free the slot.
 */
2509 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2511 unsigned long iflags;
2513 struct sdebug_queued_cmd *sqcp;
2515 spin_lock_irqsave(&queued_arr_lock, iflags);
2516 for (k = 0; k < scsi_debug_max_queue; ++k) {
2517 sqcp = &queued_arr[k];
2518 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2519 del_timer_sync(&sqcp->cmnd_timer);
2521 sqcp->a_cmnd = NULL;
2525 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/* k < max_queue means the loop broke out on a match */
2526 return (k < scsi_debug_max_queue) ? 1 : 0;
2529 /* Deletes (stops) timers of all queued commands */
/*
 * Walk every queued_arr slot under queued_arr_lock, cancelling the
 * response timer of each in-use command and freeing the slot.  Used on
 * bus/host resets and module teardown.
 */
2530 static void stop_all_queued(void)
2532 unsigned long iflags;
2534 struct sdebug_queued_cmd *sqcp;
2536 spin_lock_irqsave(&queued_arr_lock, iflags);
2537 for (k = 0; k < scsi_debug_max_queue; ++k) {
2538 sqcp = &queued_arr[k];
2539 if (sqcp->in_use && sqcp->a_cmnd) {
2540 del_timer_sync(&sqcp->cmnd_timer);
2542 sqcp->a_cmnd = NULL;
2545 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * Error-handler abort hook: cancel the delayed response (if any) that is
 * pending for this command.  Always reports success to the mid level
 * (the return statement is elided in this excerpt).
 */
2548 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2550 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2551 printk(KERN_INFO "scsi_debug: abort\n");
2553 stop_queued_cmnd(SCpnt);
/*
 * BIOS geometry hook: if a partition table exists on the (simulated)
 * disk, derive heads/sectors/cylinders from it via scsi_partsize();
 * otherwise fall back to this driver's synthetic geometry.
 * info[0]=heads, info[1]=sectors per track, info[2]=cylinders.
 */
2557 static int scsi_debug_biosparam(struct scsi_device *sdev,
2558 struct block_device * bdev, sector_t capacity, int *info)
2563 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2564 printk(KERN_INFO "scsi_debug: biosparam\n");
2565 buf = scsi_bios_ptable(bdev);
2567 res = scsi_partsize(buf, capacity,
2568 &info[2], &info[0], &info[1]);
/* fallback: use the geometry computed in scsi_debug_init() */
2573 info[0] = sdebug_heads;
2574 info[1] = sdebug_sectors_per;
2575 info[2] = sdebug_cylinders_per;
/*
 * Error-handler device-reset hook: look up the device's info block so its
 * reset flag can be raised (flag-setting and return are elided here).
 */
2579 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2581 struct sdebug_dev_info * devip;
2583 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2584 printk(KERN_INFO "scsi_debug: device_reset\n");
2587 devip = devInfoReg(SCpnt->device);
/*
 * Error-handler bus-reset hook: mark every simulated device on the
 * command's pseudo host as reset, so the next command to each device can
 * report a unit-attention style condition.
 */
2594 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2596 struct sdebug_host_info *sdbg_host;
2597 struct sdebug_dev_info * dev_info;
2598 struct scsi_device * sdp;
2599 struct Scsi_Host * hp;
2601 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2602 printk(KERN_INFO "scsi_debug: bus_reset\n");
/* defensively chase cmnd -> device -> host before dereferencing */
2604 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2605 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2607 list_for_each_entry(dev_info,
2608 &sdbg_host->dev_info_list,
2610 dev_info->reset = 1;
/*
 * Error-handler host-reset hook: under sdebug_host_list_lock, mark every
 * device on every simulated host as reset.
 */
2616 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2618 struct sdebug_host_info * sdbg_host;
2619 struct sdebug_dev_info * dev_info;
2621 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2622 printk(KERN_INFO "scsi_debug: host_reset\n");
2624 spin_lock(&sdebug_host_list_lock);
2625 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2626 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2628 dev_info->reset = 1;
2630 spin_unlock(&sdebug_host_list_lock);
2635 /* Initializes timers in queued array */
/*
 * One-time (__init) setup: initialise each queued_arr slot's timer and
 * clear its command pointer so schedule_resp() can start from a clean
 * queue.
 */
2636 static void __init init_all_queued(void)
2638 unsigned long iflags;
2640 struct sdebug_queued_cmd * sqcp;
2642 spin_lock_irqsave(&queued_arr_lock, iflags);
2643 for (k = 0; k < scsi_debug_max_queue; ++k) {
2644 sqcp = &queued_arr[k];
2645 init_timer(&sqcp->cmnd_timer);
2647 sqcp->a_cmnd = NULL;
2649 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * Write a DOS-style MBR partition table into the start of the ramdisk
 * ('ramp'), splitting the store into scsi_debug_num_parts roughly equal,
 * cylinder-aligned Linux (0x83) partitions.  Track 0 is left unused.
 * No-op for stores under 1 MiB or when no partitions were requested.
 */
2652 static void __init sdebug_build_parts(unsigned char *ramp,
2653 unsigned long store_size)
2655 struct partition * pp;
2656 int starts[SDEBUG_MAX_PARTS + 2];
2657 int sectors_per_part, num_sectors, k;
2658 int heads_by_sects, start_sec, end_sec;
2660 /* assume partition table already zeroed */
2661 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2663 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2664 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2665 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2666 "partitions to %d\n", SDEBUG_MAX_PARTS);
2668 num_sectors = (int)sdebug_store_sectors;
2669 sectors_per_part = (num_sectors - sdebug_sectors_per)
2670 / scsi_debug_num_parts;
2671 heads_by_sects = sdebug_heads * sdebug_sectors_per;
/* first partition starts after track 0 */
2672 starts[0] = sdebug_sectors_per;
/* align each subsequent start to a cylinder boundary */
2673 for (k = 1; k < scsi_debug_num_parts; ++k)
2674 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2676 starts[scsi_debug_num_parts] = num_sectors;
2677 starts[scsi_debug_num_parts + 1] = 0;
/* 0x55 at offset 510 is half of the 0x55AA MBR signature */
2679 ramp[510] = 0x55; /* magic partition markings */
/* 0x1be = offset of the first of four MBR partition entries */
2681 pp = (struct partition *)(ramp + 0x1be);
2682 for (k = 0; starts[k + 1]; ++k, ++pp) {
2683 start_sec = starts[k];
2684 end_sec = starts[k + 1] - 1;
/* CHS of partition start (sector numbers are 1-based) */
2687 pp->cyl = start_sec / heads_by_sects;
2688 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2689 / sdebug_sectors_per;
2690 pp->sector = (start_sec % sdebug_sectors_per) + 1;
/* CHS of partition end */
2692 pp->end_cyl = end_sec / heads_by_sects;
2693 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2694 / sdebug_sectors_per;
2695 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
/* LBA start/length stored little-endian per MBR format */
2697 pp->start_sect = cpu_to_le32(start_sec);
2698 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
2699 pp->sys_ind = 0x83; /* plain Linux partition */
/*
 * Deliver a command's result to the mid level, either immediately
 * (delta_jiff <= 0) or after a delay by parking it in queued_arr with a
 * timer that fires timer_intr_handler().  On CHECK CONDITION, copies the
 * device's stored sense data into the command first (simulated
 * autosense).  Returns 0 normally, or 1 ("busy") when queued_arr is full.
 * NOTE(review): some interior lines (free-slot scan body, immediate-path
 * completion) are elided in this excerpt.
 */
2703 static int schedule_resp(struct scsi_cmnd * cmnd,
2704 struct sdebug_dev_info * devip,
2705 done_funct_t done, int scsi_result, int delta_jiff)
2707 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2709 struct scsi_device * sdp = cmnd->device;
2711 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2712 "non-zero result=0x%x\n", sdp->host->host_no,
2713 sdp->channel, sdp->id, sdp->lun, scsi_result);
2716 if (cmnd && devip) {
2717 /* simulate autosense by this driver */
2718 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2719 memcpy(cmnd->sense_buffer, devip->sense_buff,
2720 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2721 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
/* non-positive delay: complete the command synchronously */
2723 if (delta_jiff <= 0) {
2725 cmnd->result = scsi_result;
2730 unsigned long iflags;
2732 struct sdebug_queued_cmd * sqcp = NULL;
/* find a free queued_arr slot under the lock */
2734 spin_lock_irqsave(&queued_arr_lock, iflags);
2735 for (k = 0; k < scsi_debug_max_queue; ++k) {
2736 sqcp = &queued_arr[k];
2740 if (k >= scsi_debug_max_queue) {
2741 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2742 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2743 return 1; /* report busy to mid level */
/* stash everything the timer handler needs, then arm the timer */
2746 sqcp->a_cmnd = cmnd;
2747 sqcp->scsi_result = scsi_result;
2748 sqcp->done_funct = done;
2749 sqcp->cmnd_timer.function = timer_intr_handler;
2750 sqcp->cmnd_timer.data = k;
2751 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2752 add_timer(&sqcp->cmnd_timer);
2753 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2759 /* Note: The following macros create attribute files in the
2760 /sys/module/scsi_debug/parameters directory. Unfortunately this
2761 driver is unaware of a change and cannot trigger auxiliary actions
2762 as it can when the corresponding attribute in the
2763 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
/* Module parameters (alphabetical).  S_IWUSR entries are writable at run
 * time via /sys/module/scsi_debug/parameters, but writes there trigger no
 * side effects; the driver-attribute files handle those. */
2765 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2766 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2767 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
2768 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2769 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2770 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2771 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2772 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2773 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2774 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2775 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
2776 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2777 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2778 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2779 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2780 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2781 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2782 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2783 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2784 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2785 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2786 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2787 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2788 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2789 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2790 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2791 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2792 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2793 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2794 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2795 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2796 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2797 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2798 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2799 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2801 module_param_named(write_same_length, scsi_debug_write_same_length, int,
/* Module identity */
2804 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2805 MODULE_DESCRIPTION("SCSI debug adapter driver");
2806 MODULE_LICENSE("GPL");
2807 MODULE_VERSION(SCSI_DEBUG_VERSION);
/* Parameter descriptions shown by modinfo (same order as above) */
2809 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2810 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2811 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
2812 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2813 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2814 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2815 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2816 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2817 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2818 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2819 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2820 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2821 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2822 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2823 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2824 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2825 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2826 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2827 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2828 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
2829 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2830 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2831 MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
2832 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2833 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2834 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2835 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2836 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2837 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2838 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2839 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2840 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2841 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2842 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2843 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2844 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
/* Static buffer returned by scsi_debug_info() (host template .info hook). */
2846 static char sdebug_info[256];
/*
 * .info host-template hook: format a one-line driver summary (version,
 * store size, opts) into the static buffer above and return it.
 * NOTE(review): not reentrant — shared static buffer, as is conventional
 * for this hook.  Tail of the function is elided in this excerpt.
 */
2848 static const char * scsi_debug_info(struct Scsi_Host * shp)
2850 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2851 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2852 scsi_debug_version_date, scsi_debug_dev_size_mb,
2857 /* scsi_debug_proc_info
2858 * Used if the driver currently has no own support for /proc/scsi
/*
 * .write_info hook (/proc/scsi/scsi_debug/<host>): parse an integer from
 * the written text and install it as the new scsi_debug_opts value.
 * Requires CAP_SYS_ADMIN and CAP_SYS_RAWIO.  Resets the every_nth
 * command counter when error injection is active.
 * NOTE(review): local buffer declaration and returns are elided here.
 */
2860 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
/* only look at the first 15 bytes of the user's write */
2864 int minLen = length > 15 ? 15 : length;
2866 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2868 memcpy(arr, buffer, minLen);
2870 if (1 != sscanf(arr, "%d", &opts))
2872 scsi_debug_opts = opts;
2873 if (scsi_debug_every_nth != 0)
2874 scsi_debug_cmnd_count = 0;
/*
 * .show_info hook: dump the driver's configuration (targets, store size,
 * opts, delay, geometry) and running statistics (aborts, resets, DIX/DIF
 * counters) into the seq_file for /proc/scsi/scsi_debug/<host>.
 */
2878 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
2880 seq_printf(m, "scsi_debug adapter driver, version "
2882 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2883 "every_nth=%d(curr:%d)\n"
2884 "delay=%d, max_luns=%d, scsi_level=%d\n"
2885 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2886 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2887 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2888 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2889 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2890 scsi_debug_cmnd_count, scsi_debug_delay,
2891 scsi_debug_max_luns, scsi_debug_scsi_level,
2892 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2893 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2894 num_host_resets, dix_reads, dix_writes, dif_errors);
/*
 * Driver-attribute show/store pairs backing the files in
 * /sys/bus/pseudo/drivers/scsi_debug.  Each show() prints the current
 * value of its module variable; each store() parses and validates the
 * written text, updates the variable, and (where needed) triggers side
 * effects such as sdebug_max_tgts_luns() or a capacity recalculation.
 * Read-only attributes (DRIVER_ATTR_RO) have no store().
 * NOTE(review): return statements and some locals are elided throughout
 * this excerpt.
 */
2898 static ssize_t delay_show(struct device_driver *ddp, char *buf)
2900 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2903 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
2909 if (1 == sscanf(buf, "%10s", work)) {
2910 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2911 scsi_debug_delay = delay;
2917 static DRIVER_ATTR_RW(delay);
2919 static ssize_t opts_show(struct device_driver *ddp, char *buf)
2921 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2924 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
2930 if (1 == sscanf(buf, "%10s", work)) {
/* accept either hex ("0x...") or decimal input */
/* NOTE(review): strnicmp is the legacy name of strncasecmp */
2931 if (0 == strnicmp(work,"0x", 2)) {
2932 if (1 == sscanf(&work[2], "%x", &opts))
2935 if (1 == sscanf(work, "%d", &opts))
2941 scsi_debug_opts = opts;
2942 scsi_debug_cmnd_count = 0;
2945 static DRIVER_ATTR_RW(opts);
2947 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
2949 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2951 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
2956 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2957 scsi_debug_ptype = n;
2962 static DRIVER_ATTR_RW(ptype);
2964 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
2966 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2968 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
2973 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2974 scsi_debug_dsense = n;
2979 static DRIVER_ATTR_RW(dsense);
2981 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
2983 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2985 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
2990 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2991 scsi_debug_fake_rw = n;
2996 static DRIVER_ATTR_RW(fake_rw);
2998 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
3000 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
3002 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
3007 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3008 scsi_debug_no_lun_0 = n;
3013 static DRIVER_ATTR_RW(no_lun_0);
3015 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
3017 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
3019 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
3024 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3025 scsi_debug_num_tgts = n;
/* side effect: re-announce targets/luns to the mid level */
3026 sdebug_max_tgts_luns();
3031 static DRIVER_ATTR_RW(num_tgts);
3033 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3035 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3037 static DRIVER_ATTR_RO(dev_size_mb);
3039 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3041 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3043 static DRIVER_ATTR_RO(num_parts);
3045 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3047 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3049 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
3054 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3055 scsi_debug_every_nth = nth;
3056 scsi_debug_cmnd_count = 0;
3061 static DRIVER_ATTR_RW(every_nth);
3063 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3065 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3067 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3072 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3073 scsi_debug_max_luns = n;
3074 sdebug_max_tgts_luns();
3079 static DRIVER_ATTR_RW(max_luns);
3081 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3083 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3085 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
/* bounded by the static queued_arr capacity */
3090 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3091 (n <= SCSI_DEBUG_CANQUEUE)) {
3092 scsi_debug_max_queue = n;
3097 static DRIVER_ATTR_RW(max_queue);
3099 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3101 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3103 static DRIVER_ATTR_RO(no_uld);
3105 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3107 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3109 static DRIVER_ATTR_RO(scsi_level);
3111 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3113 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3115 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3120 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3121 scsi_debug_virtual_gb = n;
/* side effect: recompute the reported capacity */
3123 sdebug_capacity = get_sdebug_capacity();
3129 static DRIVER_ATTR_RW(virtual_gb);
3131 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3133 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3136 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
/* positive delta adds adapters, negative removes them */
3141 if (sscanf(buf, "%d", &delta_hosts) != 1)
3143 if (delta_hosts > 0) {
3145 sdebug_add_adapter();
3146 } while (--delta_hosts);
3147 } else if (delta_hosts < 0) {
3149 sdebug_remove_adapter();
3150 } while (++delta_hosts);
3154 static DRIVER_ATTR_RW(add_host);
3156 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3158 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3160 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3165 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3166 scsi_debug_vpd_use_hostno = n;
3171 static DRIVER_ATTR_RW(vpd_use_hostno);
3173 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3175 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3177 static DRIVER_ATTR_RO(sector_size);
3179 static ssize_t dix_show(struct device_driver *ddp, char *buf)
3181 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3183 static DRIVER_ATTR_RO(dix);
3185 static ssize_t dif_show(struct device_driver *ddp, char *buf)
3187 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3189 static DRIVER_ATTR_RO(dif);
3191 static ssize_t guard_show(struct device_driver *ddp, char *buf)
3193 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3195 static DRIVER_ATTR_RO(guard);
3197 static ssize_t ato_show(struct device_driver *ddp, char *buf)
3199 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3201 static DRIVER_ATTR_RO(ato);
3203 static ssize_t map_show(struct device_driver *ddp, char *buf)
/* without LBP everything is "mapped": report the whole range */
3207 if (!scsi_debug_lbp())
3208 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3209 sdebug_store_sectors);
3211 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3213 buf[count++] = '\n';
3218 static DRIVER_ATTR_RO(map);
3220 static ssize_t removable_show(struct device_driver *ddp, char *buf)
3222 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3224 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3229 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3230 scsi_debug_removable = (n > 0);
3235 static DRIVER_ATTR_RW(removable);
3237 /* Note: The following array creates attribute files in the
3238 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3239 files (over those found in the /sys/module/scsi_debug/parameters
3240 directory) is that auxiliary actions can be triggered when an attribute
3241 is changed. For example see: add_host_store() above.
/* All driver attributes exposed under /sys/bus/pseudo/drivers/scsi_debug;
 * registered as a group via ATTRIBUTE_GROUPS() below. */
3244 static struct attribute *sdebug_drv_attrs[] = {
3245 &driver_attr_delay.attr,
3246 &driver_attr_opts.attr,
3247 &driver_attr_ptype.attr,
3248 &driver_attr_dsense.attr,
3249 &driver_attr_fake_rw.attr,
3250 &driver_attr_no_lun_0.attr,
3251 &driver_attr_num_tgts.attr,
3252 &driver_attr_dev_size_mb.attr,
3253 &driver_attr_num_parts.attr,
3254 &driver_attr_every_nth.attr,
3255 &driver_attr_max_luns.attr,
3256 &driver_attr_max_queue.attr,
3257 &driver_attr_no_uld.attr,
3258 &driver_attr_scsi_level.attr,
3259 &driver_attr_virtual_gb.attr,
3260 &driver_attr_add_host.attr,
3261 &driver_attr_vpd_use_hostno.attr,
3262 &driver_attr_sector_size.attr,
3263 &driver_attr_dix.attr,
3264 &driver_attr_dif.attr,
3265 &driver_attr_guard.attr,
3266 &driver_attr_ato.attr,
3267 &driver_attr_map.attr,
3268 &driver_attr_removable.attr,
3271 ATTRIBUTE_GROUPS(sdebug_drv);
3273 static struct device *pseudo_primary;
/*
 * Module init: validate the module parameters (sector size, DIF/guard/ato
 * settings, geometry bounds), allocate the shared ramdisk (plus optional
 * DIF store and LBP provisioning bitmap), register the pseudo root
 * device, bus and driver, and finally create the requested number of
 * simulated adapters.
 * NOTE(review): several declarations, branches and the error-unwinding
 * labels are elided in this excerpt.
 */
3275 static int __init scsi_debug_init(void)
/* only standard logical block sizes are accepted */
3282 switch (scsi_debug_sector_size) {
3289 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3290 scsi_debug_sector_size);
3294 switch (scsi_debug_dif) {
3296 case SD_DIF_TYPE0_PROTECTION:
3297 case SD_DIF_TYPE1_PROTECTION:
3298 case SD_DIF_TYPE2_PROTECTION:
3299 case SD_DIF_TYPE3_PROTECTION:
3303 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3307 if (scsi_debug_guard > 1) {
3308 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3312 if (scsi_debug_ato > 1) {
3313 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3317 if (scsi_debug_physblk_exp > 15) {
3318 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3319 scsi_debug_physblk_exp);
/* 0x3fff: lowest_aligned must fit the 14-bit READ CAPACITY(16) field */
3323 if (scsi_debug_lowest_aligned > 0x3fff) {
3324 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3325 scsi_debug_lowest_aligned);
3329 if (scsi_debug_dev_size_mb < 1)
3330 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3331 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3332 sdebug_store_sectors = sz / scsi_debug_sector_size;
3333 sdebug_capacity = get_sdebug_capacity();
3335 /* play around with geometry, don't waste too much on track 0 */
3337 sdebug_sectors_per = 32;
3338 if (scsi_debug_dev_size_mb >= 16)
3340 else if (scsi_debug_dev_size_mb >= 256)
3342 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3343 (sdebug_sectors_per * sdebug_heads);
3344 if (sdebug_cylinders_per >= 1024) {
3345 /* other LLDs do this; implies >= 1GB ram disk ... */
3347 sdebug_sectors_per = 63;
3348 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3349 (sdebug_sectors_per * sdebug_heads);
/* the shared ramdisk backing all simulated devices */
3352 fake_storep = vmalloc(sz);
3353 if (NULL == fake_storep) {
3354 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3357 memset(fake_storep, 0, sz);
3358 if (scsi_debug_num_parts > 0)
3359 sdebug_build_parts(fake_storep, sz);
/* optional per-sector protection-information store for DIX/DIF */
3361 if (scsi_debug_dix) {
3364 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3365 dif_storep = vmalloc(dif_size);
3367 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3368 dif_size, dif_storep);
3370 if (dif_storep == NULL) {
3371 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
/* 0xff = "no protection info yet" fill pattern */
3376 memset(dif_storep, 0xff, dif_size);
3379 /* Logical Block Provisioning */
3380 if (scsi_debug_lbp()) {
3381 scsi_debug_unmap_max_blocks =
3382 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3384 scsi_debug_unmap_max_desc =
3385 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3387 scsi_debug_unmap_granularity =
3388 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3390 if (scsi_debug_unmap_alignment &&
3391 scsi_debug_unmap_granularity <=
3392 scsi_debug_unmap_alignment) {
3394 "%s: ERR: unmap_granularity <= unmap_alignment\n",
/* one bit per provisioning block in map_storep */
3399 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3400 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3402 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3405 if (map_storep == NULL) {
3406 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3411 bitmap_zero(map_storep, map_size);
3413 /* Map first 1KB for partition table */
3414 if (scsi_debug_num_parts)
/* register root device, pseudo bus and driver, in that order */
3418 pseudo_primary = root_device_register("pseudo_0");
3419 if (IS_ERR(pseudo_primary)) {
3420 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3421 ret = PTR_ERR(pseudo_primary);
3424 ret = bus_register(&pseudo_lld_bus);
3426 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3430 ret = driver_register(&sdebug_driverfs_driver);
3432 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
/* add_host counts successfully created adapters from zero */
3439 host_to_add = scsi_debug_add_host;
3440 scsi_debug_add_host = 0;
3442 for (k = 0; k < host_to_add; k++) {
3443 if (sdebug_add_adapter()) {
3444 printk(KERN_ERR "scsi_debug_init: "
3445 "sdebug_add_adapter failed k=%d\n", k);
3450 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3451 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3452 scsi_debug_add_host);
/* error unwinding (labels elided): undo registrations in reverse */
3457 bus_unregister(&pseudo_lld_bus);
3459 root_device_unregister(pseudo_primary);
/*
 * Module exit: tear down every simulated adapter, then unregister the
 * driver, pseudo bus and root device in reverse order of creation
 * (store frees are elided in this excerpt).
 */
3470 static void __exit scsi_debug_exit(void)
3472 int k = scsi_debug_add_host;
3476 sdebug_remove_adapter();
3477 driver_unregister(&sdebug_driverfs_driver);
3478 bus_unregister(&pseudo_lld_bus);
3479 root_device_unregister(pseudo_primary);
3487 device_initcall(scsi_debug_init);
3488 module_exit(scsi_debug_exit);
/*
 * device.release callback for a pseudo adapter: recover the containing
 * sdebug_host_info from the struct device (freeing is elided here).
 */
3490 static void sdebug_release_adapter(struct device * dev)
3492 struct sdebug_host_info *sdbg_host;
3494 sdbg_host = to_sdebug_host(dev);
/*
 * Create one simulated adapter: allocate its host info, pre-create
 * num_tgts * max_luns device info entries, link it onto the global host
 * list and register its pseudo device (which triggers the LLD probe).
 * On failure, unwinds the per-device allocations.  Returns 0 on success.
 */
3498 static int sdebug_add_adapter(void)
3500 int k, devs_per_host;
3502 struct sdebug_host_info *sdbg_host;
3503 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3505 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3506 if (NULL == sdbg_host) {
3507 printk(KERN_ERR "%s: out of memory at line %d\n",
3508 __func__, __LINE__);
3512 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3514 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3515 for (k = 0; k < devs_per_host; k++) {
3516 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3517 if (!sdbg_devinfo) {
3518 printk(KERN_ERR "%s: out of memory at line %d\n",
3519 __func__, __LINE__);
3525 spin_lock(&sdebug_host_list_lock);
3526 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3527 spin_unlock(&sdebug_host_list_lock);
3529 sdbg_host->dev.bus = &pseudo_lld_bus;
3530 sdbg_host->dev.parent = pseudo_primary;
3531 sdbg_host->dev.release = &sdebug_release_adapter;
3532 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
/* device_register triggers the pseudo bus probe for this adapter */
3534 error = device_register(&sdbg_host->dev);
3539 ++scsi_debug_add_host;
/* error path: free any per-device entries created above */
3543 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3545 list_del(&sdbg_devinfo->dev_list);
3546 kfree(sdbg_devinfo);
/*
 * Remove the most recently added simulated adapter (LIFO: takes the tail
 * of sdebug_host_list).  Unlinking happens under the list lock; the
 * device_unregister() then drops the last reference, which ends up in
 * sdebug_release_adapter().  If the list is empty this is a no-op
 * (an early return is presumably in the elided lines — confirm).
 */
3553 static void sdebug_remove_adapter(void)
3555 struct sdebug_host_info * sdbg_host = NULL;
3557 spin_lock(&sdebug_host_list_lock);
3558 if (!list_empty(&sdebug_host_list)) {
3559 sdbg_host = list_entry(sdebug_host_list.prev,
3560 struct sdebug_host_info, host_list);
3561 list_del(&sdbg_host->host_list);
3563 spin_unlock(&sdebug_host_list_lock);
3568 device_unregister(&sdbg_host->dev);
3569 --scsi_debug_add_host;
/*
 * Main command dispatcher for the simulated host.  Decodes the CDB in
 * SCpnt->cmnd, validates target/LUN, optionally injects errors every
 * scsi_debug_every_nth commands, then dispatches per opcode to the
 * resp_*() handlers.  The computed status (errsts) is delivered through
 * schedule_resp(), which honors the configured response delay.
 * Returns 0 normally, or returning 0 early without calling done()
 * simulates a command timeout.
 */
3573 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3575 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3578 unsigned long long lba;
3581 int target = SCpnt->device->id;
3582 struct sdebug_dev_info *devip = NULL;
3583 int inj_recovered = 0;
3584 int inj_transport = 0;
3588 int delay_override = 0;
3591 scsi_set_resid(SCpnt, 0);
/* Optional CDB hex dump when the NOISE debug option is set. */
3592 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3593 printk(KERN_INFO "scsi_debug: cmd ");
3594 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3595 printk("%02x ", (int)cmd[k]);
/* Reject commands addressed to the initiator's own id. */
3599 if (target == SCpnt->device->host->hostt->this_id) {
3600 printk(KERN_INFO "scsi_debug: initiator's id used as "
3602 return schedule_resp(SCpnt, NULL, done,
3603 DID_NO_CONNECT << 16, 0);
/* Out-of-range LUNs get DID_NO_CONNECT, except the well-known
 * REPORT LUNS wlun which is always addressable. */
3606 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3607 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3608 return schedule_resp(SCpnt, NULL, done,
3609 DID_NO_CONNECT << 16, 0);
3610 devip = devInfoReg(SCpnt->device);
3612 return schedule_resp(SCpnt, NULL, done,
3613 DID_NO_CONNECT << 16, 0);
/* Periodic error injection: every |every_nth| commands, apply the
 * behavior selected by scsi_debug_opts (timeout, recovered error,
 * transport error, DIF/DIX error, short transfer...). */
3615 if ((scsi_debug_every_nth != 0) &&
3616 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3617 scsi_debug_cmnd_count = 0;
3618 if (scsi_debug_every_nth < -1)
3619 scsi_debug_every_nth = -1;
3620 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3621 return 0; /* ignore command causing timeout */
3622 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3623 scsi_medium_access_command(SCpnt))
3624 return 0; /* time out reads and writes */
3625 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3626 inj_recovered = 1; /* to reads and writes below */
3627 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3628 inj_transport = 1; /* to reads and writes below */
3629 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3630 inj_dif = 1; /* to reads and writes below */
3631 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3632 inj_dix = 1; /* to reads and writes below */
3633 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
/* The REPORT LUNS wlun only accepts a small command subset; anything
 * else gets ILLEGAL REQUEST. */
3641 case TEST_UNIT_READY:
3643 break; /* only allowable wlun commands */
3645 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3646 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3647 "not supported for wlun\n", *cmd);
3648 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3650 errsts = check_condition_result;
3651 return schedule_resp(SCpnt, devip, done, errsts,
/* --- main opcode dispatch --- */
3657 case INQUIRY: /* mandatory, ignore unit attention */
3659 errsts = resp_inquiry(SCpnt, target, devip);
3661 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3663 errsts = resp_requests(SCpnt, devip);
3665 case REZERO_UNIT: /* actually this is REWIND for SSC */
3667 errsts = resp_start_stop(SCpnt, devip);
3669 case ALLOW_MEDIUM_REMOVAL:
3670 errsts = check_readiness(SCpnt, 1, devip);
3673 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3674 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3675 cmd[4] ? "inhibited" : "enabled");
3677 case SEND_DIAGNOSTIC: /* mandatory */
3678 errsts = check_readiness(SCpnt, 1, devip);
3680 case TEST_UNIT_READY: /* mandatory */
3682 errsts = check_readiness(SCpnt, 0, devip);
3685 errsts = check_readiness(SCpnt, 1, devip);
3688 errsts = check_readiness(SCpnt, 1, devip);
3691 errsts = check_readiness(SCpnt, 1, devip);
3694 errsts = check_readiness(SCpnt, 1, devip);
3697 errsts = resp_readcap(SCpnt, devip);
3699 case SERVICE_ACTION_IN:
/* Only READ CAPACITY(16) and GET LBA STATUS service actions are
 * implemented; GET LBA STATUS additionally requires logical block
 * provisioning (scsi_debug_lbp()) to be enabled. */
3700 if (cmd[1] == SAI_READ_CAPACITY_16)
3701 errsts = resp_readcap16(SCpnt, devip);
3702 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3704 if (scsi_debug_lbp() == 0) {
3705 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3706 INVALID_COMMAND_OPCODE, 0);
3707 errsts = check_condition_result;
3709 errsts = resp_get_lba_status(SCpnt, devip);
3711 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3713 errsts = check_condition_result;
3716 case MAINTENANCE_IN:
3717 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3718 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3720 errsts = check_condition_result;
3723 errsts = resp_report_tgtpgs(SCpnt, devip);
3728 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3729 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3731 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3732 INVALID_COMMAND_OPCODE, 0);
3733 errsts = check_condition_result;
/* Warn (cmd[1] RDPROTECT bits clear) when an unprotected read hits
 * a DIF type 1/3 formatted device. */
3737 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3738 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3739 (cmd[1] & 0xe0) == 0)
3740 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3745 errsts = check_readiness(SCpnt, 0, devip);
3748 if (scsi_debug_fake_rw)
3750 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3755 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
/* Apply any error injection selected earlier, only if the read
 * itself succeeded. */
3756 if (inj_recovered && (0 == errsts)) {
3757 mk_sense_buffer(devip, RECOVERED_ERROR,
3758 THRESHOLD_EXCEEDED, 0);
3759 errsts = check_condition_result;
3760 } else if (inj_transport && (0 == errsts)) {
3761 mk_sense_buffer(devip, ABORTED_COMMAND,
3762 TRANSPORT_PROBLEM, ACK_NAK_TO);
3763 errsts = check_condition_result;
3764 } else if (inj_dif && (0 == errsts)) {
3765 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3766 errsts = illegal_condition_result;
3767 } else if (inj_dix && (0 == errsts)) {
3768 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3769 errsts = illegal_condition_result;
3772 case REPORT_LUNS: /* mandatory, ignore unit attention */
3774 errsts = resp_report_luns(SCpnt, devip);
3776 case VERIFY: /* 10 byte SBC-2 command */
3777 errsts = check_readiness(SCpnt, 0, devip);
3782 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3783 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3785 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3786 INVALID_COMMAND_OPCODE, 0);
3787 errsts = check_condition_result;
3791 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3792 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3793 (cmd[1] & 0xe0) == 0)
3794 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3799 errsts = check_readiness(SCpnt, 0, devip);
3802 if (scsi_debug_fake_rw)
3804 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3805 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
/* Same injection pattern as the read path above. */
3806 if (inj_recovered && (0 == errsts)) {
3807 mk_sense_buffer(devip, RECOVERED_ERROR,
3808 THRESHOLD_EXCEEDED, 0);
3809 errsts = check_condition_result;
3810 } else if (inj_dif && (0 == errsts)) {
3811 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3812 errsts = illegal_condition_result;
3813 } else if (inj_dix && (0 == errsts)) {
3814 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3815 errsts = illegal_condition_result;
/* WRITE SAME requires the matching LBP write-same capability bit. */
3821 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3822 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3823 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3824 INVALID_FIELD_IN_CDB, 0);
3825 errsts = check_condition_result;
3831 errsts = check_readiness(SCpnt, 0, devip);
3834 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3835 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3838 errsts = check_readiness(SCpnt, 0, devip);
/* UNMAP is only valid when unmap descriptors and LBPU are enabled. */
3842 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3843 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3844 INVALID_COMMAND_OPCODE, 0);
3845 errsts = check_condition_result;
3847 errsts = resp_unmap(SCpnt, devip);
3851 errsts = resp_mode_sense(SCpnt, target, devip);
3854 errsts = resp_mode_select(SCpnt, 1, devip);
3856 case MODE_SELECT_10:
3857 errsts = resp_mode_select(SCpnt, 0, devip);
3860 errsts = resp_log_sense(SCpnt, devip);
3862 case SYNCHRONIZE_CACHE:
3864 errsts = check_readiness(SCpnt, 0, devip);
3867 errsts = check_readiness(SCpnt, 1, devip);
3869 case XDWRITEREAD_10:
/* XDWRITEREAD must arrive as a bidirectional command. */
3870 if (!scsi_bidi_cmnd(SCpnt)) {
3871 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3872 INVALID_FIELD_IN_CDB, 0);
3873 errsts = check_condition_result;
3877 errsts = check_readiness(SCpnt, 0, devip);
3880 if (scsi_debug_fake_rw)
3882 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
/* Bidirectional: read, then write, then the XOR response. */
3883 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3886 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3889 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3891 case VARIABLE_LENGTH_CMD:
/* 32-byte CDBs: service action is in cmd[9], protection bits in
 * cmd[10]; only READ_32/WRITE_32 are handled. */
3892 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3894 if ((cmd[10] & 0xe0) == 0)
3896 "Unprotected RD/WR to DIF device\n");
3898 if (cmd[9] == READ_32) {
3899 BUG_ON(SCpnt->cmd_len < 32);
3903 if (cmd[9] == WRITE_32) {
3904 BUG_ON(SCpnt->cmd_len < 32);
3909 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3910 INVALID_FIELD_IN_CDB, 0);
3911 errsts = check_condition_result;
/* Unknown opcode: report INVALID OPCODE unless a unit attention is
 * already pending (which takes precedence). */
3915 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3916 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3917 "supported\n", *cmd);
3918 errsts = check_readiness(SCpnt, 1, devip);
3920 break; /* Unit attention takes precedence */
3921 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3922 errsts = check_condition_result;
/* Deliver the result, possibly after the configured delay. */
3925 return schedule_resp(SCpnt, devip, done,
3926 (delay_override ? 0 : scsi_debug_delay));
/* Generate the locked->unlocked queuecommand wrapper expected by the
 * midlayer from scsi_debug_queuecommand_lck(). */
3929 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
/*
 * SCSI host template for the simulated adapter.  can_queue and
 * use_clustering are defaults only; sdebug_driver_probe() overrides them
 * from module parameters before scsi_host_alloc().
 */
3931 static struct scsi_host_template sdebug_driver_template = {
3932 .show_info = scsi_debug_show_info,
3933 .write_info = scsi_debug_write_info,
3934 .proc_name = sdebug_proc_name,
3935 .name = "SCSI DEBUG",
3936 .info = scsi_debug_info,
3937 .slave_alloc = scsi_debug_slave_alloc,
3938 .slave_configure = scsi_debug_slave_configure,
3939 .slave_destroy = scsi_debug_slave_destroy,
3940 .ioctl = scsi_debug_ioctl,
3941 .queuecommand = scsi_debug_queuecommand,
3942 .eh_abort_handler = scsi_debug_abort,
3943 .eh_bus_reset_handler = scsi_debug_bus_reset,
3944 .eh_device_reset_handler = scsi_debug_device_reset,
3945 .eh_host_reset_handler = scsi_debug_host_reset,
3946 .bios_param = scsi_debug_biosparam,
3947 .can_queue = SCSI_DEBUG_CANQUEUE,
3949 .sg_tablesize = 256,
3951 .max_sectors = 0xffff,
3952 .use_clustering = DISABLE_CLUSTERING,
3953 .module = THIS_MODULE,
/*
 * Bus probe callback: allocate and register a Scsi_Host for one
 * simulated adapter, configure its id/LUN limits and DIF/DIX protection
 * capabilities from the scsi_debug_* module parameters, then scan it.
 */
3956 static int sdebug_driver_probe(struct device * dev)
3959 struct sdebug_host_info *sdbg_host;
3960 struct Scsi_Host *hpnt;
3963 sdbg_host = to_sdebug_host(dev);
/* Apply runtime-tunable overrides to the shared template before alloc. */
3965 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3966 if (scsi_debug_clustering)
3967 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
3968 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3970 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
/* Cross-link host_info and Scsi_Host (hostdata stores a back pointer). */
3975 sdbg_host->shost = hpnt;
3976 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
/* Leave room for this_id within the id range when it would collide. */
3977 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3978 hpnt->max_id = scsi_debug_num_tgts + 1;
3980 hpnt->max_id = scsi_debug_num_tgts;
3981 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
/* Translate the configured DIF type into SHOST protection flags;
 * DIX flags are added conditionally (conditions partially elided). */
3985 switch (scsi_debug_dif) {
3987 case SD_DIF_TYPE1_PROTECTION:
3988 host_prot = SHOST_DIF_TYPE1_PROTECTION;
3990 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3993 case SD_DIF_TYPE2_PROTECTION:
3994 host_prot = SHOST_DIF_TYPE2_PROTECTION;
3996 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3999 case SD_DIF_TYPE3_PROTECTION:
4000 host_prot = SHOST_DIF_TYPE3_PROTECTION;
4002 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4007 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4011 scsi_host_set_prot(hpnt, host_prot);
4013 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4014 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4015 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4016 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4017 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4018 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4019 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4020 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard==1 selects IP checksum guard tags, otherwise T10 CRC. */
4022 if (scsi_debug_guard == 1)
4023 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4025 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4027 error = scsi_add_host(hpnt, &sdbg_host->dev);
4029 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4031 scsi_host_put(hpnt);
4033 scsi_scan_host(hpnt);
/*
 * Bus remove callback: detach the Scsi_Host from the midlayer, free all
 * per-device state hanging off the host, and drop the host reference
 * taken in sdebug_driver_probe().
 */
4039 static int sdebug_driver_remove(struct device * dev)
4041 struct sdebug_host_info *sdbg_host;
4042 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4044 sdbg_host = to_sdebug_host(dev);
4047 printk(KERN_ERR "%s: Unable to locate host info\n",
4052 scsi_remove_host(sdbg_host->shost);
4054 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4056 list_del(&sdbg_devinfo->dev_list);
4057 kfree(sdbg_devinfo);
4060 scsi_host_put(sdbg_host->shost);
/*
 * Bus match callback for the pseudo bus.  Body elided from this view;
 * presumably matches unconditionally (returns 1) so every adapter device
 * binds to the single sdebug driver — confirm against the full source.
 */
4064 static int pseudo_lld_bus_match(struct device *dev,
4065 struct device_driver *dev_driver)
/*
 * The fake bus that simulated adapters hang off.  probe/remove route the
 * driver-core binding events to the sdebug host setup/teardown above.
 */
4070 static struct bus_type pseudo_lld_bus = {
4072 .match = pseudo_lld_bus_match,
4073 .probe = sdebug_driver_probe,
4074 .remove = sdebug_driver_remove,
4075 .drv_groups = sdebug_drv_groups,