2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
50 #include <net/checksum.h>
52 #include <asm/unaligned.h>
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
64 #include "scsi_logging.h"
66 #define SCSI_DEBUG_VERSION "1.85"
67 static const char *scsi_debug_version_date = "20141022";
69 #define MY_NAME "scsi_debug"
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
75 #define UNRECOVERED_READ_ERR 0x11
76 #define PARAMETER_LIST_LENGTH_ERR 0x1a
77 #define INVALID_OPCODE 0x20
78 #define LBA_OUT_OF_RANGE 0x21
79 #define INVALID_FIELD_IN_CDB 0x24
80 #define INVALID_FIELD_IN_PARAM_LIST 0x26
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST 1
101 #define DEF_NUM_TGTS 1
102 #define DEF_MAX_LUNS 1
103 /* With these defaults, this driver will make 1 host with 1 target
104 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107 #define DEF_DELAY 1 /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB 8
111 #define DEF_D_SENSE 0
112 #define DEF_EVERY_NTH 0
113 #define DEF_FAKE_RW 0
115 #define DEF_HOST_LOCK 0
118 #define DEF_LBPWS10 0
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0 0
123 #define DEF_NUM_PARTS 0
125 #define DEF_OPT_BLKS 64
126 #define DEF_PHYSBLK_EXP 0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB 0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
139 #define DELAY_OVERRIDDEN -9999
141 /* bit mask values for scsi_debug_opts */
142 #define SCSI_DEBUG_OPT_NOISE 1
143 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
144 #define SCSI_DEBUG_OPT_TIMEOUT 4
145 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
146 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
147 #define SCSI_DEBUG_OPT_DIF_ERR 32
148 #define SCSI_DEBUG_OPT_DIX_ERR 64
149 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
150 #define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100
151 #define SCSI_DEBUG_OPT_Q_NOISE 0x200
152 #define SCSI_DEBUG_OPT_ALL_TSF 0x400
153 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
154 #define SCSI_DEBUG_OPT_N_WCE 0x1000
155 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
156 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
157 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
158 /* When "every_nth" > 0 then modulo "every_nth" commands:
159 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
160 * - a RECOVERED_ERROR is simulated on successful read and write
161 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
162 * - a TRANSPORT_ERROR is simulated on successful read and write
163 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
165 * When "every_nth" < 0 then after "- every_nth" commands:
166 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
167 * - a RECOVERED_ERROR is simulated on successful read and write
168 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
169 * - a TRANSPORT_ERROR is simulated on successful read and write
170 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
171 * This will continue until some other action occurs (e.g. the user
172 * writing a new value (other than -1 or 1) to every_nth via sysfs).
175 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
176 * priority order. In the subset implemented here lower numbers have higher
177 * priority. The UA numbers should be a sequence starting from 0 with
178 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
179 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
180 #define SDEBUG_UA_BUS_RESET 1
181 #define SDEBUG_UA_MODE_CHANGED 2
182 #define SDEBUG_UA_CAPACITY_CHANGED 3
183 #define SDEBUG_NUM_UAS 4
185 /* for check_readiness() */
186 #define UAS_ONLY 1 /* check for UAs only */
187 #define UAS_TUR 0 /* if no UAs then check if media access possible */
189 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
190 * sector on read commands: */
191 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
192 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
194 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
195 * or "peripheral device" addressing (value 0) */
196 #define SAM2_LUN_ADDRESS_METHOD 0
197 #define SAM2_WLUN_REPORT_LUNS 0xc101
199 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
200 * (for response) at one time. Can be reduced by max_queue option. Command
201 * responses are not queued when delay=0 and ndelay=0. The per-device
202 * DEF_CMD_PER_LUN can be changed via sysfs:
203 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
204 * SCSI_DEBUG_CANQUEUE. */
205 #define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */
206 #define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
207 #define DEF_CMD_PER_LUN 255
209 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
210 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
213 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
/* Compact internal command indexes. Incoming cdb[0] opcodes are mapped
 * (via opcode_ind_arr[] below) onto these values, which in turn index
 * opcode_info_arr[].
 * NOTE(review): values 1 (SDEB_I_INQUIRY) and 24 (SDEB_I_UNMAP) are
 * referenced by the tables below but their enumerators are not visible
 * in this extract -- confirm against the original file. */
214 enum sdeb_opcode_index {
215 SDEB_I_INVALID_OPCODE = 0,
217 SDEB_I_REPORT_LUNS = 2,
218 SDEB_I_REQUEST_SENSE = 3,
219 SDEB_I_TEST_UNIT_READY = 4,
220 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
221 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
222 SDEB_I_LOG_SENSE = 7,
223 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
224 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
225 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
226 SDEB_I_START_STOP = 11,
227 SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
228 SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
229 SDEB_I_MAINT_IN = 14,
230 SDEB_I_MAINT_OUT = 15,
231 SDEB_I_VERIFY = 16, /* 10 only */
232 SDEB_I_VARIABLE_LEN = 17,
233 SDEB_I_RESERVE = 18, /* 6, 10 */
234 SDEB_I_RELEASE = 19, /* 6, 10 */
235 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
236 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
237 SDEB_I_ATA_PT = 22, /* 12, 16 */
238 SDEB_I_SEND_DIAG = 23,
240 SDEB_I_XDWRITEREAD = 25, /* 10 only */
241 SDEB_I_WRITE_BUFFER = 26,
242 SDEB_I_WRITE_SAME = 27, /* 10, 16 */
243 SDEB_I_SYNC_CACHE = 28, /* 10 only */
244 SDEB_I_COMP_WRITE = 29,
245 SDEB_I_LAST_ELEMENT = 30, /* keep this last */
/* First-level decode table: indexed by cdb[0] (0x00..0xff), yields a
 * SDEB_I_* index into opcode_info_arr[]. Entries left at 0 decode to
 * SDEB_I_INVALID_OPCODE. Rows are grouped by cdb length per the SCSI
 * opcode ranges noted in the comments. */
248 static const unsigned char opcode_ind_arr[256] = {
249 /* 0x0; 0x0->0x1f: 6 byte cdbs */
250 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
252 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
253 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
255 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
256 SDEB_I_ALLOW_REMOVAL, 0,
257 /* 0x20; 0x20->0x3f: 10 byte cdbs */
258 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
259 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
260 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
261 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
262 /* 0x40; 0x40->0x5f: 10 byte cdbs */
263 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
264 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
265 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
267 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
268 /* 0x60; 0x60->0x7d are reserved */
269 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
270 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
271 0, SDEB_I_VARIABLE_LEN,
272 /* 0x80; 0x80->0x9f: 16 byte cdbs */
273 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
274 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
275 0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
276 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
277 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
278 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
279 SDEB_I_MAINT_OUT, 0, 0, 0,
280 SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
282 0, 0, 0, 0, 0, 0, 0, 0,
283 0, 0, 0, 0, 0, 0, 0, 0,
284 /* 0xc0; 0xc0->0xff: vendor specific */
285 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
286 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
287 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
288 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
293 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
295 #define F_RL_WLUN_OK 0x10
296 #define F_SKIP_UA 0x20
297 #define F_DELAY_OVERR 0x40
298 #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
299 #define F_SA_HIGH 0x100 /* as used by variable length cdbs */
300 #define F_INV_OP 0x200
301 #define F_FAKE_RW 0x400
302 #define F_M_ACCESS 0x800 /* media access */
304 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
305 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
306 #define FF_SA (F_SA_HIGH | F_SA_LOW)
308 struct sdebug_dev_info;
309 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
310 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
311 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
312 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
313 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
314 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
315 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
316 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
317 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
/* Descriptor for one supported SCSI command (or service action) used
 * by the table-driven cdb decode. Leaf entries have num_attached == 0;
 * a non-zero num_attached entry is the "preferred" cdb form and chains
 * to its alternate forms (e.g. 6/10/12 byte variants) through arrp. */
330 struct opcode_info_t {
331 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff
332 * for terminating element */
333 u8 opcode; /* if num_attached > 0, preferred */
334 u16 sa; /* service action */
335 u32 flags; /* OR-ed set of SDEB_F_* */
336 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
337 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
338 u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
339 /* ignore cdb bytes after position 15 */
/* "Attached" sub-arrays referenced from opcode_info_arr[] entries whose
 * num_attached > 0. Each holds the alternate cdb forms of a command
 * (e.g. READ(10)/READ(6)/READ(12) attached to the preferred READ(16)).
 * len_mask[0] is the cdb length; subsequent bytes mask the valid bits
 * of cdb[1] onward (used by the strict cdb checking option). */
342 static const struct opcode_info_t msense_iarr[1] = {
343 {0, 0x1a, 0, F_D_IN, NULL, NULL,
344 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
347 static const struct opcode_info_t mselect_iarr[1] = {
348 {0, 0x15, 0, F_D_OUT, NULL, NULL,
349 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
352 static const struct opcode_info_t read_iarr[3] = {
353 {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
354 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
356 {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
357 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
358 {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
359 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
363 static const struct opcode_info_t write_iarr[3] = {
364 {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
365 {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
367 {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
368 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
369 {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
370 {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
374 static const struct opcode_info_t sa_in_iarr[1] = {
375 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
376 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
377 0xff, 0xff, 0xff, 0, 0xc7} },
380 static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
381 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
382 NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
383 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
386 static const struct opcode_info_t maint_in_iarr[2] = {
387 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
388 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
390 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
391 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
395 static const struct opcode_info_t write_same_iarr[1] = {
396 {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
397 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
398 0xff, 0xff, 0xff, 0x1f, 0xc7} },
401 static const struct opcode_info_t reserve_iarr[1] = {
402 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
403 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
406 static const struct opcode_info_t release_iarr[1] = {
407 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
408 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
412 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
413 * plus the terminating elements for logic that scans this table such as
414 * REPORT SUPPORTED OPERATION CODES. */
/* Initializer order per struct opcode_info_t:
 * {num_attached, opcode, sa, flags, pfp, arrp, len_mask[16]} */
415 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
417 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
418 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
419 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
420 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
421 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
422 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
424 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
425 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
426 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
427 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
428 {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
429 {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
431 {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
432 {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
433 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
434 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
436 {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
437 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
439 {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
440 {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
441 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
443 {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
444 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
445 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
446 {0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
447 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
448 {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
449 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
450 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
451 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
452 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
453 {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
454 {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
456 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
457 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
458 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
459 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
460 {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
461 vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
462 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
463 {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
464 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
466 {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
467 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
470 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
471 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
472 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
473 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
474 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
475 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* NOTE(review): the entry below places F_D_OUT in the u16 sa slot and 0
 * in the u32 flags slot (field order is num_attached, opcode, sa, flags).
 * That looks like the sa and flags arguments are transposed -- confirm
 * against SEND DIAGNOSTIC (0x1d) handling before relying on it. */
476 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
477 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
478 {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
479 {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
480 {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
481 NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
483 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */
484 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
485 {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
486 write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
487 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
488 {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
489 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
491 {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
492 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
493 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
496 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
497 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
500 struct sdebug_scmd_extra_t {
508 static int scsi_debug_add_host = DEF_NUM_HOST;
509 static int scsi_debug_ato = DEF_ATO;
510 static int scsi_debug_delay = DEF_DELAY;
511 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
512 static int scsi_debug_dif = DEF_DIF;
513 static int scsi_debug_dix = DEF_DIX;
514 static int scsi_debug_dsense = DEF_D_SENSE;
515 static int scsi_debug_every_nth = DEF_EVERY_NTH;
516 static int scsi_debug_fake_rw = DEF_FAKE_RW;
517 static unsigned int scsi_debug_guard = DEF_GUARD;
518 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
519 static int scsi_debug_max_luns = DEF_MAX_LUNS;
520 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
521 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
522 static int scsi_debug_ndelay = DEF_NDELAY;
523 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
524 static int scsi_debug_no_uld = 0;
525 static int scsi_debug_num_parts = DEF_NUM_PARTS;
526 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
527 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
528 static int scsi_debug_opts = DEF_OPTS;
529 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
530 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
531 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
532 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
533 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
534 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
535 static unsigned int scsi_debug_lbpu = DEF_LBPU;
536 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
537 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
538 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
539 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
540 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
541 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
542 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
543 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
544 static bool scsi_debug_removable = DEF_REMOVABLE;
545 static bool scsi_debug_clustering;
546 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
547 static bool scsi_debug_strict = DEF_STRICT;
548 static bool sdebug_any_injecting_opt;
550 static atomic_t sdebug_cmnd_count;
551 static atomic_t sdebug_completions;
552 static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */
554 #define DEV_READONLY(TGT) (0)
556 static unsigned int sdebug_store_sectors;
557 static sector_t sdebug_capacity; /* in sectors */
559 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
560 may still need them */
561 static int sdebug_heads; /* heads per disk */
562 static int sdebug_cylinders_per; /* cylinders per surface */
563 static int sdebug_sectors_per; /* sectors per cylinder */
565 #define SDEBUG_MAX_PARTS 4
567 #define SCSI_DEBUG_MAX_CMD_LEN 32
/* Returns non-zero when logical block provisioning is in effect: fake_rw
 * must be off and at least one of the lbpu/lbpws/lbpws10 options set. */
569 static unsigned int scsi_debug_lbp(void)
571 return ((0 == scsi_debug_fake_rw) &&
572 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
/* Per logical unit (simulated device) state, linked off its host's
 * dev_info_list. NOTE(review): several fields (target/lun ids, etc.) and
 * the closing brace are not visible in this extract. */
575 struct sdebug_dev_info {
576 struct list_head dev_list;
577 unsigned int channel;
580 struct sdebug_host_info *sdbg_host;
581 unsigned long uas_bm[1]; /* pending Unit Attention bitmap (SDEBUG_UA_*) */
583 char stopped; /* TODO: should be atomic */
/* Per simulated host adapter state; all hosts are linked on
 * sdebug_host_list and each owns a list of its devices. */
587 struct sdebug_host_info {
588 struct list_head host_list;
589 struct Scsi_Host *shost;
591 struct list_head dev_info_list;
594 #define to_sdebug_host(d) \
595 container_of(d, struct sdebug_host_info, dev)
597 static LIST_HEAD(sdebug_host_list);
598 static DEFINE_SPINLOCK(sdebug_host_list_lock);
601 struct sdebug_hrtimer { /* ... is derived from hrtimer */
602 struct hrtimer hrt; /* must be first element */
/* Bookkeeping for a command awaiting its (delayed) response. A slot in
 * queued_arr[] is live iff its bit is set in queued_in_use_bm[]; exactly
 * one of the timer/tasklet/hrtimer completion mechanisms is used per
 * command depending on the delay/ndelay options. */
606 struct sdebug_queued_cmd {
607 /* in_use flagged by a bit in queued_in_use_bm[] */
608 struct timer_list *cmnd_timerp;
609 struct tasklet_struct *tletp;
610 struct sdebug_hrtimer *sd_hrtp;
611 struct scsi_cmnd * a_cmnd;
613 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
614 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
617 static unsigned char * fake_storep; /* ramdisk storage */
618 static struct sd_dif_tuple *dif_storep; /* protection info */
619 static void *map_storep; /* provisioning map */
621 static unsigned long map_size;
622 static int num_aborts;
623 static int num_dev_resets;
624 static int num_target_resets;
625 static int num_bus_resets;
626 static int num_host_resets;
627 static int dix_writes;
628 static int dix_reads;
629 static int dif_errors;
631 static DEFINE_SPINLOCK(queued_arr_lock);
632 static DEFINE_RWLOCK(atomic_rw);
634 static char sdebug_proc_name[] = MY_NAME;
635 static const char *my_name = MY_NAME;
637 static struct bus_type pseudo_lld_bus;
639 static struct device_driver sdebug_driverfs_driver = {
640 .name = sdebug_proc_name,
641 .bus = &pseudo_lld_bus,
644 static const int check_condition_result =
645 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
647 static const int illegal_condition_result =
648 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
650 static const int device_qfull_result =
651 (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
653 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
654 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
656 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
658 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
/* Maps an LBA to its backing address in the shared ramdisk. do_div()
 * leaves the remainder, so the LBA wraps modulo sdebug_store_sectors
 * (this is how "virtual_gb" capacities larger than the store work). */
661 static void *fake_store(unsigned long long lba)
663 lba = do_div(lba, sdebug_store_sectors);
665 return fake_storep + lba * scsi_debug_sector_size;
/* Same wrap-around mapping as fake_store(), but into the protection
 * information (DIF tuple) store. */
668 static struct sd_dif_tuple *dif_store(sector_t sector)
670 sector = do_div(sector, sdebug_store_sectors);
672 return dif_storep + sector;
675 static int sdebug_add_adapter(void);
676 static void sdebug_remove_adapter(void);
/* Re-applies the current num_tgts/max_luns settings to every simulated
 * host: bumps max_id past this_id where needed and sets max_lun high
 * enough to expose the SAM-2 well-known REPORT LUNS lun.
 * Walks sdebug_host_list under sdebug_host_list_lock.
 * NOTE(review): an "else" line appears to be missing between the two
 * max_id assignments in this extract -- confirm against the original. */
678 static void sdebug_max_tgts_luns(void)
680 struct sdebug_host_info *sdbg_host;
681 struct Scsi_Host *hpnt;
683 spin_lock(&sdebug_host_list_lock);
684 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
685 hpnt = sdbg_host->shost;
686 if ((hpnt->this_id >= 0) &&
687 (scsi_debug_num_tgts > hpnt->this_id))
688 hpnt->max_id = scsi_debug_num_tgts + 1;
690 hpnt->max_id = scsi_debug_num_tgts;
691 /* scsi_debug_max_luns; */
692 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
694 spin_unlock(&sdebug_host_list_lock);
697 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
699 /* Set in_bit to -1 to indicate no bit position of invalid field */
/* Builds an ILLEGAL REQUEST sense buffer for an invalid field, choosing
 * the ASC from whether the bad field was in the cdb (SDEB_IN_CDB) or the
 * parameter list (SDEB_IN_DATA), and filling in a sense-key-specific
 * field pointer (byte/bit of the offending field). Placement of the SKS
 * bytes differs between descriptor (dsense) and fixed sense format.
 * NOTE(review): several lines (declarations, early return, sks bit
 * validity flags) are missing from this extract. */
701 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
702 int in_byte, int in_bit)
704 unsigned char *sbuff;
708 sbuff = scp->sense_buffer;
710 sdev_printk(KERN_ERR, scp->device,
711 "%s: sense_buffer is NULL\n", __func__);
714 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
715 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
716 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
718 memset(sks, 0, sizeof(sks));
724 sks[0] |= 0x7 & in_bit;
726 put_unaligned_be16(in_byte, sks + 1);
727 if (scsi_debug_dsense) {
732 memcpy(sbuff + sl + 4, sks, 3);
734 memcpy(sbuff + 15, sks, 3);
735 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
736 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
737 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
738 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
/* Fills scp's sense buffer with the given key/asc/ascq, in descriptor or
 * fixed format per the dsense option; logs it when OPT_NOISE is set. */
741 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
743 unsigned char *sbuff;
745 sbuff = scp->sense_buffer;
747 sdev_printk(KERN_ERR, scp->device,
748 "%s: sense_buffer is NULL\n", __func__);
751 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
753 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
755 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
756 sdev_printk(KERN_INFO, scp->device,
757 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
758 my_name, key, asc, asq);
/* Convenience wrapper: ILLEGAL REQUEST + INVALID OPCODE sense. */
762 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
764 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
/* ioctl entry point: only logs recognized cmds (BLKFLSBUF,
 * CDROM_GET_CAPABILITY) when OPT_NOISE is set; it implements nothing.
 * Deliberately does not return -ENOTTY (see comment below). */
767 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
769 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
771 sdev_printk(KERN_INFO, dev,
772 "%s: BLKFLSBUF [0x1261]\n", __func__);
773 else if (0x5331 == cmd)
774 sdev_printk(KERN_INFO, dev,
775 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
778 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
782 /* return -ENOTTY; // correct return but upsets fdisk */
/* Checks a device for pending Unit Attention conditions (lowest set bit
 * in devip->uas_bm == highest priority) and, when uas_only == UAS_TUR,
 * additionally for the "stopped" not-ready state. Builds the matching
 * sense data and returns check_condition_result when a condition is
 * reported; the consumed UA bit is cleared. Returns 0 (implied) when
 * the device is ready. NOTE(review): parts of the switch statement
 * (case labels, breaks, default body) are missing from this extract. */
785 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
786 struct sdebug_dev_info * devip)
789 bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
791 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
792 if (k != SDEBUG_NUM_UAS) {
793 const char *cp = NULL;
797 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
798 UA_RESET_ASC, POWER_ON_RESET_ASCQ);
800 cp = "power on reset";
802 case SDEBUG_UA_BUS_RESET:
803 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
804 UA_RESET_ASC, BUS_RESET_ASCQ);
808 case SDEBUG_UA_MODE_CHANGED:
809 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
810 UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
812 cp = "mode parameters changed";
814 case SDEBUG_UA_CAPACITY_CHANGED:
815 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
816 UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
818 cp = "capacity data changed";
821 pr_warn("%s: unexpected unit attention code=%d\n",
827 clear_bit(k, devip->uas_bm);
829 sdev_printk(KERN_INFO, SCpnt->device,
830 "%s reports: Unit attention: %s\n",
832 return check_condition_result;
834 if ((UAS_TUR == uas_only) && devip->stopped) {
835 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
838 sdev_printk(KERN_INFO, SCpnt->device,
839 "%s reports: Not ready: %s\n", my_name,
840 "initializing command required");
841 return check_condition_result;
846 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
/* Copies up to arr_len bytes of simulated response data into the
 * command's data-in scatter-gather list and records any shortfall in
 * sdb->resid. Errors out if the command has no data-in direction. */
847 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
851 struct scsi_data_buffer *sdb = scsi_in(scp);
855 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
856 return (DID_ERROR << 16);
858 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
860 sdb->resid = scsi_bufflen(scp) - act_len;
865 /* Returns number of bytes fetched into 'arr' or -1 if error. */
/* Gathers the command's data-out payload (up to arr_len bytes) from its
 * scatter-gather list into the flat buffer 'arr'. */
866 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
869 if (!scsi_bufflen(scp))
871 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
874 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
878 static const char * inq_vendor_id = "Linux ";
879 static const char * inq_product_id = "scsi_debug ";
880 static const char *inq_product_rev = "0184"; /* version less '.' */
882 /* Device identification VPD page. Returns number of bytes placed in arr */
/* Builds the INQUIRY VPD page 0x83 designator list: a faked T10 vendor
 * id, then (when dev_id_num >= 0) NAA-5 logical unit id, relative target
 * port, NAA-5 target port id, target port group id, NAA-5 target device
 * id, and a SCSI name string -- all with fabricated company ids.
 * NOTE(review): several lines (designator length/padding bytes) are
 * missing from this extract. */
883 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
884 int target_dev_id, int dev_id_num,
885 const char * dev_id_str,
891 port_a = target_dev_id + 1;
892 /* T10 vendor identifier field format (faked) */
893 arr[0] = 0x2; /* ASCII */
896 memcpy(&arr[4], inq_vendor_id, 8);
897 memcpy(&arr[12], inq_product_id, 16);
898 memcpy(&arr[28], dev_id_str, dev_id_str_len);
899 num = 8 + 16 + dev_id_str_len;
902 if (dev_id_num >= 0) {
903 /* NAA-5, Logical unit identifier (binary) */
904 arr[num++] = 0x1; /* binary (not necessarily sas) */
905 arr[num++] = 0x3; /* PIV=0, lu, naa */
908 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
912 arr[num++] = (dev_id_num >> 24);
913 arr[num++] = (dev_id_num >> 16) & 0xff;
914 arr[num++] = (dev_id_num >> 8) & 0xff;
915 arr[num++] = dev_id_num & 0xff;
916 /* Target relative port number */
917 arr[num++] = 0x61; /* proto=sas, binary */
918 arr[num++] = 0x94; /* PIV=1, target port, rel port */
919 arr[num++] = 0x0; /* reserved */
920 arr[num++] = 0x4; /* length */
921 arr[num++] = 0x0; /* reserved */
922 arr[num++] = 0x0; /* reserved */
924 arr[num++] = 0x1; /* relative port A */
926 /* NAA-5, Target port identifier */
927 arr[num++] = 0x61; /* proto=sas, binary */
928 arr[num++] = 0x93; /* piv=1, target port, naa */
931 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
935 arr[num++] = (port_a >> 24);
936 arr[num++] = (port_a >> 16) & 0xff;
937 arr[num++] = (port_a >> 8) & 0xff;
938 arr[num++] = port_a & 0xff;
939 /* NAA-5, Target port group identifier */
940 arr[num++] = 0x61; /* proto=sas, binary */
941 arr[num++] = 0x95; /* piv=1, target port group id */
946 arr[num++] = (port_group_id >> 8) & 0xff;
947 arr[num++] = port_group_id & 0xff;
948 /* NAA-5, Target device identifier */
949 arr[num++] = 0x61; /* proto=sas, binary */
950 arr[num++] = 0xa3; /* piv=1, target device, naa */
953 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
957 arr[num++] = (target_dev_id >> 24);
958 arr[num++] = (target_dev_id >> 16) & 0xff;
959 arr[num++] = (target_dev_id >> 8) & 0xff;
960 arr[num++] = target_dev_id & 0xff;
961 /* SCSI name string: Target device identifier */
962 arr[num++] = 0x63; /* proto=sas, UTF-8 */
963 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
966 memcpy(arr + num, "naa.52222220", 12);
968 snprintf(b, sizeof(b), "%08X", target_dev_id);
969 memcpy(arr + num, b, 8);
971 memset(arr + num, 0, 4);
/* Canned payload for the Software interface identification VPD page (0x84);
 * three 6-byte entries with a fake 0x222222 company id. */
977 static unsigned char vpd84_data[] = {
978 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
979 0x22,0x22,0x22,0x0,0xbb,0x1,
980 0x22,0x22,0x22,0x0,0xbb,0x2,
983 /* Software interface identification VPD page */
/* Copies the static table above into arr; returns its length. */
984 static int inquiry_evpd_84(unsigned char * arr)
986 memcpy(arr, vpd84_data, sizeof(vpd84_data));
987 return sizeof(vpd84_data);
990 /* Management network addresses VPD page */
/* Emits two network-address descriptors (storage-configuration URL and
 * logging URL), each null terminated and padded to a 4-byte multiple. */
991 static int inquiry_evpd_85(unsigned char * arr)
994 const char * na1 = "https://www.kernel.org/config";
995 const char * na2 = "http://www.kernel.org/log";
998 arr[num++] = 0x1; /* lu, storage config */
999 arr[num++] = 0x0; /* reserved */
/* round up to next multiple of 4 for padding */
1004 plen = ((plen / 4) + 1) * 4;
1005 arr[num++] = plen; /* length, null terminated, padded */
1006 memcpy(arr + num, na1, olen);
1007 memset(arr + num + olen, 0, plen - olen);
1010 arr[num++] = 0x4; /* lu, logging */
1011 arr[num++] = 0x0; /* reserved */
1016 plen = ((plen / 4) + 1) * 4;
1017 arr[num++] = plen; /* length, null terminated, padded */
1018 memcpy(arr + num, na2, olen);
1019 memset(arr + num + olen, 0, plen - olen);
1025 /* SCSI ports VPD page */
/* Describes two relative target ports (A primary, B secondary), each with
 * a 12-byte NAA-5 target port identifier derived from target_dev_id. */
1026 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1031 port_a = target_dev_id + 1;
1032 port_b = port_a + 1;
1033 arr[num++] = 0x0; /* reserved */
1034 arr[num++] = 0x0; /* reserved */
1036 arr[num++] = 0x1; /* relative port 1 (primary) */
1037 memset(arr + num, 0, 6);
1040 arr[num++] = 12; /* length tp descriptor */
1041 /* naa-5 target port identifier (A) */
1042 arr[num++] = 0x61; /* proto=sas, binary */
1043 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1044 arr[num++] = 0x0; /* reserved */
1045 arr[num++] = 0x8; /* length */
1046 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
1050 arr[num++] = (port_a >> 24);
1051 arr[num++] = (port_a >> 16) & 0xff;
1052 arr[num++] = (port_a >> 8) & 0xff;
1053 arr[num++] = port_a & 0xff;
1055 arr[num++] = 0x0; /* reserved */
1056 arr[num++] = 0x0; /* reserved */
1058 arr[num++] = 0x2; /* relative port 2 (secondary) */
1059 memset(arr + num, 0, 6);
1062 arr[num++] = 12; /* length tp descriptor */
1063 /* naa-5 target port identifier (B) */
1064 arr[num++] = 0x61; /* proto=sas, binary */
1065 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1066 arr[num++] = 0x0; /* reserved */
1067 arr[num++] = 0x8; /* length */
1068 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
1072 arr[num++] = (port_b >> 24);
1073 arr[num++] = (port_b >> 16) & 0xff;
1074 arr[num++] = (port_b >> 8) & 0xff;
1075 arr[num++] = port_b & 0xff;
/* Canned ATA IDENTIFY-style payload for the ATA Information VPD page
 * (0x89); byte-for-byte fake data including a "linux   " signature and a
 * trailing integrity word. Kept verbatim — treat as opaque data. */
1081 static unsigned char vpd89_data[] = {
1082 /* from 4th byte */ 0,0,0,0,
1083 'l','i','n','u','x',' ',' ',' ',
1084 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1086 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1088 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1089 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1090 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1091 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1093 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1095 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1097 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1098 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1099 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1100 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1101 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1102 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1103 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1104 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1105 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1106 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1107 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1108 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1109 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1110 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1111 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1112 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1113 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1114 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1115 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1116 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1117 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1118 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1119 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1120 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1121 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1122 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1125 /* ATA Information VPD page */
/* Copies the static table above into arr; returns its length. */
1126 static int inquiry_evpd_89(unsigned char * arr)
1128 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1129 return sizeof(vpd89_data);
/* Default payload for the Block Limits VPD page (0xB0), overwritten
 * field-by-field below. */
1133 static unsigned char vpdb0_data[] = {
1134 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1135 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1136 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1137 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1140 /* Block limits VPD page (SBC-3) */
/* Fills in transfer-length granularity/limits and, when logical block
 * provisioning is enabled, the UNMAP/WRITE SAME limits. Returns the page
 * length placed in arr. */
1141 static int inquiry_evpd_b0(unsigned char * arr)
1145 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1147 /* Optimal transfer length granularity */
1148 gran = 1 << scsi_debug_physblk_exp;
1149 arr[2] = (gran >> 8) & 0xff;
1150 arr[3] = gran & 0xff;
1152 /* Maximum Transfer Length */
1153 if (sdebug_store_sectors > 0x400) {
1154 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
1155 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
1156 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
1157 arr[7] = sdebug_store_sectors & 0xff;
1160 /* Optimal Transfer Length */
1161 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
/* provisioning (UNMAP) limits only when lbpu is configured */
1163 if (scsi_debug_lbpu) {
1164 /* Maximum Unmap LBA Count */
1165 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
1167 /* Maximum Unmap Block Descriptor Count */
1168 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
1171 /* Unmap Granularity Alignment */
1172 if (scsi_debug_unmap_alignment) {
1173 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
1174 arr[28] |= 0x80; /* UGAVALID */
1177 /* Optimal Unmap Granularity */
1178 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
1180 /* Maximum WRITE SAME Length */
1181 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
1183 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1185 return sizeof(vpdb0_data);
1188 /* Block device characteristics VPD page (SBC-3) */
/* Zeroes the page, then advertises a small non-rotating (SSD-like) medium.
 * Returns implied fixed length (closing lines not visible in excerpt). */
1189 static int inquiry_evpd_b1(unsigned char *arr)
1191 memset(arr, 0, 0x3c);
1193 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1195 arr[3] = 5; /* less than 1.8" */
1200 /* Logical block provisioning VPD page (SBC-3) */
/* Reports which provisioning features (LBPU / LBPWS / LBPWS10 / LBPRZ)
 * the simulated device supports, driven by module parameters.
 * NOTE(review): the bit-setting lines under each if are not visible in
 * this decimated excerpt. */
1201 static int inquiry_evpd_b2(unsigned char *arr)
1203 memset(arr, 0, 0x4);
1204 arr[0] = 0; /* threshold exponent */
1206 if (scsi_debug_lbpu)
1209 if (scsi_debug_lbpws)
1212 if (scsi_debug_lbpws10)
1215 if (scsi_debug_lbprz)
1221 #define SDEBUG_LONG_INQ_SZ 96
1222 #define SDEBUG_MAX_INQ_ARR_SZ 584
/* Emulates the INQUIRY command: dispatches EVPD page requests to the
 * inquiry_evpd_* helpers above, otherwise builds the standard INQUIRY
 * response. Response is kzalloc'd (GFP_ATOMIC) and copied to the command's
 * data-in buffer via fill_from_dev_buffer. */
1224 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1226 unsigned char pq_pdt;
1227 unsigned char * arr;
1228 unsigned char *cmd = scp->cmnd;
1229 int alloc_len, n, ret;
/* CDB bytes 3-4: allocation length */
1232 alloc_len = (cmd[3] << 8) + cmd[4];
1233 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1235 return DID_REQUEUE << 16;
1236 have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
1238 pq_pdt = 0x1e; /* present, wlun */
1239 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
1240 pq_pdt = 0x7f; /* not present, no device type */
1242 pq_pdt = (scsi_debug_ptype & 0x1f);
/* CMDDT is obsolete; reject it with ILLEGAL REQUEST */
1244 if (0x2 & cmd[1]) { /* CMDDT bit set */
1245 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1247 return check_condition_result;
1248 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1249 int lu_id_num, port_group_id, target_dev_id, len;
1251 int host_no = devip->sdbg_host->shost->host_no;
/* synthesize stable fake ids from host/channel/target/lun */
1253 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1254 (devip->channel & 0x7f);
1255 if (0 == scsi_debug_vpd_use_hostno)
1257 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1258 (devip->target * 1000) + devip->lun);
1259 target_dev_id = ((host_no + 1) * 2000) +
1260 (devip->target * 1000) - 3;
1261 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1262 if (0 == cmd[2]) { /* supported vital product data pages */
1263 arr[1] = cmd[2]; /*sanity */
1265 arr[n++] = 0x0; /* this page */
1266 arr[n++] = 0x80; /* unit serial number */
1267 arr[n++] = 0x83; /* device identification */
1268 arr[n++] = 0x84; /* software interface ident. */
1269 arr[n++] = 0x85; /* management network addresses */
1270 arr[n++] = 0x86; /* extended inquiry */
1271 arr[n++] = 0x87; /* mode page policy */
1272 arr[n++] = 0x88; /* SCSI ports */
1273 arr[n++] = 0x89; /* ATA information */
1274 arr[n++] = 0xb0; /* Block limits (SBC) */
1275 arr[n++] = 0xb1; /* Block characteristics (SBC) */
1276 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1278 arr[3] = n - 4; /* number of supported VPD pages */
1279 } else if (0x80 == cmd[2]) { /* unit serial number */
1280 arr[1] = cmd[2]; /*sanity */
1282 memcpy(&arr[4], lu_id_str, len);
1283 } else if (0x83 == cmd[2]) { /* device identification */
1284 arr[1] = cmd[2]; /*sanity */
1285 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1286 target_dev_id, lu_id_num,
1288 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1289 arr[1] = cmd[2]; /*sanity */
1290 arr[3] = inquiry_evpd_84(&arr[4]);
1291 } else if (0x85 == cmd[2]) { /* Management network addresses */
1292 arr[1] = cmd[2]; /*sanity */
1293 arr[3] = inquiry_evpd_85(&arr[4]);
1294 } else if (0x86 == cmd[2]) { /* extended inquiry */
1295 arr[1] = cmd[2]; /*sanity */
1296 arr[3] = 0x3c; /* number of following entries */
1297 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1298 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1299 else if (scsi_debug_dif)
1300 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1302 arr[4] = 0x0; /* no protection stuff */
1303 arr[5] = 0x7; /* head of q, ordered + simple q's */
1304 } else if (0x87 == cmd[2]) { /* mode page policy */
1305 arr[1] = cmd[2]; /*sanity */
1306 arr[3] = 0x8; /* number of following entries */
1307 arr[4] = 0x2; /* disconnect-reconnect mp */
1308 arr[6] = 0x80; /* mlus, shared */
1309 arr[8] = 0x18; /* protocol specific lu */
1310 arr[10] = 0x82; /* mlus, per initiator port */
1311 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1312 arr[1] = cmd[2]; /*sanity */
1313 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1314 } else if (0x89 == cmd[2]) { /* ATA information */
1315 arr[1] = cmd[2]; /*sanity */
1316 n = inquiry_evpd_89(&arr[4]);
1318 arr[3] = (n & 0xff);
1319 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1320 arr[1] = cmd[2]; /*sanity */
1321 arr[3] = inquiry_evpd_b0(&arr[4]);
1322 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1323 arr[1] = cmd[2]; /*sanity */
1324 arr[3] = inquiry_evpd_b1(&arr[4]);
1325 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1326 arr[1] = cmd[2]; /*sanity */
1327 arr[3] = inquiry_evpd_b2(&arr[4]);
/* unknown VPD page: invalid field in CDB byte 2 */
1329 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1331 return check_condition_result;
/* clamp to page length + header, then to allocation length */
1333 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1334 ret = fill_from_dev_buffer(scp, arr,
1335 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1339 /* drops through here for a standard inquiry */
1340 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
1341 arr[2] = scsi_debug_scsi_level;
1342 arr[3] = 2; /* response_data_format==2 */
1343 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1344 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1345 if (0 == scsi_debug_vpd_use_hostno)
1346 arr[5] = 0x10; /* claim: implicit TGPS */
1347 arr[6] = 0x10; /* claim: MultiP */
1348 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1349 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1350 memcpy(&arr[8], inq_vendor_id, 8);
1351 memcpy(&arr[16], inq_product_id, 16);
1352 memcpy(&arr[32], inq_product_rev, 4);
1353 /* version descriptors (2 bytes each) follow */
1354 arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */
1355 arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */
1357 if (scsi_debug_ptype == 0) {
1358 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1359 } else if (scsi_debug_ptype == 1) {
1360 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1362 arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */
1363 ret = fill_from_dev_buffer(scp, arr,
1364 min(alloc_len, SDEBUG_LONG_INQ_SZ));
/* Emulates REQUEST SENSE: returns either a synthetic "threshold exceeded"
 * report (when the IEC mode page has TEST set with MRIE==6) or the sense
 * data saved in scp->sense_buffer, converted between fixed and descriptor
 * formats as requested by the DESC bit in the CDB. */
1369 static int resp_requests(struct scsi_cmnd * scp,
1370 struct sdebug_dev_info * devip)
1372 unsigned char * sbuff;
1373 unsigned char *cmd = scp->cmnd;
1374 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1375 bool dsense, want_dsense;
1378 memset(arr, 0, sizeof(arr));
1379 dsense = !!(cmd[1] & 1);	/* DESC bit: descriptor format wanted */
1380 want_dsense = dsense || scsi_debug_dsense;
1381 sbuff = scp->sense_buffer;
1382 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
/* descriptor-format threshold report */
1385 arr[1] = 0x0; /* NO_SENSE in sense_key */
1386 arr[2] = THRESHOLD_EXCEEDED;
1387 arr[3] = 0xff; /* TEST set and MRIE==6 */
/* fixed-format threshold report */
1391 arr[2] = 0x0; /* NO_SENSE in sense_key */
1392 arr[7] = 0xa; /* 18 byte sense buffer */
1393 arr[12] = THRESHOLD_EXCEEDED;
1394 arr[13] = 0xff; /* TEST set and MRIE==6 */
1397 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1398 if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1399 ; /* have sense and formats match */
1400 else if (arr[0] <= 0x70) {
/* convert between fixed and descriptor sense layouts */
1410 } else if (dsense) {
1413 arr[1] = sbuff[2]; /* sense key */
1414 arr[2] = sbuff[12]; /* asc */
1415 arr[3] = sbuff[13]; /* ascq */
/* clear pending sense, then return the assembled buffer */
1427 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1428 return fill_from_dev_buffer(scp, arr, len);
/* Emulates START STOP UNIT: validates the power condition field and
 * toggles the device's stopped state when it changes. */
1431 static int resp_start_stop(struct scsi_cmnd * scp,
1432 struct sdebug_dev_info * devip)
1434 unsigned char *cmd = scp->cmnd;
1435 int power_cond, start;
1437 power_cond = (cmd[4] & 0xf0) >> 4;
/* non-zero power condition not supported: invalid field in CDB */
1439 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1440 return check_condition_result;
1443 if (start == devip->stopped)
1444 devip->stopped = !start;
/* Returns the simulated capacity in sectors: virtual_gb (if set) converted
 * at 2^30 bytes per GiB, else the size of the backing store. */
1448 static sector_t get_sdebug_capacity(void)
1450 if (scsi_debug_virtual_gb > 0)
1451 return (sector_t)scsi_debug_virtual_gb *
1452 (1073741824 / scsi_debug_sector_size)
1454 return sdebug_store_sectors;
1457 #define SDEBUG_READCAP_ARR_SZ 8
/* Emulates READ CAPACITY(10): last LBA (big-endian, saturating to
 * 0xffffffff for large devices) plus the block size. */
1458 static int resp_readcap(struct scsi_cmnd * scp,
1459 struct sdebug_dev_info * devip)
1461 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1464 /* following just in case virtual_gb changed */
1465 sdebug_capacity = get_sdebug_capacity();
1466 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1467 if (sdebug_capacity < 0xffffffff) {
1468 capac = (unsigned int)sdebug_capacity - 1;	/* last LBA, not count */
1469 arr[0] = (capac >> 24);
1470 arr[1] = (capac >> 16) & 0xff;
1471 arr[2] = (capac >> 8) & 0xff;
1472 arr[3] = capac & 0xff;
/* bytes 6-7: logical block length */
1479 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1480 arr[7] = scsi_debug_sector_size & 0xff;
1481 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1484 #define SDEBUG_READCAP16_ARR_SZ 32
/* Emulates READ CAPACITY(16): 64-bit last LBA, block size, physical block
 * exponent, lowest aligned LBA, plus LBP (thin provisioning) and DIF
 * protection bits when configured. */
1485 static int resp_readcap16(struct scsi_cmnd * scp,
1486 struct sdebug_dev_info * devip)
1488 unsigned char *cmd = scp->cmnd;
1489 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1490 unsigned long long capac;
/* CDB bytes 10-13: allocation length */
1493 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1495 /* following just in case virtual_gb changed */
1496 sdebug_capacity = get_sdebug_capacity();
1497 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1498 capac = sdebug_capacity - 1;	/* last LBA, not count */
1499 for (k = 0; k < 8; ++k, capac >>= 8)
1500 arr[7 - k] = capac & 0xff;	/* big-endian 64-bit */
1501 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1502 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1503 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1504 arr[11] = scsi_debug_sector_size & 0xff;
1505 arr[13] = scsi_debug_physblk_exp & 0xf;
1506 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1508 if (scsi_debug_lbp()) {
1509 arr[14] |= 0x80; /* LBPME */
1510 if (scsi_debug_lbprz)
1511 arr[14] |= 0x40; /* LBPRZ */
1514 arr[15] = scsi_debug_lowest_aligned & 0xff;
1516 if (scsi_debug_dif) {
1517 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1518 arr[12] |= 1; /* PROT_EN */
1521 return fill_from_dev_buffer(scp, arr,
1522 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1525 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/* Emulates REPORT TARGET PORT GROUPS: two single-port groups matching the
 * two ports advertised by VPD page 0x88, with group B unavailable. */
1527 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1528 struct sdebug_dev_info * devip)
1530 unsigned char *cmd = scp->cmnd;
1531 unsigned char * arr;
1532 int host_no = devip->sdbg_host->shost->host_no;
1533 int n, ret, alen, rlen;
1534 int port_group_a, port_group_b, port_a, port_b;
/* CDB bytes 6-9: allocation length */
1536 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1539 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1541 return DID_REQUEUE << 16;
1543 * EVPD page 0x88 states we have two ports, one
1544 * real and a fake port with no device connected.
1545 * So we create two port groups with one port each
1546 * and set the group with port B to unavailable.
1548 port_a = 0x1; /* relative port A */
1549 port_b = 0x2; /* relative port B */
1550 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1551 (devip->channel & 0x7f);
1552 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1553 (devip->channel & 0x7f) + 0x80;
1556 * The asymmetric access state is cycled according to the host_id.
1559 if (0 == scsi_debug_vpd_use_hostno) {
1560 arr[n++] = host_no % 3; /* Asymm access state */
1561 arr[n++] = 0x0F; /* claim: all states are supported */
1563 arr[n++] = 0x0; /* Active/Optimized path */
1564 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1566 arr[n++] = (port_group_a >> 8) & 0xff;
1567 arr[n++] = port_group_a & 0xff;
1568 arr[n++] = 0; /* Reserved */
1569 arr[n++] = 0; /* Status code */
1570 arr[n++] = 0; /* Vendor unique */
1571 arr[n++] = 0x1; /* One port per group */
1572 arr[n++] = 0; /* Reserved */
1573 arr[n++] = 0; /* Reserved */
1574 arr[n++] = (port_a >> 8) & 0xff;
1575 arr[n++] = port_a & 0xff;
1576 arr[n++] = 3; /* Port unavailable */
1577 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1578 arr[n++] = (port_group_b >> 8) & 0xff;
1579 arr[n++] = port_group_b & 0xff;
1580 arr[n++] = 0; /* Reserved */
1581 arr[n++] = 0; /* Status code */
1582 arr[n++] = 0; /* Vendor unique */
1583 arr[n++] = 0x1; /* One port per group */
1584 arr[n++] = 0; /* Reserved */
1585 arr[n++] = 0; /* Reserved */
1586 arr[n++] = (port_b >> 8) & 0xff;
1587 arr[n++] = port_b & 0xff;
/* 4-byte big-endian return data length header */
1590 arr[0] = (rlen >> 24) & 0xff;
1591 arr[1] = (rlen >> 16) & 0xff;
1592 arr[2] = (rlen >> 8) & 0xff;
1593 arr[3] = rlen & 0xff;
1596 * Return the smallest value of either
1597 * - The allocated length
1598 * - The constructed command length
1599 * - The maximum array size
1602 ret = fill_from_dev_buffer(scp, arr,
1603 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
/* Emulates REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN): walks the
 * opcode_info_arr table and reports either all commands (reporting option
 * 0) or a single command looked up by opcode and optional service action
 * (options 1-3). RCTD adds a 12-byte timeout descriptor per entry. */
1609 resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1612 u8 reporting_opts, req_opcode, sdeb_i, supp;
1614 u32 alloc_len, a_len;
1615 int k, offset, len, errsts, count, bump, na;
1616 const struct opcode_info_t *oip;
1617 const struct opcode_info_t *r_oip;
1619 u8 *cmd = scp->cmnd;
1621 rctd = !!(cmd[2] & 0x80);	/* return command timeout descriptors */
1622 reporting_opts = cmd[2] & 0x7;
1623 req_opcode = cmd[3];
1624 req_sa = get_unaligned_be16(cmd + 4);
1625 alloc_len = get_unaligned_be32(cmd + 6);
1626 if (alloc_len < 4 || alloc_len > 0xffff) {
1627 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1628 return check_condition_result;
1630 if (alloc_len > 8192)
1634 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1636 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1638 return check_condition_result;
1640 switch (reporting_opts) {
1641 case 0: /* all commands */
1642 /* count number of commands */
1643 for (count = 0, oip = opcode_info_arr;
1644 oip->num_attached != 0xff; ++oip) {
1645 if (F_INV_OP & oip->flags)
1647 count += (oip->num_attached + 1);
1649 bump = rctd ? 20 : 8;	/* per-entry size with/without timeouts */
1650 put_unaligned_be32(count * bump, arr);
1651 for (offset = 4, oip = opcode_info_arr;
1652 oip->num_attached != 0xff && offset < a_len; ++oip) {
1653 if (F_INV_OP & oip->flags)
1655 na = oip->num_attached;
1656 arr[offset] = oip->opcode;
1657 put_unaligned_be16(oip->sa, arr + offset + 2);
1659 arr[offset + 5] |= 0x2;	/* CTDP: timeout descriptor present */
1660 if (FF_SA & oip->flags)
1661 arr[offset + 5] |= 0x1;	/* SERVACTV */
1662 put_unaligned_be16(oip->len_mask[0], arr + offset + 6)
1664 put_unaligned_be16(0xa, arr + offset + 8);
/* then the commands attached to this primary opcode */
1666 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1667 if (F_INV_OP & oip->flags)
1670 arr[offset] = oip->opcode;
1671 put_unaligned_be16(oip->sa, arr + offset + 2);
1673 arr[offset + 5] |= 0x2;
1674 if (FF_SA & oip->flags)
1675 arr[offset + 5] |= 0x1;
1676 put_unaligned_be16(oip->len_mask[0],
1679 put_unaligned_be16(0xa,
1686 case 1: /* one command: opcode only */
1687 case 2: /* one command: opcode plus service action */
1688 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1689 sdeb_i = opcode_ind_arr[req_opcode];
1690 oip = &opcode_info_arr[sdeb_i];
1691 if (F_INV_OP & oip->flags) {
1695 if (1 == reporting_opts) {
1696 if (FF_SA & oip->flags) {
/* opcode-only report on a command that needs a service action */
1697 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1700 return check_condition_result;
1703 } else if (2 == reporting_opts &&
1704 0 == (FF_SA & oip->flags)) {
1705 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1706 kfree(arr); /* point at requested sa */
1707 return check_condition_result;
/* search the attached-command list when the primary doesn't match */
1709 if (0 == (FF_SA & oip->flags) &&
1710 req_opcode == oip->opcode)
1712 else if (0 == (FF_SA & oip->flags)) {
1713 na = oip->num_attached;
1714 for (k = 0, oip = oip->arrp; k < na;
1716 if (req_opcode == oip->opcode)
1719 supp = (k >= na) ? 1 : 3;	/* 1=not supported, 3=supported */
1720 } else if (req_sa != oip->sa) {
1721 na = oip->num_attached;
1722 for (k = 0, oip = oip->arrp; k < na;
1724 if (req_sa == oip->sa)
1727 supp = (k >= na) ? 1 : 3;
/* supported: emit CDB usage data (length + mask bytes) */
1731 u = oip->len_mask[0];
1732 put_unaligned_be16(u, arr + 2);
1733 arr[4] = oip->opcode;
1734 for (k = 1; k < u; ++k)
1735 arr[4 + k] = (k < 16) ?
1736 oip->len_mask[k] : 0xff;
1741 arr[1] = (rctd ? 0x80 : 0) | supp;
1743 put_unaligned_be16(0xa, arr + offset);
1748 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1750 return check_condition_result;
/* clamp response to both the built length and allocation length */
1752 offset = (offset < a_len) ? offset : a_len;
1753 len = (offset < alloc_len) ? offset : alloc_len;
1754 errsts = fill_from_dev_buffer(scp, arr, len);
/* Emulates REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS: fixed capability
 * bytes (ATS | ATSS | LURS, plus ITNRS), optionally in the extended
 * (REPD) format. */
1760 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1765 u8 *cmd = scp->cmnd;
1767 memset(arr, 0, sizeof(arr));
1768 repd = !!(cmd[2] & 0x80);	/* extended response requested */
1769 alloc_len = get_unaligned_be32(cmd + 6);
1770 if (alloc_len < 4) {
1771 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1772 return check_condition_result;
1774 arr[0] = 0xc8; /* ATS | ATSS | LURS */
1775 arr[1] = 0x1; /* ITNRS */
1782 len = (len < alloc_len) ? len : alloc_len;
1783 return fill_from_dev_buffer(scp, arr, len);
1786 /* <<Following mode page info copied from ST318451LW>> */
1788 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1789 { /* Read-Write Error Recovery page for mode_sense */
1790 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1793 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
/* pcontrol==1 (changeable values): report nothing changeable */
1795 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1796 return sizeof(err_recov_pg);
1799 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1800 { /* Disconnect-Reconnect page for mode_sense */
1801 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1802 0, 0, 0, 0, 0, 0, 0, 0};
1804 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
/* pcontrol==1 (changeable values): report nothing changeable */
1806 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1807 return sizeof(disconnect_pg);
1810 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1811 { /* Format device page for mode_sense */
1812 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1813 0, 0, 0, 0, 0, 0, 0, 0,
1814 0, 0, 0, 0, 0x40, 0, 0, 0};
1816 memcpy(p, format_pg, sizeof(format_pg));
/* fill in the live geometry values */
1817 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1818 p[11] = sdebug_sectors_per & 0xff;
1819 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1820 p[13] = scsi_debug_sector_size & 0xff;
1821 if (scsi_debug_removable)
1822 p[20] |= 0x20; /* should agree with INQUIRY */
/* pcontrol==1 (changeable values): report nothing changeable */
1824 memset(p + 2, 0, sizeof(format_pg) - 2);
1825 return sizeof(format_pg);
1828 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1829 { /* Caching page for mode_sense */
/* ch_*: changeable-bits mask; d_*: default values */
1830 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1831 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1832 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1833 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1835 if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1836 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1837 memcpy(p, caching_pg, sizeof(caching_pg));
1839 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1840 else if (2 == pcontrol)
1841 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1842 return sizeof(caching_pg);
1845 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1846 { /* Control mode page for mode_sense */
/* ch_*: changeable-bits mask; d_*: default values */
1847 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1849 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
/* D_SENSE bit tracks the module's descriptor-sense setting */
1852 if (scsi_debug_dsense)
1853 ctrl_m_pg[2] |= 0x4;
1855 ctrl_m_pg[2] &= ~0x4;
1858 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1860 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1862 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1863 else if (2 == pcontrol)
1864 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1865 return sizeof(ctrl_m_pg);
1869 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1870 { /* Informational Exceptions control mode page for mode_sense */
/* ch_*: changeable-bits mask; d_*: default values */
1871 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1873 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1876 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1878 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1879 else if (2 == pcontrol)
1880 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1881 return sizeof(iec_m_pg);
1884 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1885 { /* SAS SSP mode page - short format for mode_sense */
1886 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1887 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1889 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
/* pcontrol==1 (changeable values): report nothing changeable */
1891 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1892 return sizeof(sas_sf_m_pg);
/* SAS phy control and discover mode subpage: two phy descriptors whose
 * attached SAS addresses are patched in from target_dev_id. */
1896 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1898 { /* SAS phy control and discover mode page for mode_sense */
1899 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1900 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1901 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1902 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1903 0x2, 0, 0, 0, 0, 0, 0, 0,
1904 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1905 0, 0, 0, 0, 0, 0, 0, 0,
1906 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1907 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1908 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1909 0x3, 0, 0, 0, 0, 0, 0, 0,
1910 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1911 0, 0, 0, 0, 0, 0, 0, 0,
1915 port_a = target_dev_id + 1;
1916 port_b = port_a + 1;
1917 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
/* low 32 bits of phy 0's SAS address */
1918 p[20] = (port_a >> 24);
1919 p[21] = (port_a >> 16) & 0xff;
1920 p[22] = (port_a >> 8) & 0xff;
1921 p[23] = port_a & 0xff;
/* low 32 bits of phy 1's SAS address (second 48-byte descriptor) */
1922 p[48 + 20] = (port_b >> 24);
1923 p[48 + 21] = (port_b >> 16) & 0xff;
1924 p[48 + 22] = (port_b >> 8) & 0xff;
1925 p[48 + 23] = port_b & 0xff;
/* pcontrol==1 (changeable values): report nothing changeable */
1927 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1928 return sizeof(sas_pcd_m_pg);
1931 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1932 { /* SAS SSP shared protocol specific port mode subpage */
1933 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1934 0, 0, 0, 0, 0, 0, 0, 0,
1937 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
/* pcontrol==1 (changeable values): report nothing changeable */
1939 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1940 return sizeof(sas_sha_m_pg);
1943 #define SDEBUG_MAX_MSENSE_SZ 256
/* Emulates MODE SENSE(6) and MODE SENSE(10): builds the mode parameter
 * header, an optional block descriptor (8- or 16-byte LONGLBA form), then
 * dispatches to the resp_*_pg page builders above. Page 0x3f returns all
 * supported pages. */
1946 resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1948 unsigned char dbd, llbaa;
1949 int pcontrol, pcode, subpcode, bd_len;
1950 unsigned char dev_spec;
1951 int k, alloc_len, msense_6, offset, len, target_dev_id;
1952 int target = scp->device->id;
1954 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1955 unsigned char *cmd = scp->cmnd;
1957 dbd = !!(cmd[1] & 0x8);	/* disable block descriptors */
1958 pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
1959 pcode = cmd[2] & 0x3f;
1961 msense_6 = (MODE_SENSE == cmd[0]);
1962 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
/* block descriptor only for disk-type devices when DBD is clear */
1963 if ((0 == scsi_debug_ptype) && (0 == dbd))
1964 bd_len = llbaa ? 16 : 8;
1967 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1968 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1969 if (0x3 == pcontrol) { /* Saving values not supported */
1970 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1971 return check_condition_result;
1973 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1974 (devip->target * 1000) - 3;
1975 /* set DPOFUA bit for disks */
1976 if (0 == scsi_debug_ptype)
1977 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
/* 10-byte header variant */
1987 arr[4] = 0x1; /* set LONGLBA bit */
1988 arr[7] = bd_len; /* assume 255 or less */
1992 if ((bd_len > 0) && (!sdebug_capacity))
1993 sdebug_capacity = get_sdebug_capacity();
/* 8-byte block descriptor: capacity saturates at 0xffffffff */
1996 if (sdebug_capacity > 0xfffffffe) {
2002 ap[0] = (sdebug_capacity >> 24) & 0xff;
2003 ap[1] = (sdebug_capacity >> 16) & 0xff;
2004 ap[2] = (sdebug_capacity >> 8) & 0xff;
2005 ap[3] = sdebug_capacity & 0xff;
2007 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
2008 ap[7] = scsi_debug_sector_size & 0xff;
2011 } else if (16 == bd_len) {
2012 unsigned long long capac = sdebug_capacity;
/* 16-byte LONGLBA descriptor: full 64-bit capacity */
2014 for (k = 0; k < 8; ++k, capac >>= 8)
2015 ap[7 - k] = capac & 0xff;
2016 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
2017 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
2018 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
2019 ap[15] = scsi_debug_sector_size & 0xff;
/* only page 0x19 has subpages here; reject other subpage requests */
2024 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2025 /* TODO: Control Extension page */
2026 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2027 return check_condition_result;
2030 case 0x1: /* Read-Write error recovery page, direct access */
2031 len = resp_err_recov_pg(ap, pcontrol, target);
2034 case 0x2: /* Disconnect-Reconnect page, all devices */
2035 len = resp_disconnect_pg(ap, pcontrol, target);
2038 case 0x3: /* Format device page, direct access */
2039 len = resp_format_pg(ap, pcontrol, target);
2042 case 0x8: /* Caching page, direct access */
2043 len = resp_caching_pg(ap, pcontrol, target);
2046 case 0xa: /* Control Mode page, all devices */
2047 len = resp_ctrl_m_pg(ap, pcontrol, target);
2050 case 0x19: /* if spc==1 then sas phy, control+discover */
2051 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2052 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2053 return check_condition_result;
2056 if ((0x0 == subpcode) || (0xff == subpcode))
2057 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2058 if ((0x1 == subpcode) || (0xff == subpcode))
2059 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2061 if ((0x2 == subpcode) || (0xff == subpcode))
2062 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2065 case 0x1c: /* Informational Exceptions Mode page, all devices */
2066 len = resp_iec_m_pg(ap, pcontrol, target);
2069 case 0x3f: /* Read all Mode pages */
2070 if ((0 == subpcode) || (0xff == subpcode)) {
2071 len = resp_err_recov_pg(ap, pcontrol, target);
2072 len += resp_disconnect_pg(ap + len, pcontrol, target);
2073 len += resp_format_pg(ap + len, pcontrol, target);
2074 len += resp_caching_pg(ap + len, pcontrol, target);
2075 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2076 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2077 if (0xff == subpcode) {
2078 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2079 target, target_dev_id);
2080 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2082 len += resp_iec_m_pg(ap + len, pcontrol, target);
2084 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2085 return check_condition_result;
/* unknown page code */
2090 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2091 return check_condition_result;
/* mode data length header: 1 byte (msense 6) or 2 bytes (msense 10) */
2094 arr[0] = offset - 1;
2096 arr[0] = ((offset - 2) >> 8) & 0xff;
2097 arr[1] = (offset - 2) & 0xff;
2099 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2102 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10).
 * Fetches the parameter list from the data-out buffer, validates the
 * PF/SP bits and the header/block-descriptor lengths, then applies the
 * changed page to the module-global saved images of the caching (0x8),
 * control (0xa) or informational exceptions (0x1c) mode pages.  On a
 * successful change a MODE PARAMETERS CHANGED unit attention is queued
 * for the device.  Returns 0, check_condition_result, or a DID_* code.
 */
2105 resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2107 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2108 int param_len, res, mpage;
2109 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2110 unsigned char *cmd = scp->cmnd;
/* the opcode distinguishes the 6-byte from the 10-byte CDB form */
2111 int mselect6 = (MODE_SELECT == cmd[0]);
2113 memset(arr, 0, sizeof(arr));
2116 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
/* PF must be set, SP must be clear, and the list must fit our buffer */
2117 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2118 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2119 return check_condition_result;
2121 res = fetch_to_dev_buffer(scp, arr, param_len);
2123 return (DID_ERROR << 16);
2124 else if ((res < param_len) &&
2125 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2126 sdev_printk(KERN_INFO, scp->device,
2127 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2128 __func__, param_len, res);
/* header layout differs: 6-byte form has 1-byte fields, 10-byte has 2 */
2129 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
2130 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
2132 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2133 return check_condition_result;
/* off: offset of the first mode page, past header + block descriptors */
2135 off = bd_len + (mselect6 ? 4 : 8);
2136 mpage = arr[off] & 0x3f;
2137 ps = !!(arr[off] & 0x80); /* PS bit is reserved (must be 0) here */
2139 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2140 return check_condition_result;
2142 spf = !!(arr[off] & 0x40); /* SPF set: sub-page format, 16-bit length */
2143 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
2145 if ((pg_len + off) > param_len) {
2146 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2147 PARAMETER_LIST_LENGTH_ERR, 0);
2148 return check_condition_result;
2151 case 0x8: /* Caching Mode page */
/* only accept the page if its length matches the saved image */
2152 if (caching_pg[1] == arr[off + 1]) {
2153 memcpy(caching_pg + 2, arr + off + 2,
2154 sizeof(caching_pg) - 2);
2155 goto set_mode_changed_ua;
2158 case 0xa: /* Control Mode page */
2159 if (ctrl_m_pg[1] == arr[off + 1]) {
2160 memcpy(ctrl_m_pg + 2, arr + off + 2,
2161 sizeof(ctrl_m_pg) - 2);
/* D_SENSE bit switches between fixed and descriptor sense format */
2162 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
2163 goto set_mode_changed_ua;
2166 case 0x1c: /* Informational Exceptions Mode page */
2167 if (iec_m_pg[1] == arr[off + 1]) {
2168 memcpy(iec_m_pg + 2, arr + off + 2,
2169 sizeof(iec_m_pg) - 2);
2170 goto set_mode_changed_ua;
2176 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2177 return check_condition_result;
2178 set_mode_changed_ua:
2179 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
/*
 * Build the Temperature log page (0x0d) into arr.
 * Two parameters are emitted: values 38 and 65 (presumably current and
 * reference temperature in Celsius — confirm against SPC).  Returns the
 * number of bytes written.
 */
2183 static int resp_temp_l_pg(unsigned char * arr)
2185 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2186 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2189 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2190 return sizeof(temp_l_pg);
/*
 * Build the Informational Exceptions log page (0x2f) into arr.
 * If the TEST bit in the saved IE mode page is set, report a
 * THRESHOLD_EXCEEDED additional sense code in the parameter data.
 * Returns the number of bytes written.
 */
2193 static int resp_ie_l_pg(unsigned char * arr)
2195 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2198 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2199 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2200 arr[4] = THRESHOLD_EXCEEDED;
2203 return sizeof(ie_l_pg);
2206 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * Respond to LOG SENSE.  Supports the "supported pages" page (0x0),
 * Temperature (0xd) and Informational Exceptions (0x2f), plus the
 * corresponding page/subpage listings when subpage 0xff is requested.
 * Returns 0 or check_condition_result.
 */
2208 static int resp_log_sense(struct scsi_cmnd * scp,
2209 struct sdebug_dev_info * devip)
2211 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2212 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2213 unsigned char *cmd = scp->cmnd;
2215 memset(arr, 0, sizeof(arr));
/* PPC and SP bits are not supported: reject with INVALID FIELD IN CDB */
2219 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2220 return check_condition_result;
2222 pcontrol = (cmd[2] & 0xc0) >> 6;
2223 pcode = cmd[2] & 0x3f;
2224 subpcode = cmd[3] & 0xff;
2225 alloc_len = (cmd[7] << 8) + cmd[8];
2227 if (0 == subpcode) {
2229 case 0x0: /* Supported log pages log page */
2231 arr[n++] = 0x0; /* this page */
2232 arr[n++] = 0xd; /* Temperature */
2233 arr[n++] = 0x2f; /* Informational exceptions */
2236 case 0xd: /* Temperature log page */
2237 arr[3] = resp_temp_l_pg(arr + 4);
2239 case 0x2f: /* Informational exceptions log page */
2240 arr[3] = resp_ie_l_pg(arr + 4);
2243 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2244 return check_condition_result;
2246 } else if (0xff == subpcode) {
2250 case 0x0: /* Supported log pages and subpages log page */
2253 arr[n++] = 0x0; /* 0,0 page */
2255 arr[n++] = 0xff; /* this page */
2257 arr[n++] = 0x0; /* Temperature */
2259 arr[n++] = 0x0; /* Informational exceptions */
2262 case 0xd: /* Temperature subpages */
2265 arr[n++] = 0x0; /* Temperature */
2268 case 0x2f: /* Informational exceptions subpages */
2271 arr[n++] = 0x0; /* Informational exceptions */
2275 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2276 return check_condition_result;
2279 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2280 return check_condition_result;
2282 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
/*
 * NOTE(review): the clamp below uses SDEBUG_MAX_INQ_ARR_SZ although arr
 * is SDEBUG_MAX_LSENSE_SZ bytes; this looks like the wrong constant —
 * confirm whether it should be SDEBUG_MAX_LSENSE_SZ.
 */
2283 return fill_from_dev_buffer(scp, arr,
2284 min(len, SDEBUG_MAX_INQ_ARR_SZ));
/*
 * Validate an (lba, num) access against the simulated capacity and the
 * size of the shared RAM store.  Builds sense data and returns
 * check_condition_result on failure.
 */
2287 static int check_device_access_params(struct scsi_cmnd *scp,
2288 unsigned long long lba, unsigned int num)
2290 if (lba + num > sdebug_capacity) {
2291 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2292 return check_condition_result;
2294 /* transfer length excessive (tie in to block limits VPD page) */
2295 if (num > sdebug_store_sectors) {
2296 /* needs work to find which cdb byte 'num' comes from */
2297 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2298 return check_condition_result;
2303 /* Returns number of bytes copied or -1 if error. */
/*
 * Copy num sectors between the command's scatter-gather list and the
 * shared RAM store (fake_storep), in the direction given by do_write.
 * The store is smaller than the advertised capacity, so the lba is
 * reduced modulo sdebug_store_sectors and a transfer that crosses the
 * end of the store wraps around to its start ('rest' sectors).
 */
2305 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2308 u64 block, rest = 0;
2309 struct scsi_data_buffer *sdb;
2310 enum dma_data_direction dir;
/* func: sg_pcopy_to_buffer for writes, sg_pcopy_from_buffer for reads */
2311 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
2315 sdb = scsi_out(scmd);
2316 dir = DMA_TO_DEVICE;
2317 func = sg_pcopy_to_buffer;
2319 sdb = scsi_in(scmd);
2320 dir = DMA_FROM_DEVICE;
2321 func = sg_pcopy_from_buffer;
/* bidirectional commands are exempt from the direction check */
2326 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2329 block = do_div(lba, sdebug_store_sectors);
2330 if (block + num > sdebug_store_sectors)
2331 rest = block + num - sdebug_store_sectors;
2333 ret = func(sdb->table.sgl, sdb->table.nents,
2334 fake_storep + (block * scsi_debug_sector_size),
2335 (num - rest) * scsi_debug_sector_size, 0);
2336 if (ret != (num - rest) * scsi_debug_sector_size)
/* wrapped portion: continue from the start of the store */
2340 ret += func(sdb->table.sgl, sdb->table.nents,
2341 fake_storep, rest * scsi_debug_sector_size,
2342 (num - rest) * scsi_debug_sector_size);
2348 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2349 * arr into fake_store(lba,num) and return true. If comparison fails then
/*
 * Helper for COMPARE AND WRITE.  arr holds the compare data followed by
 * the write data (num sectors each).  Handles wraparound at the end of
 * the RAM store, like do_device_access().
 */
2352 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2355 u64 block, rest = 0;
2356 u32 store_blks = sdebug_store_sectors;
2357 u32 lb_size = scsi_debug_sector_size;
2359 block = do_div(lba, store_blks);
2360 if (block + num > store_blks)
2361 rest = block + num - store_blks;
/* compare the contiguous part, then the wrapped remainder */
2363 res = !memcmp(fake_storep + (block * lb_size), arr,
2364 (num - rest) * lb_size);
2368 res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
/* comparison passed: write the second half of arr into the store */
2372 arr += num * lb_size;
2373 memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2375 memcpy(fake_storep, arr + ((num - rest) * lb_size),
/*
 * Compute the T10 DIF guard tag for a data buffer: IP checksum when the
 * scsi_debug_guard module parameter is set, CRC-T10DIF otherwise.
 */
2380 static __be16 dif_compute_csum(const void *buf, int len)
2384 if (scsi_debug_guard)
2385 csum = (__force __be16)ip_compute_csum(buf, len);
2387 csum = cpu_to_be16(crc_t10dif(buf, len));
/*
 * Verify one protection tuple against a sector of data: check the guard
 * tag, then the reference tag (lower 32 bits of the LBA for DIF type 1,
 * the expected initial LBA ei_lba for type 2).  Logs and returns
 * non-zero (not visible here — confirm) on mismatch.
 */
2392 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2393 sector_t sector, u32 ei_lba)
2395 __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
2397 if (sdt->guard_tag != csum) {
2398 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2400 (unsigned long)sector,
2401 be16_to_cpu(sdt->guard_tag),
2405 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
2406 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2407 pr_err("%s: REF check failed on sector %lu\n",
2408 __func__, (unsigned long)sector);
2411 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2412 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2413 pr_err("%s: REF check failed on sector %lu\n",
2414 __func__, (unsigned long)sector);
/*
 * Copy protection tuples between the command's protection scatter-gather
 * list and the module's dif_storep array.  'read' selects the direction
 * (store -> sgl on reads, sgl -> store on writes).  Like the data store,
 * dif_storep wraps around at sdebug_store_sectors.
 */
2420 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2421 unsigned int sectors, bool read)
2425 const void *dif_store_end = dif_storep + sdebug_store_sectors;
2426 struct sg_mapping_iter miter;
2428 /* Bytes of protection data to copy into sgl */
2429 resid = sectors * sizeof(*dif_storep);
2431 sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2432 scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2433 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2435 while (sg_miter_next(&miter) && resid > 0) {
2436 size_t len = min(miter.length, resid);
2437 void *start = dif_store(sector);
/* 'rest' is the part of this chunk that wraps past the store's end */
2440 if (dif_store_end < start + len)
2441 rest = start + len - dif_store_end;
2446 memcpy(paddr, start, len - rest);
2448 memcpy(start, paddr, len - rest);
2452 memcpy(paddr + len - rest, dif_storep, rest);
2454 memcpy(dif_storep, paddr + len - rest, rest);
2457 sector += len / sizeof(*dif_storep);
2460 sg_miter_stop(&miter);
/*
 * Verify the stored protection tuples for a READ spanning 'sectors'
 * sectors, then copy them to the command's protection sgl.  A tuple
 * whose application tag is 0xffff is skipped (escape value: block not
 * protected).
 */
2463 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2464 unsigned int sectors, u32 ei_lba)
2467 struct sd_dif_tuple *sdt;
2470 for (i = 0; i < sectors; i++, ei_lba++) {
2473 sector = start_sec + i;
2474 sdt = dif_store(sector);
2476 if (sdt->app_tag == cpu_to_be16(0xffff))
2479 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2486 dif_copy_prot(SCpnt, start_sec, sectors, true);
/*
 * Respond to the READ family (6/10/12/16/32) and the read half of
 * XDWRITEREAD(10).  Decodes lba/num from the CDB variant, performs DIF
 * protection checks when configured, optionally injects errors
 * (medium error option and per-command injection flags), then copies
 * data from the RAM store into the command's buffer under a read lock.
 */
2493 resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2495 u8 *cmd = scp->cmnd;
2499 unsigned long iflags;
/* READ(16): 8-byte lba at cmd[2], 4-byte length at cmd[10] */
2506 lba = get_unaligned_be64(cmd + 2);
2507 num = get_unaligned_be32(cmd + 10);
/* READ(10) */
2512 lba = get_unaligned_be32(cmd + 2);
2513 num = get_unaligned_be16(cmd + 7);
/* READ(6): 21-bit lba; length 0 means 256 blocks */
2518 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2519 (u32)(cmd[1] & 0x1f) << 16;
2520 num = (0 == cmd[4]) ? 256 : cmd[4];
/* READ(12) */
2525 lba = get_unaligned_be32(cmd + 2);
2526 num = get_unaligned_be32(cmd + 6);
2529 case XDWRITEREAD_10:
2531 lba = get_unaligned_be32(cmd + 2);
2532 num = get_unaligned_be16(cmd + 7);
2535 default: /* assume READ(32) */
2536 lba = get_unaligned_be64(cmd + 12);
2537 ei_lba = get_unaligned_be32(cmd + 20);
2538 num = get_unaligned_be32(cmd + 28);
/* DIF type 2 only permits the 32-byte CDB variants */
2543 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2545 mk_sense_invalid_opcode(scp);
2546 return check_condition_result;
/* warn (but proceed) when RDPROTECT bits are clear on a protected dev */
2548 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2549 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2550 (cmd[1] & 0xe0) == 0)
2551 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2554 if (sdebug_any_injecting_opt) {
2555 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2561 /* inline check_device_access_params() */
2562 if (lba + num > sdebug_capacity) {
2563 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2564 return check_condition_result;
2566 /* transfer length excessive (tie in to block limits VPD page) */
2567 if (num > sdebug_store_sectors) {
2568 /* needs work to find which cdb byte 'num' comes from */
2569 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2570 return check_condition_result;
/* simulated unrecoverable read error over a fixed LBA window */
2573 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2574 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2575 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2576 /* claim unrecoverable read error */
2577 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2578 /* set info field and valid bit for fixed descriptor */
2579 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2580 scp->sense_buffer[0] |= 0x80; /* Valid bit */
2581 ret = (lba < OPT_MEDIUM_ERR_ADDR)
2582 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2583 put_unaligned_be32(ret, scp->sense_buffer + 3);
2585 scsi_set_resid(scp, scsi_bufflen(scp));
2586 return check_condition_result;
2589 read_lock_irqsave(&atomic_rw, iflags);
/* DIX/DIF handling: verify protection info before transferring data */
2592 if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2593 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2596 read_unlock_irqrestore(&atomic_rw, iflags);
2597 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2598 return illegal_condition_result;
2602 ret = do_device_access(scp, lba, num, false);
2603 read_unlock_irqrestore(&atomic_rw, iflags);
2605 return DID_ERROR << 16;
2607 scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
/* post-transfer error injection selected by per-command flags */
2609 if (sdebug_any_injecting_opt) {
2610 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2612 if (ep->inj_recovered) {
2613 mk_sense_buffer(scp, RECOVERED_ERROR,
2614 THRESHOLD_EXCEEDED, 0);
2615 return check_condition_result;
2616 } else if (ep->inj_transport) {
2617 mk_sense_buffer(scp, ABORTED_COMMAND,
2618 TRANSPORT_PROBLEM, ACK_NAK_TO);
2619 return check_condition_result;
2620 } else if (ep->inj_dif) {
2621 /* Logical block guard check failed */
2622 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2623 return illegal_condition_result;
2624 } else if (ep->inj_dix) {
2625 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2626 return illegal_condition_result;
/*
 * Debug aid: hex/ASCII dump of a sector to the kernel log, 16 bytes per
 * line, printable characters shown as-is.
 */
2632 void dump_sector(unsigned char *buf, int len)
2636 pr_err(">>> Sector Dump <<<\n");
2637 for (i = 0 ; i < len ; i += 16) {
2640 for (j = 0, n = 0; j < 16; j++) {
2641 unsigned char c = buf[i+j];
2643 if (c >= 0x20 && c < 0x7e)
2644 n += scnprintf(b + n, sizeof(b) - n,
2647 n += scnprintf(b + n, sizeof(b) - n,
2650 pr_err("%04d: %s\n", i, b);
/*
 * Verify protection tuples supplied with a WRITE against the incoming
 * data, walking the protection and data scatter-gather lists in
 * lockstep with sg_mapping_iter.  On success the tuples are copied into
 * dif_storep; on a verify failure the offending sector is dumped.
 */
2654 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2655 unsigned int sectors, u32 ei_lba)
2658 struct sd_dif_tuple *sdt;
2660 sector_t sector = start_sec;
2663 struct sg_mapping_iter diter;
2664 struct sg_mapping_iter piter;
2666 BUG_ON(scsi_sg_count(SCpnt) == 0);
2667 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2669 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2670 scsi_prot_sg_count(SCpnt),
2671 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2672 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2673 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2675 /* For each protection page */
2676 while (sg_miter_next(&piter)) {
2678 if (WARN_ON(!sg_miter_next(&diter))) {
/* one tuple per sector within this protection page */
2683 for (ppage_offset = 0; ppage_offset < piter.length;
2684 ppage_offset += sizeof(struct sd_dif_tuple)) {
2685 /* If we're at the end of the current
2686 * data page advance to the next one
2688 if (dpage_offset >= diter.length) {
2689 if (WARN_ON(!sg_miter_next(&diter))) {
2696 sdt = piter.addr + ppage_offset;
2697 daddr = diter.addr + dpage_offset;
2699 ret = dif_verify(sdt, daddr, sector, ei_lba);
2701 dump_sector(daddr, scsi_debug_sector_size);
2707 dpage_offset += scsi_debug_sector_size;
2709 diter.consumed = dpage_offset;
2710 sg_miter_stop(&diter);
2712 sg_miter_stop(&piter);
/* all tuples verified: persist them into the protection store */
2714 dif_copy_prot(SCpnt, start_sec, sectors, false);
2721 sg_miter_stop(&diter);
2722 sg_miter_stop(&piter);
/*
 * Convert an LBA to its index in the provisioning bitmap (map_storep),
 * one bit per unmap granularity unit, accounting for the configured
 * unmap alignment offset.
 */
2726 static unsigned long lba_to_map_index(sector_t lba)
2728 if (scsi_debug_unmap_alignment) {
2729 lba += scsi_debug_unmap_granularity -
2730 scsi_debug_unmap_alignment;
2732 do_div(lba, scsi_debug_unmap_granularity);
/*
 * Inverse of lba_to_map_index(): first LBA covered by a provisioning
 * bitmap index, adjusted for the configured unmap alignment.
 */
2737 static sector_t map_index_to_lba(unsigned long index)
2739 sector_t lba = index * scsi_debug_unmap_granularity;
2741 if (scsi_debug_unmap_alignment) {
2742 lba -= scsi_debug_unmap_granularity -
2743 scsi_debug_unmap_alignment;
/*
 * Return whether lba is mapped (bit set in map_storep) and, via *num,
 * the length of the contiguous run that shares that state, clipped to
 * the end of the RAM store.
 */
2749 static unsigned int map_state(sector_t lba, unsigned int *num)
2752 unsigned int mapped;
2753 unsigned long index;
2756 index = lba_to_map_index(lba);
2757 mapped = test_bit(index, map_storep);
/* run ends at the first bit with the opposite state */
2760 next = find_next_zero_bit(map_storep, map_size, index);
2762 next = find_next_bit(map_storep, map_size, index);
2764 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
/*
 * Mark [lba, lba+len) as mapped: set every provisioning bitmap bit the
 * range touches (a partial unit marks the whole unit mapped).
 */
2770 static void map_region(sector_t lba, unsigned int len)
2772 sector_t end = lba + len;
2775 unsigned long index = lba_to_map_index(lba);
2777 if (index < map_size)
2778 set_bit(index, map_storep);
2780 lba = map_index_to_lba(index + 1);
/*
 * Unmap [lba, lba+len): clear the provisioning bit only for units that
 * are entirely covered by the range (partially covered units stay
 * mapped).  When LBPRZ is configured the data is zero-filled and any
 * protection tuples are set to the 0xff escape pattern.
 */
2784 static void unmap_region(sector_t lba, unsigned int len)
2786 sector_t end = lba + len;
2789 unsigned long index = lba_to_map_index(lba);
/* only whole granularity units aligned at lba may be deallocated */
2791 if (lba == map_index_to_lba(index) &&
2792 lba + scsi_debug_unmap_granularity <= end &&
2794 clear_bit(index, map_storep);
2795 if (scsi_debug_lbprz) {
2796 memset(fake_storep +
2797 lba * scsi_debug_sector_size, 0,
2798 scsi_debug_sector_size *
2799 scsi_debug_unmap_granularity);
2802 memset(dif_storep + lba, 0xff,
2803 sizeof(*dif_storep) *
2804 scsi_debug_unmap_granularity);
2807 lba = map_index_to_lba(index + 1);
/*
 * Respond to the WRITE family (6/10/12/16/32) and the write half of
 * XDWRITEREAD(10).  Mirrors resp_read_dt0(): decode lba/num per CDB
 * variant, verify protection info when configured, copy data into the
 * RAM store under the write lock, update the provisioning map, and
 * optionally inject errors afterwards.
 */
2812 resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2814 u8 *cmd = scp->cmnd;
2818 unsigned long iflags;
/* WRITE(16) */
2825 lba = get_unaligned_be64(cmd + 2);
2826 num = get_unaligned_be32(cmd + 10);
/* WRITE(10) */
2831 lba = get_unaligned_be32(cmd + 2);
2832 num = get_unaligned_be16(cmd + 7);
/* WRITE(6): 21-bit lba; length 0 means 256 blocks */
2837 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2838 (u32)(cmd[1] & 0x1f) << 16;
2839 num = (0 == cmd[4]) ? 256 : cmd[4];
/* WRITE(12) */
2844 lba = get_unaligned_be32(cmd + 2);
2845 num = get_unaligned_be32(cmd + 6);
2848 case 0x53: /* XDWRITEREAD(10) */
2850 lba = get_unaligned_be32(cmd + 2);
2851 num = get_unaligned_be16(cmd + 7);
2854 default: /* assume WRITE(32) */
2855 lba = get_unaligned_be64(cmd + 12);
2856 ei_lba = get_unaligned_be32(cmd + 20);
2857 num = get_unaligned_be32(cmd + 28);
/* DIF type 2 only permits the 32-byte CDB variants */
2862 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2864 mk_sense_invalid_opcode(scp);
2865 return check_condition_result;
2867 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2868 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2869 (cmd[1] & 0xe0) == 0)
2870 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2874 /* inline check_device_access_params() */
2875 if (lba + num > sdebug_capacity) {
2876 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2877 return check_condition_result;
2879 /* transfer length excessive (tie in to block limits VPD page) */
2880 if (num > sdebug_store_sectors) {
2881 /* needs work to find which cdb byte 'num' comes from */
2882 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2883 return check_condition_result;
2886 write_lock_irqsave(&atomic_rw, iflags);
/* DIX/DIF handling: verify supplied protection info before writing */
2889 if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2890 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2893 write_unlock_irqrestore(&atomic_rw, iflags);
2894 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2895 return illegal_condition_result;
2899 ret = do_device_access(scp, lba, num, true);
2900 if (scsi_debug_lbp())
2901 map_region(lba, num);
2902 write_unlock_irqrestore(&atomic_rw, iflags);
2904 return (DID_ERROR << 16);
2905 else if ((ret < (num * scsi_debug_sector_size)) &&
2906 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2907 sdev_printk(KERN_INFO, scp->device,
2908 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2909 my_name, num * scsi_debug_sector_size, ret);
/* post-transfer error injection selected by per-command flags */
2911 if (sdebug_any_injecting_opt) {
2912 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2914 if (ep->inj_recovered) {
2915 mk_sense_buffer(scp, RECOVERED_ERROR,
2916 THRESHOLD_EXCEEDED, 0);
2917 return check_condition_result;
2918 } else if (ep->inj_dif) {
2919 /* Logical block guard check failed */
2920 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2921 return illegal_condition_result;
2922 } else if (ep->inj_dix) {
2923 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2924 return illegal_condition_result;
/*
 * Common worker for WRITE SAME(10/16).  With unmap set (and LBP
 * enabled) the range is deallocated; otherwise one logical block is
 * obtained (zeroes if ndob, else fetched from the data-out buffer) and
 * replicated across the remaining blocks of the range.
 */
2931 resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2932 bool unmap, bool ndob)
2934 unsigned long iflags;
2935 unsigned long long i;
2938 ret = check_device_access_params(scp, lba, num);
2942 write_lock_irqsave(&atomic_rw, iflags);
2944 if (unmap && scsi_debug_lbp()) {
2945 unmap_region(lba, num);
2949 /* if ndob then zero 1 logical block, else fetch 1 logical block */
2951 memset(fake_storep + (lba * scsi_debug_sector_size), 0,
2952 scsi_debug_sector_size);
2955 ret = fetch_to_dev_buffer(scp, fake_storep +
2956 (lba * scsi_debug_sector_size),
2957 scsi_debug_sector_size);
2960 write_unlock_irqrestore(&atomic_rw, iflags);
2961 return (DID_ERROR << 16);
2962 } else if ((ret < (num * scsi_debug_sector_size)) &&
2963 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2964 sdev_printk(KERN_INFO, scp->device,
2965 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2966 my_name, "write same",
2967 num * scsi_debug_sector_size, ret);
2969 /* Copy first sector to remaining blocks */
2970 for (i = 1 ; i < num ; i++)
2971 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2972 fake_storep + (lba * scsi_debug_sector_size),
2973 scsi_debug_sector_size);
2975 if (scsi_debug_lbp())
2976 map_region(lba, num);
2978 write_unlock_irqrestore(&atomic_rw, iflags);
/*
 * WRITE SAME(10): decode lba/num, reject UNMAP when LBPWS10 is not
 * advertised and ranges longer than the configured write-same limit,
 * then delegate to resp_write_same() (NDOB not available in this form).
 */
2984 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2986 u8 *cmd = scp->cmnd;
2993 if (scsi_debug_lbpws10 == 0) {
2994 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2995 return check_condition_result;
2999 lba = get_unaligned_be32(cmd + 2);
3000 num = get_unaligned_be16(cmd + 7);
3001 if (num > scsi_debug_write_same_length) {
3002 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3003 return check_condition_result;
3005 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
/*
 * WRITE SAME(16): like the 10-byte form but with 64-bit lba, the
 * LBPWS capability gate for UNMAP, and support for the NDOB bit
 * (no data-out buffer: write zeroes).
 */
3009 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3011 u8 *cmd = scp->cmnd;
3018 if (cmd[1] & 0x8) { /* UNMAP */
3019 if (scsi_debug_lbpws == 0) {
3020 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3021 return check_condition_result;
3025 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3027 lba = get_unaligned_be64(cmd + 2);
3028 num = get_unaligned_be32(cmd + 10);
3029 if (num > scsi_debug_write_same_length) {
3030 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3031 return check_condition_result;
3033 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
/*
 * Respond to COMPARE AND WRITE.  The data-out buffer carries the
 * compare data followed by the write data (num blocks each); both
 * halves are fetched into a temporary buffer by pointing fake_storep at
 * it and reusing do_device_access(), then comp_write_worker() performs
 * the compare-then-write under the write lock.  On miscompare a
 * MISCOMPARE sense is returned.
 */
3037 resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3039 u8 *cmd = scp->cmnd;
3041 u8 *fake_storep_hold;
3044 u32 lb_size = scsi_debug_sector_size;
3046 unsigned long iflags;
3050 lba = get_unaligned_be64(cmd + 2);
3051 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3053 return 0; /* degenerate case, not an error */
3054 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3056 mk_sense_invalid_opcode(scp);
3057 return check_condition_result;
3059 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3060 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3061 (cmd[1] & 0xe0) == 0)
3062 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3065 /* inline check_device_access_params() */
3066 if (lba + num > sdebug_capacity) {
3067 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3068 return check_condition_result;
3070 /* transfer length excessive (tie in to block limits VPD page) */
3071 if (num > sdebug_store_sectors) {
3072 /* needs work to find which cdb byte 'num' comes from */
3073 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3074 return check_condition_result;
/* dnum blocks: compare half plus write half */
3077 arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3079 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3081 return check_condition_result;
3084 write_lock_irqsave(&atomic_rw, iflags);
3086 /* trick do_device_access() to fetch both compare and write buffers
3087 * from data-in into arr. Safe (atomic) since write_lock held. */
3088 fake_storep_hold = fake_storep;
3090 ret = do_device_access(scp, 0, dnum, true);
3091 fake_storep = fake_storep_hold;
3093 retval = DID_ERROR << 16;
3095 } else if ((ret < (dnum * lb_size)) &&
3096 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3097 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3098 "indicated=%u, IO sent=%d bytes\n", my_name,
3099 dnum * lb_size, ret);
3100 if (!comp_write_worker(lba, num, arr)) {
3101 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3102 retval = check_condition_result;
3105 if (scsi_debug_lbp())
3106 map_region(lba, num);
3108 write_unlock_irqrestore(&atomic_rw, iflags);
/* One UNMAP block descriptor as found in the UNMAP parameter list
 * (member layout not visible in this excerpt — presumably an 8-byte lba
 * and 4-byte block count per SBC; confirm against the full source). */
3113 struct unmap_block_desc {
/*
 * Respond to UNMAP.  Copies the parameter list into a temporary buffer,
 * validates its lengths, and deallocates each described (lba, blocks)
 * range under the write lock.  When logical block provisioning is not
 * enabled the command is simply reported as done.
 */
3120 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3123 struct unmap_block_desc *desc;
3124 unsigned int i, payload_len, descriptors;
3126 unsigned long iflags;
3129 if (!scsi_debug_lbp())
3130 return 0; /* fib and say its done */
3131 payload_len = get_unaligned_be16(scp->cmnd + 7);
3132 BUG_ON(scsi_bufflen(scp) != payload_len);
/* 8-byte list header, 16 bytes per descriptor */
3134 descriptors = (payload_len - 8) / 16;
3135 if (descriptors > scsi_debug_unmap_max_desc) {
3136 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3137 return check_condition_result;
3140 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3142 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3144 return check_condition_result;
3147 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
/* sanity-check the embedded data length fields against the CDB length */
3149 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3150 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3152 desc = (void *)&buf[8];
3154 write_lock_irqsave(&atomic_rw, iflags);
3156 for (i = 0 ; i < descriptors ; i++) {
3157 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3158 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3160 ret = check_device_access_params(scp, lba, num);
3164 unmap_region(lba, num);
3170 write_unlock_irqrestore(&atomic_rw, iflags);
3176 #define SDEBUG_GET_LBA_STATUS_LEN 32
/*
 * Respond to GET LBA STATUS.  Returns a single LBA status descriptor:
 * the provisioning state of the starting lba and the length of the
 * contiguous run sharing that state (whole remaining capacity when LBP
 * is disabled).
 */
3179 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3181 u8 *cmd = scp->cmnd;
3183 u32 alloc_len, mapped, num;
3184 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3187 lba = get_unaligned_be64(cmd + 2);
3188 alloc_len = get_unaligned_be32(cmd + 10);
3193 ret = check_device_access_params(scp, lba, 1);
3197 if (scsi_debug_lbp())
3198 mapped = map_state(lba, &num);
3201 /* following just in case virtual_gb changed */
3202 sdebug_capacity = get_sdebug_capacity();
3203 if (sdebug_capacity - lba <= 0xffffffff)
3204 num = sdebug_capacity - lba;
3209 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3210 put_unaligned_be32(20, arr); /* Parameter Data Length */
3211 put_unaligned_be64(lba, arr + 8); /* LBA */
3212 put_unaligned_be32(num, arr + 16); /* Number of blocks */
3213 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
3215 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3218 #define SDEBUG_RLUN_ARR_SZ 256
/*
 * Respond to REPORT LUNS.  Builds a LUN list for the configured number
 * of LUNs (optionally skipping LUN 0), appending the well-known
 * REPORT LUNS LUN when select_report requests it.  LUN entries use the
 * SAM-2 flat addressing method.
 */
3220 static int resp_report_luns(struct scsi_cmnd * scp,
3221 struct sdebug_dev_info * devip)
3223 unsigned int alloc_len;
3224 int lun_cnt, i, upper, num, n, want_wlun, shortish;
3226 unsigned char *cmd = scp->cmnd;
3227 int select_report = (int)cmd[2];
3228 struct scsi_lun *one_lun;
3229 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
3230 unsigned char * max_addr;
3232 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
/* allocation length below 4 or select_report above 2 is invalid */
3233 shortish = (alloc_len < 4);
3234 if (shortish || (select_report > 2)) {
3235 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
3236 return check_condition_result;
3238 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
3239 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
3240 lun_cnt = scsi_debug_max_luns;
3241 if (1 == select_report)
3243 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
3245 want_wlun = (select_report > 0) ? 1 : 0;
3246 num = lun_cnt + want_wlun;
/* LUN list length header (big-endian byte count) */
3247 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
3248 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
3249 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
3250 sizeof(struct scsi_lun)), num);
3255 one_lun = (struct scsi_lun *) &arr[8];
3256 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
3257 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
3258 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
3260 upper = (lun >> 8) & 0x3f;
3262 one_lun[i].scsi_lun[0] =
3263 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
3264 one_lun[i].scsi_lun[1] = lun & 0xff;
/* append the well-known REPORT LUNS LUN when requested */
3267 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
3268 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
3271 alloc_len = (unsigned char *)(one_lun + i) - arr;
3272 return fill_from_dev_buffer(scp, arr,
3273 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
/*
 * XOR step of XDWRITEREAD: copy the data-out buffer into a temporary
 * buffer, then XOR it element-wise into the data-in scatter-gather
 * list.  lba/num/devip are accepted for interface symmetry with the
 * other resp_* handlers (not visibly used in this excerpt).
 */
3276 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3277 unsigned int num, struct sdebug_dev_info *devip)
3280 unsigned char *kaddr, *buf;
3281 unsigned int offset;
3282 struct scsi_data_buffer *sdb = scsi_in(scp);
3283 struct sg_mapping_iter miter;
3285 /* better not to use temporary buffer. */
3286 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3288 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3290 return check_condition_result;
3293 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3296 sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3297 SG_MITER_ATOMIC | SG_MITER_TO_SG);
3299 while (sg_miter_next(&miter)) {
3301 for (j = 0; j < miter.length; j++)
3302 *(kaddr + j) ^= *(buf + offset + j);
3304 offset += miter.length;
3306 sg_miter_stop(&miter);
/*
 * Respond to XDWRITEREAD(10): requires a bidirectional command; runs
 * the read, then (unless DISABLE_WRITE is set) the write, and finally
 * XORs data-out into data-in via resp_xdwriteread().
 */
3313 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3315 u8 *cmd = scp->cmnd;
3320 if (!scsi_bidi_cmnd(scp)) {
3321 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3323 return check_condition_result;
3325 errsts = resp_read_dt0(scp, devip);
3328 if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
3329 errsts = resp_write_dt0(scp, devip);
3333 lba = get_unaligned_be32(cmd + 2);
3334 num = get_unaligned_be16(cmd + 7);
3335 return resp_xdwriteread(scp, lba, num, devip);
3338 /* When timer or tasklet goes off this function is called. */
/*
 * Completes the queued command at queued_arr[indx]: validates the
 * index and entry under queued_arr_lock, drops the device's in-flight
 * count, handles shrinking of max_queue in progress, clears the
 * in-use bit and invokes the mid-layer done callback.
 * (Nearly identical to sdebug_q_cmd_hrt_complete() below.)
 */
3339 static void sdebug_q_cmd_complete(unsigned long indx)
3343 unsigned long iflags;
3344 struct sdebug_queued_cmd *sqcp;
3345 struct scsi_cmnd *scp;
3346 struct sdebug_dev_info *devip;
3348 atomic_inc(&sdebug_completions);
3350 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3351 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3354 spin_lock_irqsave(&queued_arr_lock, iflags);
3355 sqcp = &queued_arr[qa_indx];
3358 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3359 pr_err("%s: scp is NULL\n", __func__);
3362 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3364 atomic_dec(&devip->num_in_q);
3366 pr_err("%s: devip=NULL\n", __func__);
3367 if (atomic_read(&retired_max_queue) > 0)
3370 sqcp->a_cmnd = NULL;
3371 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3372 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3373 pr_err("%s: Unexpected completion\n", __func__);
3377 if (unlikely(retiring)) { /* user has reduced max_queue */
3380 retval = atomic_read(&retired_max_queue);
3381 if (qa_indx >= retval) {
3382 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3383 pr_err("%s: index %d too large\n", __func__, retval);
/* once no in-use slot remains above the new limit, retire fully */
3386 k = find_last_bit(queued_in_use_bm, retval);
3387 if ((k < scsi_debug_max_queue) || (k == retval))
3388 atomic_set(&retired_max_queue, 0);
3390 atomic_set(&retired_max_queue, k + 1);
3392 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3393 scp->scsi_done(scp); /* callback to mid level */
3396 /* When high resolution timer goes off this function is called. */
/*
 * hrtimer flavour of command completion; the body duplicates
 * sdebug_q_cmd_complete() except that the queue index comes from the
 * embedded sdebug_hrtimer and HRTIMER_NORESTART is returned.
 * NOTE(review): candidate for factoring the shared logic into one
 * helper — confirm against the full file before refactoring.
 */
3397 static enum hrtimer_restart
3398 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3402 unsigned long iflags;
3403 struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3404 struct sdebug_queued_cmd *sqcp;
3405 struct scsi_cmnd *scp;
3406 struct sdebug_dev_info *devip;
3408 atomic_inc(&sdebug_completions);
3409 qa_indx = sd_hrtp->qa_indx;
3410 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3411 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3414 spin_lock_irqsave(&queued_arr_lock, iflags);
3415 sqcp = &queued_arr[qa_indx];
3418 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3419 pr_err("%s: scp is NULL\n", __func__);
3422 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3424 atomic_dec(&devip->num_in_q);
3426 pr_err("%s: devip=NULL\n", __func__);
3427 if (atomic_read(&retired_max_queue) > 0)
3430 sqcp->a_cmnd = NULL;
3431 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3432 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3433 pr_err("%s: Unexpected completion\n", __func__);
3437 if (unlikely(retiring)) { /* user has reduced max_queue */
3440 retval = atomic_read(&retired_max_queue);
3441 if (qa_indx >= retval) {
3442 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3443 pr_err("%s: index %d too large\n", __func__, retval);
3446 k = find_last_bit(queued_in_use_bm, retval);
3447 if ((k < scsi_debug_max_queue) || (k == retval))
3448 atomic_set(&retired_max_queue, 0);
3450 atomic_set(&retired_max_queue, k + 1);
3452 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3453 scp->scsi_done(scp); /* callback to mid level */
3455 return HRTIMER_NORESTART;
3458 static struct sdebug_dev_info *
3459 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3461 struct sdebug_dev_info *devip;
3463 devip = kzalloc(sizeof(*devip), flags);
3465 devip->sdbg_host = sdbg_host;
3466 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3471 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3473 struct sdebug_host_info * sdbg_host;
3474 struct sdebug_dev_info * open_devip = NULL;
3475 struct sdebug_dev_info * devip =
3476 (struct sdebug_dev_info *)sdev->hostdata;
3480 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3482 pr_err("%s: Host info NULL\n", __func__);
3485 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3486 if ((devip->used) && (devip->channel == sdev->channel) &&
3487 (devip->target == sdev->id) &&
3488 (devip->lun == sdev->lun))
3491 if ((!devip->used) && (!open_devip))
3495 if (!open_devip) { /* try and make a new one */
3496 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3498 printk(KERN_ERR "%s: out of memory at line %d\n",
3499 __func__, __LINE__);
3504 open_devip->channel = sdev->channel;
3505 open_devip->target = sdev->id;
3506 open_devip->lun = sdev->lun;
3507 open_devip->sdbg_host = sdbg_host;
3508 atomic_set(&open_devip->num_in_q, 0);
3509 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3510 open_devip->used = true;
3514 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3516 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3517 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
3518 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3519 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3523 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3525 struct sdebug_dev_info *devip;
3527 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3528 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
3529 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3530 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3531 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3532 devip = devInfoReg(sdp);
3534 return 1; /* no resources, will be marked offline */
3535 sdp->hostdata = devip;
3536 blk_queue_max_segment_size(sdp->request_queue, -1U);
3537 if (scsi_debug_no_uld)
3538 sdp->no_uld_attach = 1;
3542 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3544 struct sdebug_dev_info *devip =
3545 (struct sdebug_dev_info *)sdp->hostdata;
3547 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3548 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
3549 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3551 /* make this slot available for re-use */
3552 devip->used = false;
3553 sdp->hostdata = NULL;
3557 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
3558 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3560 unsigned long iflags;
3561 int k, qmax, r_qmax;
3562 struct sdebug_queued_cmd *sqcp;
3563 struct sdebug_dev_info *devip;
3565 spin_lock_irqsave(&queued_arr_lock, iflags);
3566 qmax = scsi_debug_max_queue;
3567 r_qmax = atomic_read(&retired_max_queue);
3570 for (k = 0; k < qmax; ++k) {
3571 if (test_bit(k, queued_in_use_bm)) {
3572 sqcp = &queued_arr[k];
3573 if (cmnd == sqcp->a_cmnd) {
3574 devip = (struct sdebug_dev_info *)
3575 cmnd->device->hostdata;
3577 atomic_dec(&devip->num_in_q);
3578 sqcp->a_cmnd = NULL;
3579 spin_unlock_irqrestore(&queued_arr_lock,
3581 if (scsi_debug_ndelay > 0) {
3584 &sqcp->sd_hrtp->hrt);
3585 } else if (scsi_debug_delay > 0) {
3586 if (sqcp->cmnd_timerp)
3589 } else if (scsi_debug_delay < 0) {
3591 tasklet_kill(sqcp->tletp);
3593 clear_bit(k, queued_in_use_bm);
3598 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3602 /* Deletes (stops) timers or tasklets of all queued commands */
3603 static void stop_all_queued(void)
3605 unsigned long iflags;
3607 struct sdebug_queued_cmd *sqcp;
3608 struct sdebug_dev_info *devip;
3610 spin_lock_irqsave(&queued_arr_lock, iflags);
3611 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3612 if (test_bit(k, queued_in_use_bm)) {
3613 sqcp = &queued_arr[k];
3615 devip = (struct sdebug_dev_info *)
3616 sqcp->a_cmnd->device->hostdata;
3618 atomic_dec(&devip->num_in_q);
3619 sqcp->a_cmnd = NULL;
3620 spin_unlock_irqrestore(&queued_arr_lock,
3622 if (scsi_debug_ndelay > 0) {
3625 &sqcp->sd_hrtp->hrt);
3626 } else if (scsi_debug_delay > 0) {
3627 if (sqcp->cmnd_timerp)
3630 } else if (scsi_debug_delay < 0) {
3632 tasklet_kill(sqcp->tletp);
3634 clear_bit(k, queued_in_use_bm);
3635 spin_lock_irqsave(&queued_arr_lock, iflags);
3639 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3642 /* Free queued command memory on heap */
3643 static void free_all_queued(void)
3645 unsigned long iflags;
3647 struct sdebug_queued_cmd *sqcp;
3649 spin_lock_irqsave(&queued_arr_lock, iflags);
3650 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3651 sqcp = &queued_arr[k];
3652 kfree(sqcp->cmnd_timerp);
3653 sqcp->cmnd_timerp = NULL;
3656 kfree(sqcp->sd_hrtp);
3657 sqcp->sd_hrtp = NULL;
3659 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3662 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3666 if (SCpnt->device &&
3667 (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3668 sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3670 stop_queued_cmnd(SCpnt);
3675 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3677 struct sdebug_dev_info * devip;
3680 if (SCpnt && SCpnt->device) {
3681 struct scsi_device *sdp = SCpnt->device;
3683 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3684 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3685 devip = devInfoReg(sdp);
3687 set_bit(SDEBUG_UA_POR, devip->uas_bm);
3692 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3694 struct sdebug_host_info *sdbg_host;
3695 struct sdebug_dev_info *devip;
3696 struct scsi_device *sdp;
3697 struct Scsi_Host *hp;
3700 ++num_target_resets;
3703 sdp = SCpnt->device;
3706 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3707 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3711 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3713 list_for_each_entry(devip,
3714 &sdbg_host->dev_info_list,
3716 if (devip->target == sdp->id) {
3717 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3721 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3722 sdev_printk(KERN_INFO, sdp,
3723 "%s: %d device(s) found in target\n", __func__, k);
3728 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3730 struct sdebug_host_info *sdbg_host;
3731 struct sdebug_dev_info *devip;
3732 struct scsi_device * sdp;
3733 struct Scsi_Host * hp;
3737 if (!(SCpnt && SCpnt->device))
3739 sdp = SCpnt->device;
3740 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3741 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3744 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3746 list_for_each_entry(devip,
3747 &sdbg_host->dev_info_list,
3749 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3754 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3755 sdev_printk(KERN_INFO, sdp,
3756 "%s: %d device(s) found in host\n", __func__, k);
3761 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3763 struct sdebug_host_info * sdbg_host;
3764 struct sdebug_dev_info *devip;
3768 if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3769 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3770 spin_lock(&sdebug_host_list_lock);
3771 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3772 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3774 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3778 spin_unlock(&sdebug_host_list_lock);
3780 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3781 sdev_printk(KERN_INFO, SCpnt->device,
3782 "%s: %d device(s) found\n", __func__, k);
3786 static void __init sdebug_build_parts(unsigned char *ramp,
3787 unsigned long store_size)
3789 struct partition * pp;
3790 int starts[SDEBUG_MAX_PARTS + 2];
3791 int sectors_per_part, num_sectors, k;
3792 int heads_by_sects, start_sec, end_sec;
3794 /* assume partition table already zeroed */
3795 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3797 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3798 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3799 pr_warn("%s: reducing partitions to %d\n", __func__,
3802 num_sectors = (int)sdebug_store_sectors;
3803 sectors_per_part = (num_sectors - sdebug_sectors_per)
3804 / scsi_debug_num_parts;
3805 heads_by_sects = sdebug_heads * sdebug_sectors_per;
3806 starts[0] = sdebug_sectors_per;
3807 for (k = 1; k < scsi_debug_num_parts; ++k)
3808 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3810 starts[scsi_debug_num_parts] = num_sectors;
3811 starts[scsi_debug_num_parts + 1] = 0;
3813 ramp[510] = 0x55; /* magic partition markings */
3815 pp = (struct partition *)(ramp + 0x1be);
3816 for (k = 0; starts[k + 1]; ++k, ++pp) {
3817 start_sec = starts[k];
3818 end_sec = starts[k + 1] - 1;
3821 pp->cyl = start_sec / heads_by_sects;
3822 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3823 / sdebug_sectors_per;
3824 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3826 pp->end_cyl = end_sec / heads_by_sects;
3827 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3828 / sdebug_sectors_per;
3829 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3831 pp->start_sect = cpu_to_le32(start_sec);
3832 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3833 pp->sys_ind = 0x83; /* plain Linux partition */
3838 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3839 int scsi_result, int delta_jiff)
3841 unsigned long iflags;
3842 int k, num_in_q, qdepth, inject;
3843 struct sdebug_queued_cmd *sqcp = NULL;
3844 struct scsi_device *sdp = cmnd->device;
3846 if (NULL == cmnd || NULL == devip) {
3847 pr_warn("%s: called with NULL cmnd or devip pointer\n",
3849 /* no particularly good error to report back */
3850 return SCSI_MLQUEUE_HOST_BUSY;
3852 if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3853 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3854 __func__, scsi_result);
3855 if (delta_jiff == 0)
3856 goto respond_in_thread;
3858 /* schedule the response at a later time if resources permit */
3859 spin_lock_irqsave(&queued_arr_lock, iflags);
3860 num_in_q = atomic_read(&devip->num_in_q);
3861 qdepth = cmnd->device->queue_depth;
3863 if ((qdepth > 0) && (num_in_q >= qdepth)) {
3865 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3866 goto respond_in_thread;
3868 scsi_result = device_qfull_result;
3869 } else if ((scsi_debug_every_nth != 0) &&
3870 (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3871 (scsi_result == 0)) {
3872 if ((num_in_q == (qdepth - 1)) &&
3873 (atomic_inc_return(&sdebug_a_tsf) >=
3874 abs(scsi_debug_every_nth))) {
3875 atomic_set(&sdebug_a_tsf, 0);
3877 scsi_result = device_qfull_result;
3881 k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3882 if (k >= scsi_debug_max_queue) {
3883 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3885 goto respond_in_thread;
3886 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3887 scsi_result = device_qfull_result;
3888 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3889 sdev_printk(KERN_INFO, sdp,
3890 "%s: max_queue=%d exceeded, %s\n",
3891 __func__, scsi_debug_max_queue,
3892 (scsi_result ? "status: TASK SET FULL" :
3893 "report: host busy"));
3895 goto respond_in_thread;
3897 return SCSI_MLQUEUE_HOST_BUSY;
3899 __set_bit(k, queued_in_use_bm);
3900 atomic_inc(&devip->num_in_q);
3901 sqcp = &queued_arr[k];
3902 sqcp->a_cmnd = cmnd;
3903 cmnd->result = scsi_result;
3904 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3905 if (delta_jiff > 0) {
3906 if (NULL == sqcp->cmnd_timerp) {
3907 sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3909 if (NULL == sqcp->cmnd_timerp)
3910 return SCSI_MLQUEUE_HOST_BUSY;
3911 init_timer(sqcp->cmnd_timerp);
3913 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3914 sqcp->cmnd_timerp->data = k;
3915 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3916 add_timer(sqcp->cmnd_timerp);
3917 } else if (scsi_debug_ndelay > 0) {
3918 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3919 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3921 if (NULL == sd_hp) {
3922 sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3924 return SCSI_MLQUEUE_HOST_BUSY;
3925 sqcp->sd_hrtp = sd_hp;
3926 hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3928 sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3931 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3932 } else { /* delay < 0 */
3933 if (NULL == sqcp->tletp) {
3934 sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3936 if (NULL == sqcp->tletp)
3937 return SCSI_MLQUEUE_HOST_BUSY;
3938 tasklet_init(sqcp->tletp,
3939 sdebug_q_cmd_complete, k);
3941 if (-1 == delta_jiff)
3942 tasklet_hi_schedule(sqcp->tletp);
3944 tasklet_schedule(sqcp->tletp);
3946 if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
3947 (scsi_result == device_qfull_result))
3948 sdev_printk(KERN_INFO, sdp,
3949 "%s: num_in_q=%d +1, %s%s\n", __func__,
3950 num_in_q, (inject ? "<inject> " : ""),
3951 "status: TASK SET FULL");
3954 respond_in_thread: /* call back to mid-layer using invocation thread */
3955 cmnd->result = scsi_result;
3956 cmnd->scsi_done(cmnd);
3960 /* Note: The following macros create attribute files in the
3961 /sys/module/scsi_debug/parameters directory. Unfortunately this
3962 driver is unaware of a change and cannot trigger auxiliary actions
3963 as it can when the corresponding attribute in the
3964 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
3966 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3967 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3968 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3969 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3970 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3971 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3972 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3973 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3974 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3975 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3976 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3977 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3978 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3979 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3980 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3981 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3982 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3983 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3984 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3985 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3986 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3987 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3988 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3989 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3990 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3991 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3992 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3993 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3994 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3995 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3996 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3997 module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
3998 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3999 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
4000 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
4001 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
4002 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
4003 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
4005 module_param_named(write_same_length, scsi_debug_write_same_length, int,
4008 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4009 MODULE_DESCRIPTION("SCSI debug adapter driver");
4010 MODULE_LICENSE("GPL");
4011 MODULE_VERSION(SCSI_DEBUG_VERSION);
4013 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4014 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4015 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4016 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4017 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4018 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4019 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4020 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4021 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4022 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4023 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4024 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
4025 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4026 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4027 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4028 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
4029 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4030 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4031 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4032 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4033 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4034 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
4035 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4036 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4037 MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
4038 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4039 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4040 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4041 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4042 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
4043 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4044 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4045 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4046 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4047 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4048 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4049 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4050 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4051 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4053 static char sdebug_info[256];
4055 static const char * scsi_debug_info(struct Scsi_Host * shp)
4057 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4058 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4059 scsi_debug_version_date, scsi_debug_dev_size_mb,
4064 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4065 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4069 int minLen = length > 15 ? 15 : length;
4071 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4073 memcpy(arr, buffer, minLen);
4075 if (1 != sscanf(arr, "%d", &opts))
4077 scsi_debug_opts = opts;
4078 if (scsi_debug_every_nth != 0)
4079 atomic_set(&sdebug_cmnd_count, 0);
4083 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4084 * same for each scsi_debug host (if more than one). Some of the counters
4085 * output are not atomics so might be inaccurate in a busy system. */
4086 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4091 if (scsi_debug_every_nth > 0)
4092 snprintf(b, sizeof(b), " (curr:%d)",
4093 ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
4094 atomic_read(&sdebug_a_tsf) :
4095 atomic_read(&sdebug_cmnd_count)));
4099 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
4100 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4102 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4103 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4104 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4105 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4106 "usec_in_jiffy=%lu\n",
4107 SCSI_DEBUG_VERSION, scsi_debug_version_date,
4108 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
4109 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
4110 scsi_debug_max_luns, atomic_read(&sdebug_completions),
4111 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
4112 sdebug_sectors_per, num_aborts, num_dev_resets,
4113 num_target_resets, num_bus_resets, num_host_resets,
4114 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
4116 f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
4117 if (f != scsi_debug_max_queue) {
4118 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
4119 seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n",
4120 "queued_in_use_bm", f, l);
4125 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4127 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
4129 /* Returns -EBUSY if delay is being changed and commands are queued */
4130 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4135 if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4137 if (scsi_debug_delay != delay) {
4138 unsigned long iflags;
4141 spin_lock_irqsave(&queued_arr_lock, iflags);
4142 k = find_first_bit(queued_in_use_bm,
4143 scsi_debug_max_queue);
4144 if (k != scsi_debug_max_queue)
4145 res = -EBUSY; /* have queued commands */
4147 scsi_debug_delay = delay;
4148 scsi_debug_ndelay = 0;
4150 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4156 static DRIVER_ATTR_RW(delay);
4158 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4160 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
4162 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4163 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
4164 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4167 unsigned long iflags;
4170 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4171 (ndelay >= 0) && (ndelay < 1000000000)) {
4173 if (scsi_debug_ndelay != ndelay) {
4174 spin_lock_irqsave(&queued_arr_lock, iflags);
4175 k = find_first_bit(queued_in_use_bm,
4176 scsi_debug_max_queue);
4177 if (k != scsi_debug_max_queue)
4178 res = -EBUSY; /* have queued commands */
4180 scsi_debug_ndelay = ndelay;
4181 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
4184 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4190 static DRIVER_ATTR_RW(ndelay);
4192 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4194 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
4197 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4203 if (1 == sscanf(buf, "%10s", work)) {
4204 if (0 == strncasecmp(work,"0x", 2)) {
4205 if (1 == sscanf(&work[2], "%x", &opts))
4208 if (1 == sscanf(work, "%d", &opts))
4214 scsi_debug_opts = opts;
4215 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4216 sdebug_any_injecting_opt = true;
4217 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4218 sdebug_any_injecting_opt = true;
4219 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4220 sdebug_any_injecting_opt = true;
4221 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4222 sdebug_any_injecting_opt = true;
4223 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4224 sdebug_any_injecting_opt = true;
4225 atomic_set(&sdebug_cmnd_count, 0);
4226 atomic_set(&sdebug_a_tsf, 0);
4229 static DRIVER_ATTR_RW(opts);
4231 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4233 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
4235 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4240 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4241 scsi_debug_ptype = n;
4246 static DRIVER_ATTR_RW(ptype);
4248 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4250 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
4252 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4257 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4258 scsi_debug_dsense = n;
4263 static DRIVER_ATTR_RW(dsense);
4265 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4267 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
4269 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4274 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4276 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
4277 if (scsi_debug_fake_rw != n) {
4278 if ((0 == n) && (NULL == fake_storep)) {
4280 (unsigned long)scsi_debug_dev_size_mb *
4283 fake_storep = vmalloc(sz);
4284 if (NULL == fake_storep) {
4285 pr_err("%s: out of memory, 9\n",
4289 memset(fake_storep, 0, sz);
4291 scsi_debug_fake_rw = n;
4297 static DRIVER_ATTR_RW(fake_rw);
4299 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4301 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
4303 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4308 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4309 scsi_debug_no_lun_0 = n;
4314 static DRIVER_ATTR_RW(no_lun_0);
4316 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4318 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
4320 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4325 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4326 scsi_debug_num_tgts = n;
4327 sdebug_max_tgts_luns();
4332 static DRIVER_ATTR_RW(num_tgts);
4334 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4336 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
4338 static DRIVER_ATTR_RO(dev_size_mb);
4340 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4342 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
4344 static DRIVER_ATTR_RO(num_parts);
4346 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4348 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
4350 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4355 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4356 scsi_debug_every_nth = nth;
4357 atomic_set(&sdebug_cmnd_count, 0);
4362 static DRIVER_ATTR_RW(every_nth);
4364 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4366 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
4368 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4373 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4374 scsi_debug_max_luns = n;
4375 sdebug_max_tgts_luns();
4380 static DRIVER_ATTR_RW(max_luns);
4382 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4384 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
4386 /* N.B. max_queue can be changed while there are queued commands. In flight
4387 * commands beyond the new max_queue will be completed. */
4388 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4391 unsigned long iflags;
4394 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4395 (n <= SCSI_DEBUG_CANQUEUE)) {
4396 spin_lock_irqsave(&queued_arr_lock, iflags);
4397 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4398 scsi_debug_max_queue = n;
4399 if (SCSI_DEBUG_CANQUEUE == k)
4400 atomic_set(&retired_max_queue, 0);
4402 atomic_set(&retired_max_queue, k + 1);
4404 atomic_set(&retired_max_queue, 0);
4405 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4410 static DRIVER_ATTR_RW(max_queue);
4412 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4414 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
4416 static DRIVER_ATTR_RO(no_uld);
4418 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4420 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
4422 static DRIVER_ATTR_RO(scsi_level);
4424 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4426 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
4428 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4434 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4435 changed = (scsi_debug_virtual_gb != n);
4436 scsi_debug_virtual_gb = n;
4437 sdebug_capacity = get_sdebug_capacity();
4439 struct sdebug_host_info *sdhp;
4440 struct sdebug_dev_info *dp;
4442 spin_lock(&sdebug_host_list_lock);
4443 list_for_each_entry(sdhp, &sdebug_host_list,
4445 list_for_each_entry(dp, &sdhp->dev_info_list,
4447 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4451 spin_unlock(&sdebug_host_list_lock);
4457 static DRIVER_ATTR_RW(virtual_gb);
4459 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4461 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
4464 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4469 if (sscanf(buf, "%d", &delta_hosts) != 1)
4471 if (delta_hosts > 0) {
4473 sdebug_add_adapter();
4474 } while (--delta_hosts);
4475 } else if (delta_hosts < 0) {
4477 sdebug_remove_adapter();
4478 } while (++delta_hosts);
4482 static DRIVER_ATTR_RW(add_host);
/* Sysfs "vpd_use_hostno" (read-write): non-negative integer option
 * controlling whether VPD data varies with the host number. */
4484 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4486 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
4488 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4493 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4494 scsi_debug_vpd_use_hostno = n;
4499 static DRIVER_ATTR_RW(vpd_use_hostno);
/* Sysfs "sector_size" (read-only): logical block size in bytes. */
4501 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4503 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
4505 static DRIVER_ATTR_RO(sector_size);
/* Sysfs "dix" (read-only): configured DIX (data integrity) setting. */
4507 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4509 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
4511 static DRIVER_ATTR_RO(dix);
/* Sysfs "dif" (read-only): configured T10 DIF protection type. */
4513 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4515 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
4517 static DRIVER_ATTR_RO(dif);
/* Sysfs "guard" (read-only): guard tag type (see probe: 1 selects IP
 * checksum, otherwise T10 CRC). */
4519 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4521 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
4523 static DRIVER_ATTR_RO(guard);
/* Sysfs "ato" (read-only): application-tag-owned setting (0 or 1). */
4525 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4527 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
4529 static DRIVER_ATTR_RO(ato);
/*
 * Sysfs "map" (read-only): with logical block provisioning active,
 * print the ranges of mapped (provisioned) blocks from map_storep;
 * otherwise report the entire store as the single range 0-N.
 */
4531 static ssize_t map_show(struct device_driver *ddp, char *buf)
4535 if (!scsi_debug_lbp())
4536 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4537 sdebug_store_sectors);
4539 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
4541 buf[count++] = '\n';
4546 static DRIVER_ATTR_RO(map);
/* Sysfs "removable" (read-write): present devices as removable media
 * (any positive write enables, zero disables). */
4548 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4550 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
4552 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4557 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4558 scsi_debug_removable = (n > 0);
4563 static DRIVER_ATTR_RW(removable);
/* Sysfs "host_lock" (read-write): whether queuecommand runs under the
 * Scsi_Host lock (see sdebug_queuecommand_lock_or_not()). */
4565 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4567 return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
4569 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4570 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4575 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4576 bool new_host_lock = (n > 0);
4579 if (new_host_lock != scsi_debug_host_lock) {
4580 unsigned long iflags;
/* refuse to flip the setting while any command is still in flight */
4583 spin_lock_irqsave(&queued_arr_lock, iflags);
4584 k = find_first_bit(queued_in_use_bm,
4585 scsi_debug_max_queue);
4586 if (k != scsi_debug_max_queue)
4587 res = -EBUSY; /* have queued commands */
4589 scsi_debug_host_lock = new_host_lock;
4590 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4596 static DRIVER_ATTR_RW(host_lock);
/* Sysfs "strict" (read-write): enable strict CDB checking against the
 * per-opcode length masks (see scsi_debug_queuecommand()). */
4598 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4600 return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
4602 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4607 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4608 scsi_debug_strict = (n > 0);
4613 static DRIVER_ATTR_RW(strict);
4616 /* Note: The following array creates attribute files in the
4617 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4618 files (over those found in the /sys/module/scsi_debug/parameters
4619 directory) is that auxiliary actions can be triggered when an attribute
4620 is changed. For example see: add_host_store() above.
/* Attribute table behind /sys/bus/pseudo/drivers/scsi_debug/<name>;
 * presumably NULL-terminated — terminator line elided in this listing. */
4623 static struct attribute *sdebug_drv_attrs[] = {
4624 &driver_attr_delay.attr,
4625 &driver_attr_opts.attr,
4626 &driver_attr_ptype.attr,
4627 &driver_attr_dsense.attr,
4628 &driver_attr_fake_rw.attr,
4629 &driver_attr_no_lun_0.attr,
4630 &driver_attr_num_tgts.attr,
4631 &driver_attr_dev_size_mb.attr,
4632 &driver_attr_num_parts.attr,
4633 &driver_attr_every_nth.attr,
4634 &driver_attr_max_luns.attr,
4635 &driver_attr_max_queue.attr,
4636 &driver_attr_no_uld.attr,
4637 &driver_attr_scsi_level.attr,
4638 &driver_attr_virtual_gb.attr,
4639 &driver_attr_add_host.attr,
4640 &driver_attr_vpd_use_hostno.attr,
4641 &driver_attr_sector_size.attr,
4642 &driver_attr_dix.attr,
4643 &driver_attr_dif.attr,
4644 &driver_attr_guard.attr,
4645 &driver_attr_ato.attr,
4646 &driver_attr_map.attr,
4647 &driver_attr_removable.attr,
4648 &driver_attr_host_lock.attr,
4649 &driver_attr_ndelay.attr,
4650 &driver_attr_strict.attr,
/* generates sdebug_drv_groups used by the pseudo bus driver below */
4653 ATTRIBUTE_GROUPS(sdebug_drv);
4655 static struct device *pseudo_primary;
/*
 * Module init: validate module parameters, allocate the shared RAM
 * store (plus optional DIX protection store and provisioning bitmap),
 * register the pseudo root device, bus and driver, then add the
 * requested number of simulated adapters. Many connecting lines
 * (locals, returns, goto labels) are elided in this listing.
 */
4657 static int __init scsi_debug_init(void)
/* reset command/completion statistics */
4664 atomic_set(&sdebug_cmnd_count, 0);
4665 atomic_set(&sdebug_completions, 0);
4666 atomic_set(&retired_max_queue, 0);
/* ndelay (nanosecond delay) must stay under one second */
4668 if (scsi_debug_ndelay >= 1000000000) {
4669 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
4671 scsi_debug_ndelay = 0;
4672 } else if (scsi_debug_ndelay > 0)
4673 scsi_debug_delay = DELAY_OVERRIDDEN;
/* parameter validation: sector size, DIF type, guard, ato, etc. */
4675 switch (scsi_debug_sector_size) {
4682 pr_err("%s: invalid sector_size %d\n", __func__,
4683 scsi_debug_sector_size);
4687 switch (scsi_debug_dif) {
4689 case SD_DIF_TYPE0_PROTECTION:
4690 case SD_DIF_TYPE1_PROTECTION:
4691 case SD_DIF_TYPE2_PROTECTION:
4692 case SD_DIF_TYPE3_PROTECTION:
4696 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
4700 if (scsi_debug_guard > 1) {
4701 pr_err("%s: guard must be 0 or 1\n", __func__);
4705 if (scsi_debug_ato > 1) {
4706 pr_err("%s: ato must be 0 or 1\n", __func__);
4710 if (scsi_debug_physblk_exp > 15) {
4711 pr_err("%s: invalid physblk_exp %u\n", __func__,
4712 scsi_debug_physblk_exp);
4716 if (scsi_debug_lowest_aligned > 0x3fff) {
4717 pr_err("%s: lowest_aligned too big: %u\n", __func__,
4718 scsi_debug_lowest_aligned);
/* size the simulated store from dev_size_mb */
4722 if (scsi_debug_dev_size_mb < 1)
4723 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
4724 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
4725 sdebug_store_sectors = sz / scsi_debug_sector_size;
4726 sdebug_capacity = get_sdebug_capacity();
4728 /* play around with geometry, don't waste too much on track 0 */
4730 sdebug_sectors_per = 32;
4731 if (scsi_debug_dev_size_mb >= 16)
4733 else if (scsi_debug_dev_size_mb >= 256)
4735 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4736 (sdebug_sectors_per * sdebug_heads);
4737 if (sdebug_cylinders_per >= 1024) {
4738 /* other LLDs do this; implies >= 1GB ram disk ... */
4740 sdebug_sectors_per = 63;
4741 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4742 (sdebug_sectors_per * sdebug_heads);
/* back the virtual disk with vmalloc'd RAM unless fake_rw is set */
4745 if (0 == scsi_debug_fake_rw) {
4746 fake_storep = vmalloc(sz);
4747 if (NULL == fake_storep) {
4748 pr_err("%s: out of memory, 1\n", __func__);
4751 memset(fake_storep, 0, sz);
4752 if (scsi_debug_num_parts > 0)
4753 sdebug_build_parts(fake_storep, sz);
/* optional DIX: one protection tuple per store sector */
4756 if (scsi_debug_dix) {
4759 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4760 dif_storep = vmalloc(dif_size);
4762 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
4765 if (dif_storep == NULL) {
4766 pr_err("%s: out of mem. (DIX)\n", __func__);
4771 memset(dif_storep, 0xff, dif_size);
4774 /* Logical Block Provisioning */
4775 if (scsi_debug_lbp()) {
4776 scsi_debug_unmap_max_blocks =
4777 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
4779 scsi_debug_unmap_max_desc =
4780 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
4782 scsi_debug_unmap_granularity =
4783 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
4785 if (scsi_debug_unmap_alignment &&
4786 scsi_debug_unmap_granularity <=
4787 scsi_debug_unmap_alignment) {
4788 pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
/* one bit per provisioning block, all initially unmapped */
4793 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4794 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4796 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
4798 if (map_storep == NULL) {
4799 pr_err("%s: out of mem. (MAP)\n", __func__);
4804 bitmap_zero(map_storep, map_size);
4806 /* Map first 1KB for partition table */
4807 if (scsi_debug_num_parts)
/* register the pseudo root device, bus and driver with the core */
4811 pseudo_primary = root_device_register("pseudo_0");
4812 if (IS_ERR(pseudo_primary)) {
4813 pr_warn("%s: root_device_register() error\n", __func__);
4814 ret = PTR_ERR(pseudo_primary);
4817 ret = bus_register(&pseudo_lld_bus);
4819 pr_warn("%s: bus_register error: %d\n", __func__, ret);
4822 ret = driver_register(&sdebug_driverfs_driver);
4824 pr_warn("%s: driver_register error: %d\n", __func__, ret);
/* instantiate the requested number of simulated adapters; the running
 * count lives in scsi_debug_add_host (incremented by each add) */
4828 host_to_add = scsi_debug_add_host;
4829 scsi_debug_add_host = 0;
4831 for (k = 0; k < host_to_add; k++) {
4832 if (sdebug_add_adapter()) {
4833 pr_err("%s: sdebug_add_adapter failed k=%d\n",
4839 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
4840 pr_info("%s: built %d host(s)\n", __func__,
4841 scsi_debug_add_host);
/* error unwind (labels elided): undo registrations in reverse order */
4846 bus_unregister(&pseudo_lld_bus);
4848 root_device_unregister(pseudo_primary);
/*
 * Module exit: remove every adapter that was added, then unregister
 * driver, bus and the pseudo root device — the reverse of
 * scsi_debug_init() (loop body lines partly elided in this listing).
 */
4859 static void __exit scsi_debug_exit(void)
4861 int k = scsi_debug_add_host;
4866 sdebug_remove_adapter();
4867 driver_unregister(&sdebug_driverfs_driver);
4868 bus_unregister(&pseudo_lld_bus);
4869 root_device_unregister(pseudo_primary);
4877 device_initcall(scsi_debug_init);
4878 module_exit(scsi_debug_exit);
/*
 * struct device release callback for a simulated adapter: recovers the
 * containing sdebug_host_info from the embedded device; presumably the
 * host info is freed here (the freeing line is elided in this listing).
 */
4880 static void sdebug_release_adapter(struct device * dev)
4882 struct sdebug_host_info *sdbg_host;
4884 sdbg_host = to_sdebug_host(dev);
/*
 * Create and register one simulated adapter: allocate its host info,
 * create num_tgts * max_luns per-device entries, link it onto the
 * global host list and register it with the driver core (which in
 * turn triggers sdebug_driver_probe()). Error-return lines are
 * elided in this listing.
 */
4888 static int sdebug_add_adapter(void)
4890 int k, devs_per_host;
4892 struct sdebug_host_info *sdbg_host;
4893 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4895 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
4896 if (NULL == sdbg_host) {
4897 printk(KERN_ERR "%s: out of memory at line %d\n",
4898 __func__, __LINE__);
4902 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
/* one sdebug_dev_info per simulated target/LUN pair */
4904 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
4905 for (k = 0; k < devs_per_host; k++) {
4906 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4907 if (!sdbg_devinfo) {
4908 printk(KERN_ERR "%s: out of memory at line %d\n",
4909 __func__, __LINE__);
4915 spin_lock(&sdebug_host_list_lock);
4916 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4917 spin_unlock(&sdebug_host_list_lock);
/* hand the device to the driver core; release() frees it later */
4919 sdbg_host->dev.bus = &pseudo_lld_bus;
4920 sdbg_host->dev.parent = pseudo_primary;
4921 sdbg_host->dev.release = &sdebug_release_adapter;
4922 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
4924 error = device_register(&sdbg_host->dev);
4929 ++scsi_debug_add_host;
/* error unwind: drop any per-device entries created so far */
4933 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4935 list_del(&sdbg_devinfo->dev_list);
4936 kfree(sdbg_devinfo);
/*
 * Remove the most recently added simulated adapter: unlink the tail
 * entry of the global host list under the lock, then unregister its
 * device (the release callback handles freeing) and decrement the
 * adapter count. Presumably a NULL check guards the unregister —
 * that line is elided in this listing.
 */
4943 static void sdebug_remove_adapter(void)
4945 struct sdebug_host_info * sdbg_host = NULL;
4947 spin_lock(&sdebug_host_list_lock);
4948 if (!list_empty(&sdebug_host_list)) {
4949 sdbg_host = list_entry(sdebug_host_list.prev,
4950 struct sdebug_host_info, host_list);
4951 list_del(&sdbg_host->host_list);
4953 spin_unlock(&sdebug_host_list_lock);
4958 device_unregister(&sdbg_host->dev);
4959 --scsi_debug_add_host;
/*
 * .change_queue_depth callback: read the device's current in-flight
 * count under queued_arr_lock, clamp the requested depth (tests may
 * exceed the host's queued_arr capacity by up to 10), apply it via
 * scsi_change_queue_depth(), and return the resulting queue depth.
 */
4963 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4966 unsigned long iflags;
4967 struct sdebug_dev_info *devip;
4969 spin_lock_irqsave(&queued_arr_lock, iflags);
4970 devip = (struct sdebug_dev_info *)sdev->hostdata;
4971 if (NULL == devip) {
4972 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4975 num_in_q = atomic_read(&devip->num_in_q);
4976 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4980 /* allow to exceed max host queued_arr elements for testing */
4981 if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4982 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4983 scsi_change_queue_depth(sdev, qdepth);
4985 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4986 sdev_printk(KERN_INFO, sdev,
4987 "%s: qdepth=%d, num_in_q=%d\n",
4988 __func__, qdepth, num_in_q);
4990 return sdev->queue_depth;
/*
 * Error-injection hook run per queued command when every_nth is set.
 * Once sdebug_cmnd_count reaches |scsi_debug_every_nth| the counter
 * resets and either (a) the command is swallowed to simulate a
 * timeout (returns 1), or (b) flags in the per-command extra struct
 * are set so a later stage injects the configured error. The normal
 * return-0 path is elided in this listing.
 */
4994 check_inject(struct scsi_cmnd *scp)
4996 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
4998 memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
5000 if (atomic_inc_return(&sdebug_cmnd_count) >=
5001 abs(scsi_debug_every_nth)) {
5002 atomic_set(&sdebug_cmnd_count, 0);
/* every_nth < -1 means a one-shot injection: settle at -1 after */
5003 if (scsi_debug_every_nth < -1)
5004 scsi_debug_every_nth = -1;
5005 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
5006 return 1; /* ignore command causing timeout */
5007 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
5008 scsi_medium_access_command(scp))
5009 return 1; /* time out reads and writes */
5010 if (sdebug_any_injecting_opt) {
5011 int opts = scsi_debug_opts;
/* mark the first matching injection kind for later processing */
5013 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5014 ep->inj_recovered = true;
5015 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5016 ep->inj_transport = true;
5017 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5019 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5021 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5022 ep->inj_short = true;
/*
 * Main queuecommand worker: decode the CDB, locate the opcode entry
 * in opcode_info_arr (resolving service actions where several
 * commands share an opcode), run unit-attention / readiness /
 * strict-CDB checks, optionally inject errors, then dispatch to the
 * per-opcode resp_* handler and schedule the response. Many
 * connecting lines (locals, braces, returns) are elided in this
 * listing.
 */
5029 scsi_debug_queuecommand(struct scsi_cmnd *scp)
5032 struct scsi_device *sdp = scp->device;
5033 const struct opcode_info_t *oip;
5034 const struct opcode_info_t *r_oip;
5035 struct sdebug_dev_info *devip;
5036 u8 *cmd = scp->cmnd;
5037 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5040 int errsts_no_connect = DID_NO_CONNECT << 16;
5045 bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
5047 scsi_set_resid(scp, 0);
/* optionally hex-dump the CDB (up to 32 bytes) for debugging */
5048 if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
5053 sb = (int)sizeof(b);
5055 strcpy(b, "too long, over 32 bytes");
5057 for (k = 0, n = 0; k < len && n < sb; ++k)
5058 n += scnprintf(b + n, sb - n, "%02x ",
5061 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
/* reject LUNs past max_luns except the REPORT LUNS well-known LUN */
5063 has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
5064 if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
5065 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5067 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
5068 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
5069 devip = (struct sdebug_dev_info *)sdp->hostdata;
5071 devip = devInfoReg(sdp);
5073 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5075 na = oip->num_attached;
5077 if (na) { /* multiple commands with this opcode */
5079 if (FF_SA & r_oip->flags) {
5080 if (F_SA_LOW & oip->flags)
5083 sa = get_unaligned_be16(cmd + 8);
/* scan attached entries for a matching opcode + service action */
5084 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5085 if (opcode == oip->opcode && sa == oip->sa)
5088 } else { /* since no service action only check opcode */
5089 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5090 if (opcode == oip->opcode)
/* no match: flag the offending CDB field (SA low/high or opcode) */
5095 if (F_SA_LOW & r_oip->flags)
5096 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5097 else if (F_SA_HIGH & r_oip->flags)
5098 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5100 mk_sense_invalid_opcode(scp);
5103 } /* else (when na==0) we assume the oip is a match */
5105 if (F_INV_OP & flags) {
5106 mk_sense_invalid_opcode(scp);
5109 if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5111 sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
5112 "0x%x not supported for wlun\n", opcode);
5113 mk_sense_invalid_opcode(scp);
5116 if (scsi_debug_strict) { /* check cdb against mask */
5120 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5121 rem = ~oip->len_mask[k] & cmd[k];
/* locate the highest set disallowed bit for the sense key info */
5123 for (j = 7; j >= 0; --j, rem <<= 1) {
5127 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
/* report any pending unit attention before executing the command */
5132 if (!(F_SKIP_UA & flags) &&
5133 SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5134 errsts = check_readiness(scp, UAS_ONLY, devip);
5138 if ((F_M_ACCESS & flags) && devip->stopped) {
5139 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5141 sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5142 "%s\n", my_name, "initializing command "
5144 errsts = check_condition_result;
/* fake_rw short-circuits medium access; every_nth drives injection */
5147 if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
5149 if (scsi_debug_every_nth) {
5150 if (check_inject(scp))
5151 return 0; /* ignore command: make trouble */
5153 if (oip->pfp) /* if this command has a resp_* function, call it */
5154 errsts = oip->pfp(scp, devip);
5155 else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5156 errsts = r_pfp(scp, devip);
5159 return schedule_resp(scp, devip, errsts,
5160 ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
5162 return schedule_resp(scp, devip, check_condition_result, 0);
/*
 * .queuecommand entry point: when the host_lock option is set, wrap
 * the real worker in the Scsi_Host lock (emulating the old locked
 * queuecommand behaviour); otherwise call it lock-free.
 */
5166 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5168 if (scsi_debug_host_lock) {
5169 unsigned long iflags;
5172 spin_lock_irqsave(shost->host_lock, iflags);
5173 rc = scsi_debug_queuecommand(cmd);
5174 spin_unlock_irqrestore(shost->host_lock, iflags);
5177 return scsi_debug_queuecommand(cmd);
/* SCSI host template shared by every simulated adapter; can_queue and
 * use_clustering may be adjusted at probe time (see sdebug_driver_probe). */
5180 static struct scsi_host_template sdebug_driver_template = {
5181 .show_info = scsi_debug_show_info,
5182 .write_info = scsi_debug_write_info,
5183 .proc_name = sdebug_proc_name,
5184 .name = "SCSI DEBUG",
5185 .info = scsi_debug_info,
5186 .slave_alloc = scsi_debug_slave_alloc,
5187 .slave_configure = scsi_debug_slave_configure,
5188 .slave_destroy = scsi_debug_slave_destroy,
5189 .ioctl = scsi_debug_ioctl,
5190 .queuecommand = sdebug_queuecommand_lock_or_not,
5191 .change_queue_depth = sdebug_change_qdepth,
5192 .eh_abort_handler = scsi_debug_abort,
5193 .eh_device_reset_handler = scsi_debug_device_reset,
5194 .eh_target_reset_handler = scsi_debug_target_reset,
5195 .eh_bus_reset_handler = scsi_debug_bus_reset,
5196 .eh_host_reset_handler = scsi_debug_host_reset,
5197 .can_queue = SCSI_DEBUG_CANQUEUE,
5199 .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
5200 .cmd_per_lun = DEF_CMD_PER_LUN,
5202 .use_clustering = DISABLE_CLUSTERING,
5203 .module = THIS_MODULE,
5204 .track_queue_depth = 1,
/* per-command private area used by the error-injection flags */
5205 .cmd_size = sizeof(struct sdebug_scmd_extra_t),
/*
 * Bus probe for a simulated adapter: allocate a Scsi_Host, size its
 * id/lun space from the module parameters, configure DIF/DIX
 * protection and guard type, record whether any error-injection
 * option is active, then add and scan the host. Error-return lines
 * are elided in this listing.
 */
5208 static int sdebug_driver_probe(struct device * dev)
5212 struct sdebug_host_info *sdbg_host;
5213 struct Scsi_Host *hpnt;
5216 sdbg_host = to_sdebug_host(dev);
5218 sdebug_driver_template.can_queue = scsi_debug_max_queue;
5219 if (scsi_debug_clustering)
5220 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5221 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5223 pr_err("%s: scsi_host_alloc failed\n", __func__);
/* stash the host info pointer in the Scsi_Host private data */
5228 sdbg_host->shost = hpnt;
5229 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
/* keep max_id clear of the initiator's own id when it falls in range */
5230 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
5231 hpnt->max_id = scsi_debug_num_tgts + 1;
5233 hpnt->max_id = scsi_debug_num_tgts;
5234 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
/* advertise the configured T10 DIF type plus DIX capability */
5238 switch (scsi_debug_dif) {
5240 case SD_DIF_TYPE1_PROTECTION:
5241 host_prot = SHOST_DIF_TYPE1_PROTECTION;
5243 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
5246 case SD_DIF_TYPE2_PROTECTION:
5247 host_prot = SHOST_DIF_TYPE2_PROTECTION;
5249 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
5252 case SD_DIF_TYPE3_PROTECTION:
5253 host_prot = SHOST_DIF_TYPE3_PROTECTION;
5255 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
5260 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5264 scsi_host_set_prot(hpnt, host_prot);
5266 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
5267 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5268 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5269 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5270 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5271 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5272 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5273 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard=1 selects IP checksum, otherwise T10 CRC */
5275 if (scsi_debug_guard == 1)
5276 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5278 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
/* cache "any injection option on" so check_inject() can test cheaply */
5280 opts = scsi_debug_opts;
5281 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5282 sdebug_any_injecting_opt = true;
5283 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5284 sdebug_any_injecting_opt = true;
5285 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5286 sdebug_any_injecting_opt = true;
5287 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5288 sdebug_any_injecting_opt = true;
5289 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5290 sdebug_any_injecting_opt = true;
5292 error = scsi_add_host(hpnt, &sdbg_host->dev);
5294 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
5296 scsi_host_put(hpnt);
5298 scsi_scan_host(hpnt);
/*
 * Bus remove for a simulated adapter: remove its Scsi_Host from the
 * SCSI midlayer, free the per-device info entries, then drop the host
 * reference (error-return lines elided in this listing).
 */
5303 static int sdebug_driver_remove(struct device * dev)
5305 struct sdebug_host_info *sdbg_host;
5306 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5308 sdbg_host = to_sdebug_host(dev);
5311 printk(KERN_ERR "%s: Unable to locate host info\n",
5316 scsi_remove_host(sdbg_host->shost);
/* _safe variant: entries are deleted while iterating */
5318 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5320 list_del(&sdbg_devinfo->dev_list);
5321 kfree(sdbg_devinfo);
5324 scsi_host_put(sdbg_host->shost);
/* Bus match callback; presumably accepts every device/driver pairing
 * on the pseudo bus — the body is elided in this listing. */
5328 static int pseudo_lld_bus_match(struct device *dev,
5329 struct device_driver *dev_driver)
5334 static struct bus_type pseudo_lld_bus = {
5336 .match = pseudo_lld_bus_match,
5337 .probe = sdebug_driver_probe,
5338 .remove = sdebug_driver_remove,
5339 .drv_groups = sdebug_drv_groups,