/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

/*
 * Refer to the SCSI-NVMe Translation spec for details on how
 * each command is translated.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/unaligned.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>
#include <scsi/scsi_request.h>

#include "nvme.h"

static int sg_version_num = 30534;      /* 2 digits for each component */

/* VPD Page Codes */
#define VPD_SUPPORTED_PAGES                             0x00
#define VPD_SERIAL_NUMBER                               0x80
#define VPD_DEVICE_IDENTIFIERS                          0x83
#define VPD_EXTENDED_INQUIRY                            0x86
#define VPD_BLOCK_LIMITS                                0xB0
#define VPD_BLOCK_DEV_CHARACTERISTICS                   0xB1
/* format unit parameter list offsets */
#define FORMAT_UNIT_SHORT_PARM_LIST_LEN                 4
#define FORMAT_UNIT_LONG_PARM_LIST_LEN                  8
#define FORMAT_UNIT_PROT_INT_OFFSET                     3
#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET             0
#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK               0x07

/* Misc. defines */
#define FIXED_SENSE_DATA                                0x70
#define DESC_FORMAT_SENSE_DATA                          0x72
#define FIXED_SENSE_DATA_ADD_LENGTH                     10
#define LUN_ENTRY_SIZE                                  8
#define LUN_DATA_HEADER_SIZE                            8
#define ALL_LUNS_RETURNED                               0x02
#define ALL_WELL_KNOWN_LUNS_RETURNED                    0x01
#define RESTRICTED_LUNS_RETURNED                        0x00
#define DOWNLOAD_SAVE_ACTIVATE                          0x05
#define DOWNLOAD_SAVE_DEFER_ACTIVATE                    0x0E
#define ACTIVATE_DEFERRED_MICROCODE                     0x0F
#define FORMAT_UNIT_IMMED_MASK                          0x2
#define FORMAT_UNIT_IMMED_OFFSET                        1
#define KELVIN_TEMP_FACTOR                              273
#define FIXED_FMT_SENSE_DATA_SIZE                       18
#define DESC_FMT_SENSE_DATA_SIZE                        8

/* SCSI/NVMe defines and bit masks */
#define INQ_STANDARD_INQUIRY_PAGE                       0x00
#define INQ_SUPPORTED_VPD_PAGES_PAGE                    0x00
#define INQ_UNIT_SERIAL_NUMBER_PAGE                     0x80
#define INQ_DEVICE_IDENTIFICATION_PAGE                  0x83
#define INQ_EXTENDED_INQUIRY_DATA_PAGE                  0x86
#define INQ_BDEV_LIMITS_PAGE                            0xB0
#define INQ_BDEV_CHARACTERISTICS_PAGE                   0xB1
#define INQ_SERIAL_NUMBER_LENGTH                        0x14
#define INQ_NUM_SUPPORTED_VPD_PAGES                     6
#define VERSION_SPC_4                                   0x06
#define ACA_UNSUPPORTED                                 0
#define STANDARD_INQUIRY_LENGTH                         36
#define ADDITIONAL_STD_INQ_LENGTH                       31
#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH               0x3C
#define RESERVED_FIELD                                  0

/* Mode Sense/Select defines */
#define MODE_PAGE_INFO_EXCEP                            0x1C
#define MODE_PAGE_CACHING                               0x08
#define MODE_PAGE_CONTROL                               0x0A
#define MODE_PAGE_POWER_CONDITION                       0x1A
#define MODE_PAGE_RETURN_ALL                            0x3F
#define MODE_PAGE_BLK_DES_LEN                           0x08
#define MODE_PAGE_LLBAA_BLK_DES_LEN                     0x10
#define MODE_PAGE_CACHING_LEN                           0x14
#define MODE_PAGE_CONTROL_LEN                           0x0C
#define MODE_PAGE_POW_CND_LEN                           0x28
#define MODE_PAGE_INF_EXC_LEN                           0x0C
#define MODE_PAGE_ALL_LEN                               0x54
#define MODE_SENSE6_MPH_SIZE                            4
#define MODE_SENSE_PAGE_CONTROL_MASK                    0xC0
#define MODE_SENSE_PAGE_CODE_OFFSET                     2
#define MODE_SENSE_PAGE_CODE_MASK                       0x3F
#define MODE_SENSE_LLBAA_MASK                           0x10
#define MODE_SENSE_LLBAA_SHIFT                          4
#define MODE_SENSE_DBD_MASK                             8
#define MODE_SENSE_DBD_SHIFT                            3
#define MODE_SENSE10_MPH_SIZE                           8
#define MODE_SELECT_CDB_PAGE_FORMAT_MASK                0x10
#define MODE_SELECT_CDB_SAVE_PAGES_MASK                 0x1
#define MODE_SELECT_6_BD_OFFSET                         3
#define MODE_SELECT_10_BD_OFFSET                        6
#define MODE_SELECT_10_LLBAA_OFFSET                     4
#define MODE_SELECT_10_LLBAA_MASK                       1
#define MODE_SELECT_6_MPH_SIZE                          4
#define MODE_SELECT_10_MPH_SIZE                         8
#define CACHING_MODE_PAGE_WCE_MASK                      0x04
#define MODE_SENSE_BLK_DESC_ENABLED                     0
#define MODE_SENSE_BLK_DESC_COUNT                       1
#define MODE_SELECT_PAGE_CODE_MASK                      0x3F
#define SHORT_DESC_BLOCK                                8
#define LONG_DESC_BLOCK                                 16
#define MODE_PAGE_POW_CND_LEN_FIELD                     0x26
#define MODE_PAGE_INF_EXC_LEN_FIELD                     0x0A
#define MODE_PAGE_CACHING_LEN_FIELD                     0x12
#define MODE_PAGE_CONTROL_LEN_FIELD                     0x0A
#define MODE_SENSE_PC_CURRENT_VALUES                    0

/* Log Sense defines */
#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE               0x00
#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH             0x07
#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE          0x2F
#define LOG_PAGE_TEMPERATURE_PAGE                       0x0D
#define LOG_SENSE_CDB_SP_NOT_ENABLED                    0
#define LOG_SENSE_CDB_PC_MASK                           0xC0
#define LOG_SENSE_CDB_PC_SHIFT                          6
#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES              1
#define LOG_SENSE_CDB_PAGE_CODE_MASK                    0x3F
#define REMAINING_INFO_EXCP_PAGE_LENGTH                 0x8
#define LOG_INFO_EXCP_PAGE_LENGTH                       0xC
#define REMAINING_TEMP_PAGE_LENGTH                      0xC
#define LOG_TEMP_PAGE_LENGTH                            0x10
#define LOG_TEMP_UNKNOWN                                0xFF
#define SUPPORTED_LOG_PAGES_PAGE_LENGTH                 0x3

/* Read Capacity defines */
#define READ_CAP_10_RESP_SIZE                           8
#define READ_CAP_16_RESP_SIZE                           32

/* NVMe Namespace and Command Defines */
#define BYTES_TO_DWORDS                                 4
#define NVME_MAX_FIRMWARE_SLOT                          7

/* Report LUNs defines */
#define REPORT_LUNS_FIRST_LUN_OFFSET                    8

/* SCSI ADDITIONAL SENSE Codes */

#define SCSI_ASC_NO_SENSE                               0x00
#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT             0x03
#define SCSI_ASC_LUN_NOT_READY                          0x04
#define SCSI_ASC_WARNING                                0x0B
#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED           0x10
#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED          0x10
#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED          0x10
#define SCSI_ASC_UNRECOVERED_READ_ERROR                 0x11
#define SCSI_ASC_MISCOMPARE_DURING_VERIFY               0x1D
#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID           0x20
#define SCSI_ASC_ILLEGAL_COMMAND                        0x20
#define SCSI_ASC_ILLEGAL_BLOCK                          0x21
#define SCSI_ASC_INVALID_CDB                            0x24
#define SCSI_ASC_INVALID_LUN                            0x25
#define SCSI_ASC_INVALID_PARAMETER                      0x26
#define SCSI_ASC_FORMAT_COMMAND_FAILED                  0x31
#define SCSI_ASC_INTERNAL_TARGET_FAILURE                0x44

/* SCSI ADDITIONAL SENSE Code Qualifiers */

#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE                  0x00
#define SCSI_ASCQ_FORMAT_COMMAND_FAILED                 0x01
#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED          0x01
#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED         0x02
#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED         0x03
#define SCSI_ASCQ_FORMAT_IN_PROGRESS                    0x04
#define SCSI_ASCQ_POWER_LOSS_EXPECTED                   0x08
#define SCSI_ASCQ_INVALID_LUN_ID                        0x09

/* copied from drivers/usb/gadget/function/storage_common.h */
static inline u32 get_unaligned_be24(u8 *buf)
{
        return 0xffffff & (u32) get_unaligned_be32(buf - 1);
}
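
/*
 * Note on the helper above: get_unaligned_be32(buf - 1) reads the byte
 * *before* buf together with buf[0..2], and the 0xffffff mask then discards
 * that stray high byte.  This is only safe because callers point buf into
 * the middle of a CDB, so buf - 1 is always readable.  Worked example:
 *
 *      u8 cdb[4] = { 0xAA, 0x12, 0x34, 0x56 };
 *      get_unaligned_be24(&cdb[1]);    -> 0x123456
 */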

/* Struct to gather data that needs to be extracted from a SCSI CDB.
   Not conforming to any particular CDB variant, but compatible with all. */

struct nvme_trans_io_cdb {
        u8 fua;
        u8 prot_info;
        u64 lba;
        u32 xfer_len;
};


/* Internal Helper Functions */


/* Copy data to userspace memory */

static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
                                                                unsigned long n)
{
        int i;
        void *index = from;
        size_t remaining = n;
        size_t xfer_len;

        if (hdr->iovec_count > 0) {
                struct sg_iovec sgl;

                for (i = 0; i < hdr->iovec_count; i++) {
                        if (copy_from_user(&sgl, hdr->dxferp +
                                                i * sizeof(struct sg_iovec),
                                                sizeof(struct sg_iovec)))
                                return -EFAULT;
                        xfer_len = min(remaining, sgl.iov_len);
                        if (copy_to_user(sgl.iov_base, index, xfer_len))
                                return -EFAULT;

                        index += xfer_len;
                        remaining -= xfer_len;
                        if (remaining == 0)
                                break;
                }
                return 0;
        }

        if (copy_to_user(hdr->dxferp, from, n))
                return -EFAULT;
        return 0;
}

/* Copy data from userspace memory */

static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
                                                                unsigned long n)
{
        int i;
        void *index = to;
        size_t remaining = n;
        size_t xfer_len;

        if (hdr->iovec_count > 0) {
                struct sg_iovec sgl;

                for (i = 0; i < hdr->iovec_count; i++) {
                        if (copy_from_user(&sgl, hdr->dxferp +
                                                i * sizeof(struct sg_iovec),
                                                sizeof(struct sg_iovec)))
                                return -EFAULT;
                        xfer_len = min(remaining, sgl.iov_len);
                        if (copy_from_user(index, sgl.iov_base, xfer_len))
                                return -EFAULT;
                        index += xfer_len;
                        remaining -= xfer_len;
                        if (remaining == 0)
                                break;
                }
                return 0;
        }

        if (copy_from_user(to, hdr->dxferp, n))
                return -EFAULT;
        return 0;
}
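
/*
 * For both copy helpers above: when hdr->iovec_count is non-zero, the SG_IO
 * convention from <scsi/sg.h> applies and hdr->dxferp points at a userspace
 * array of struct sg_iovec rather than a flat buffer.  Illustrative
 * userspace sketch (not part of the driver; buffer names are made up):
 *
 *      struct sg_iovec iov[2] = {
 *              { .iov_base = hdr_buf,  .iov_len = 512  },
 *              { .iov_base = data_buf, .iov_len = 4096 },
 *      };
 *      hdr.iovec_count = 2;
 *      hdr.dxferp = iov;
 */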

/* Status/Sense Buffer Writeback */

static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
                                 u8 asc, u8 ascq)
{
        u8 xfer_len;
        u8 resp[DESC_FMT_SENSE_DATA_SIZE];

        if (scsi_status_is_good(status)) {
                hdr->status = SAM_STAT_GOOD;
                hdr->masked_status = GOOD;
                hdr->host_status = DID_OK;
                hdr->driver_status = DRIVER_OK;
                hdr->sb_len_wr = 0;
        } else {
                hdr->status = status;
                hdr->masked_status = status >> 1;
                hdr->host_status = DID_OK;
                hdr->driver_status = DRIVER_OK;

                memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
                resp[0] = DESC_FORMAT_SENSE_DATA;
                resp[1] = sense_key;
                resp[2] = asc;
                resp[3] = ascq;

                xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
                hdr->sb_len_wr = xfer_len;
                if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
                        return -EFAULT;
        }

        return 0;
}
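
/*
 * The sense buffer built above uses the descriptor format (resp[0] = 0x72,
 * DESC_FORMAT_SENSE_DATA): byte 1 is the sense key and bytes 2 and 3 are the
 * ASC/ASCQ pair.  For example, an unsupported CDB comes back as 72h with
 * sense key ILLEGAL_REQUEST (05h) and ASC/ASCQ 24h/00h (INVALID FIELD IN CDB).
 */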

/*
 * Take a status code from a lowlevel routine, and if it was a positive NVMe
 * error code update the sense data based on it.  In either case the passed
 * in value is returned again, unless an -EFAULT from copy_to_user overrides
 * it.
 */
static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
{
        u8 status, sense_key, asc, ascq;
        int res;

        /* For non-nvme (Linux) errors, simply return the error code */
        if (nvme_sc < 0)
                return nvme_sc;

        /* Mask DNR, More, and reserved fields */
        switch (nvme_sc & 0x7FF) {
        /* Generic Command Status */
        case NVME_SC_SUCCESS:
                status = SAM_STAT_GOOD;
                sense_key = NO_SENSE;
                asc = SCSI_ASC_NO_SENSE;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_INVALID_OPCODE:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = ILLEGAL_REQUEST;
                asc = SCSI_ASC_ILLEGAL_COMMAND;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_INVALID_FIELD:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = ILLEGAL_REQUEST;
                asc = SCSI_ASC_INVALID_CDB;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_DATA_XFER_ERROR:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = MEDIUM_ERROR;
                asc = SCSI_ASC_NO_SENSE;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_POWER_LOSS:
                status = SAM_STAT_TASK_ABORTED;
                sense_key = ABORTED_COMMAND;
                asc = SCSI_ASC_WARNING;
                ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
                break;
        case NVME_SC_INTERNAL:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = HARDWARE_ERROR;
                asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_ABORT_REQ:
                status = SAM_STAT_TASK_ABORTED;
                sense_key = ABORTED_COMMAND;
                asc = SCSI_ASC_NO_SENSE;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_ABORT_QUEUE:
                status = SAM_STAT_TASK_ABORTED;
                sense_key = ABORTED_COMMAND;
                asc = SCSI_ASC_NO_SENSE;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_FUSED_FAIL:
                status = SAM_STAT_TASK_ABORTED;
                sense_key = ABORTED_COMMAND;
                asc = SCSI_ASC_NO_SENSE;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_FUSED_MISSING:
                status = SAM_STAT_TASK_ABORTED;
                sense_key = ABORTED_COMMAND;
                asc = SCSI_ASC_NO_SENSE;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_INVALID_NS:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = ILLEGAL_REQUEST;
                asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
                ascq = SCSI_ASCQ_INVALID_LUN_ID;
                break;
        case NVME_SC_LBA_RANGE:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = ILLEGAL_REQUEST;
                asc = SCSI_ASC_ILLEGAL_BLOCK;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_CAP_EXCEEDED:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = MEDIUM_ERROR;
                asc = SCSI_ASC_NO_SENSE;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_NS_NOT_READY:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = NOT_READY;
                asc = SCSI_ASC_LUN_NOT_READY;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;

        /* Command Specific Status */
        case NVME_SC_INVALID_FORMAT:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = ILLEGAL_REQUEST;
                asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
                ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
                break;
        case NVME_SC_BAD_ATTRIBUTES:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = ILLEGAL_REQUEST;
                asc = SCSI_ASC_INVALID_CDB;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;

        /* Media Errors */
        case NVME_SC_WRITE_FAULT:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = MEDIUM_ERROR;
                asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_READ_ERROR:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = MEDIUM_ERROR;
                asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_GUARD_CHECK:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = MEDIUM_ERROR;
                asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
                ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
                break;
        case NVME_SC_APPTAG_CHECK:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = MEDIUM_ERROR;
                asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
                ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
                break;
        case NVME_SC_REFTAG_CHECK:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = MEDIUM_ERROR;
                asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
                ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
                break;
        case NVME_SC_COMPARE_FAILED:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = MISCOMPARE;
                asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        case NVME_SC_ACCESS_DENIED:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = ILLEGAL_REQUEST;
                asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
                ascq = SCSI_ASCQ_INVALID_LUN_ID;
                break;

        /* Unspecified/Default */
        case NVME_SC_CMDID_CONFLICT:
        case NVME_SC_CMD_SEQ_ERROR:
        case NVME_SC_CQ_INVALID:
        case NVME_SC_QID_INVALID:
        case NVME_SC_QUEUE_SIZE:
        case NVME_SC_ABORT_LIMIT:
        case NVME_SC_ABORT_MISSING:
        case NVME_SC_ASYNC_LIMIT:
        case NVME_SC_FIRMWARE_SLOT:
        case NVME_SC_FIRMWARE_IMAGE:
        case NVME_SC_INVALID_VECTOR:
        case NVME_SC_INVALID_LOG_PAGE:
        default:
                status = SAM_STAT_CHECK_CONDITION;
                sense_key = ILLEGAL_REQUEST;
                asc = SCSI_ASC_NO_SENSE;
                ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
                break;
        }

        res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
        return res ? res : nvme_sc;
}
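
/*
 * Example of the mapping above: NVME_SC_LBA_RANGE becomes CHECK CONDITION
 * with sense key ILLEGAL_REQUEST and ASC/ASCQ 21h/00h (SCSI_ASC_ILLEGAL_BLOCK).
 * Because the 0x7FF mask strips the DNR and More bits first, retryable and
 * non-retryable flavors of the same NVMe status map to identical sense data.
 */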

/* INQUIRY Helper Functions */

static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr, u8 *inq_response,
                                        int alloc_len)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_id_ns *id_ns;
        int res;
        int nvme_sc;
        int xfer_len;
        u8 resp_data_format = 0x02;
        u8 protect;
        u8 cmdque = 0x01 << 1;
        u8 fw_offset = sizeof(ctrl->firmware_rev);

        /* nvme ns identify - use DPS value for PROTECT field */
        nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                return res;

        if (id_ns->dps)
                protect = 0x01;
        else
                protect = 0;
        kfree(id_ns);

        memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
        inq_response[2] = VERSION_SPC_4;
        inq_response[3] = resp_data_format;     /*normaca=0 | hisup=0 */
        inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
        inq_response[5] = protect;      /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
        inq_response[7] = cmdque;       /* wbus16=0 | sync=0 | vs=0 */
        strncpy(&inq_response[8], "NVMe    ", 8);
        strncpy(&inq_response[16], ctrl->model, 16);

        while (ctrl->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
                fw_offset--;
        fw_offset -= 4;
        strncpy(&inq_response[32], ctrl->firmware_rev + fw_offset, 4);

        xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
        return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
}

static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr, u8 *inq_response,
                                        int alloc_len)
{
        int xfer_len;

        memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
        inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE;   /* Page Code */
        inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES;    /* Page Length */
        inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
        inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
        inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
        inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
        inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
        inq_response[9] = INQ_BDEV_LIMITS_PAGE;

        xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
        return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
}

static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr, u8 *inq_response,
                                        int alloc_len)
{
        int xfer_len;

        memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
        inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
        inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;    /* Page Length */
        strncpy(&inq_response[4], ns->ctrl->serial, INQ_SERIAL_NUMBER_LENGTH);

        xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
        return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
}

static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                u8 *inq_response, int alloc_len)
{
        struct nvme_id_ns *id_ns;
        int nvme_sc, res;
        size_t len;
        void *eui;

        nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                return res;

        eui = id_ns->eui64;
        len = sizeof(id_ns->eui64);

        if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
                if (bitmap_empty(eui, len * 8)) {
                        eui = id_ns->nguid;
                        len = sizeof(id_ns->nguid);
                }
        }

        if (bitmap_empty(eui, len * 8)) {
                res = -EOPNOTSUPP;
                goto out_free_id;
        }

        memset(inq_response, 0, alloc_len);
        inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
        inq_response[3] = 4 + len; /* Page Length */

        /* Designation Descriptor start */
        inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */
        inq_response[5] = 0x02; /* PIV=0b | Asso=00b | Designator Type=2h */
        inq_response[6] = 0x00; /* Rsvd */
        inq_response[7] = len;  /* Designator Length */
        memcpy(&inq_response[8], eui, len);

        res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
out_free_id:
        kfree(id_ns);
        return res;
}

static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
                struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_id_ctrl *id_ctrl;
        int nvme_sc, res;

        if (alloc_len < 72) {
                return nvme_trans_completion(hdr,
                                SAM_STAT_CHECK_CONDITION,
                                ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
                                SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
        }

        nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                return res;

        memset(inq_response, 0, alloc_len);
        inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
        inq_response[3] = 0x48; /* Page Length */

        /* Designation Descriptor start */
        inq_response[4] = 0x03; /* Proto ID=0h | Code set=3h */
        inq_response[5] = 0x08; /* PIV=0b | Asso=00b | Designator Type=8h */
        inq_response[6] = 0x00; /* Rsvd */
        inq_response[7] = 0x44; /* Designator Length */

        sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid));
        memcpy(&inq_response[12], ctrl->model, sizeof(ctrl->model));
        sprintf(&inq_response[52], "%04x", cpu_to_be32(ns->ns_id));
        memcpy(&inq_response[56], ctrl->serial, sizeof(ctrl->serial));

        res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
        kfree(id_ctrl);
        return res;
}

static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                        u8 *resp, int alloc_len)
{
        int res;

        if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
                res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
                if (res != -EOPNOTSUPP)
                        return res;
        }

        return nvme_fill_device_id_scsi_string(ns, hdr, resp, alloc_len);
}

static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                        int alloc_len)
{
        u8 *inq_response;
        int res;
        int nvme_sc;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_id_ctrl *id_ctrl;
        struct nvme_id_ns *id_ns;
        int xfer_len;
        u8 microcode = 0x80;
        u8 spt;
        u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
        u8 grd_chk, app_chk, ref_chk, protect;
        u8 uask_sup = 0x20;
        u8 v_sup;
        u8 luiclr = 0x01;

        inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
        if (inq_response == NULL)
                return -ENOMEM;

        nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                goto out_free_inq;

        spt = spt_lut[id_ns->dpc & 0x07] << 3;
        if (id_ns->dps)
                protect = 0x01;
        else
                protect = 0;
        kfree(id_ns);

        grd_chk = protect << 2;
        app_chk = protect << 1;
        ref_chk = protect;

        nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                goto out_free_inq;

        v_sup = id_ctrl->vwc;
        kfree(id_ctrl);

        memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
        inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE;    /* Page Code */
        inq_response[2] = 0x00;    /* Page Length MSB */
        inq_response[3] = 0x3C;    /* Page Length LSB */
        inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
        inq_response[5] = uask_sup;
        inq_response[6] = v_sup;
        inq_response[7] = luiclr;
        inq_response[8] = 0;
        inq_response[9] = 0;

        xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
        res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free_inq:
        kfree(inq_response);
        return res;
}

static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                        u8 *inq_response, int alloc_len)
{
        __be32 max_sectors = cpu_to_be32(
                nvme_block_nr(ns, queue_max_hw_sectors(ns->queue)));
        __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
        __be32 discard_desc_count = cpu_to_be32(0x100);

        memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
        inq_response[1] = VPD_BLOCK_LIMITS;
        inq_response[3] = 0x3c; /* Page Length */
        memcpy(&inq_response[8], &max_sectors, sizeof(u32));
        memcpy(&inq_response[20], &max_discard, sizeof(u32));

        if (max_discard)
                memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));

        return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
}

static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                        int alloc_len)
{
        u8 *inq_response;
        int res;
        int xfer_len;

        inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
        if (inq_response == NULL) {
                res = -ENOMEM;
                goto out_mem;
        }

        inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE;    /* Page Code */
        inq_response[2] = 0x00;    /* Page Length MSB */
        inq_response[3] = 0x3C;    /* Page Length LSB */
        inq_response[4] = 0x00;    /* Medium Rotation Rate MSB */
        inq_response[5] = 0x01;    /* Medium Rotation Rate LSB */
        inq_response[6] = 0x00;    /* Form Factor */

        xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
        res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

        kfree(inq_response);
 out_mem:
        return res;
}

/* LOG SENSE Helper Functions */

static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                        int alloc_len)
{
        int res;
        int xfer_len;
        u8 *log_response;

        log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
        if (log_response == NULL) {
                res = -ENOMEM;
                goto out_mem;
        }

        log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
        /* Subpage=0x00, Page Length MSB=0 */
        log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
        log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
        log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
        log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;

        xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
        res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

        kfree(log_response);
 out_mem:
        return res;
}

static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr, int alloc_len)
{
        int res;
        int xfer_len;
        u8 *log_response;
        struct nvme_smart_log *smart_log;
        u8 temp_c;
        u16 temp_k;

        log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
        if (log_response == NULL)
                return -ENOMEM;

        res = nvme_get_log_page(ns->ctrl, &smart_log);
        if (res < 0)
                goto out_free_response;

        if (res != NVME_SC_SUCCESS) {
                temp_c = LOG_TEMP_UNKNOWN;
        } else {
                temp_k = (smart_log->temperature[1] << 8) +
                                (smart_log->temperature[0]);
                temp_c = temp_k - KELVIN_TEMP_FACTOR;
        }
        kfree(smart_log);

        log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
        /* Subpage=0x00, Page Length MSB=0 */
        log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
        /* Informational Exceptions Log Parameter 1 Start */
        /* Parameter Code=0x0000 bytes 4,5 */
        log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
        log_response[7] = 0x04; /* PARAMETER LENGTH */
        /* Add sense Code and qualifier = 0x00 each */
        /* Use Temperature from NVMe Get Log Page, convert to C from K */
        log_response[10] = temp_c;

        xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
        res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

 out_free_response:
        kfree(log_response);
        return res;
}

static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                        int alloc_len)
{
        int res;
        int xfer_len;
        u8 *log_response;
        struct nvme_smart_log *smart_log;
        u32 feature_resp;
        u8 temp_c_cur, temp_c_thresh;
        u16 temp_k;

        log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
        if (log_response == NULL)
                return -ENOMEM;

        res = nvme_get_log_page(ns->ctrl, &smart_log);
        if (res < 0)
                goto out_free_response;

        if (res != NVME_SC_SUCCESS) {
                temp_c_cur = LOG_TEMP_UNKNOWN;
        } else {
                temp_k = (smart_log->temperature[1] << 8) +
                                (smart_log->temperature[0]);
                temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
        }
        kfree(smart_log);

        /* Get Features for Temp Threshold */
        res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, NULL, 0,
                                                                &feature_resp);
        if (res != NVME_SC_SUCCESS)
                temp_c_thresh = LOG_TEMP_UNKNOWN;
        else
                temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;

        log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
        /* Subpage=0x00, Page Length MSB=0 */
        log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
        /* Temperature Log Parameter 1 (Temperature) Start */
        /* Parameter Code = 0x0000 */
        log_response[6] = 0x01;         /* Format and Linking = 01b */
        log_response[7] = 0x02;         /* Parameter Length */
        /* Use Temperature from NVMe Get Log Page, convert to C from K */
        log_response[9] = temp_c_cur;
        /* Temperature Log Parameter 2 (Reference Temperature) Start */
        log_response[11] = 0x01;        /* Parameter Code = 0x0001 */
        log_response[12] = 0x01;        /* Format and Linking = 01b */
        log_response[13] = 0x02;        /* Parameter Length */
        /* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
        log_response[15] = temp_c_thresh;

        xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
        res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

 out_free_response:
        kfree(log_response);
        return res;
}

/* MODE SENSE Helper Functions */

static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
                                        u16 mode_data_length, u16 blk_desc_len)
{
        /* Quick check to make sure I don't stomp on my own memory... */
        if ((cdb10 && len < 8) || (!cdb10 && len < 4))
                return -EINVAL;

        if (cdb10) {
                resp[0] = (mode_data_length & 0xFF00) >> 8;
                resp[1] = (mode_data_length & 0x00FF);
                resp[3] = 0x10 /* DPOFUA */;
                resp[4] = llbaa;
                resp[5] = RESERVED_FIELD;
                resp[6] = (blk_desc_len & 0xFF00) >> 8;
                resp[7] = (blk_desc_len & 0x00FF);
        } else {
                resp[0] = (mode_data_length & 0x00FF);
                resp[2] = 0x10 /* DPOFUA */;
                resp[3] = (blk_desc_len & 0x00FF);
        }

        return 0;
}
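
/*
 * Worked example for the header above: a MODE SENSE(10) reply carrying one
 * long-LBA block descriptor (llbaa = 1, blk_desc_len = 16) and the caching
 * page (MODE_PAGE_CACHING_LEN = 20 bytes) has mode_data_length =
 * 3 + 3 + 16 + 20 = 42, so resp[0..1] = 00h 2Ah and resp[6..7] = 00h 10h.
 */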

static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                    u8 *resp, int len, u8 llbaa)
{
        int res;
        int nvme_sc;
        struct nvme_id_ns *id_ns;
        u8 flbas;
        u32 lba_length;

        if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
                return -EINVAL;
        else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
                return -EINVAL;

        nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                return res;

        flbas = (id_ns->flbas) & 0x0F;
        lba_length = (1 << (id_ns->lbaf[flbas].ds));

        if (llbaa == 0) {
                __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
                /* Byte 4 is reserved */
                __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);

                memcpy(resp, &tmp_cap, sizeof(u32));
                memcpy(&resp[4], &tmp_len, sizeof(u32));
        } else {
                __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
                __be32 tmp_len = cpu_to_be32(lba_length);

                memcpy(resp, &tmp_cap, sizeof(u64));
                /* Bytes 8, 9, 10, 11 are reserved */
                memcpy(&resp[12], &tmp_len, sizeof(u32));
        }

        kfree(id_ns);
        return res;
}

static int nvme_trans_fill_control_page(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr, u8 *resp,
                                        int len)
{
        if (len < MODE_PAGE_CONTROL_LEN)
                return -EINVAL;

        resp[0] = MODE_PAGE_CONTROL;
        resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
        resp[2] = 0x0E;         /* TST=000b, TMF_ONLY=0, DPICZ=1,
                                 * D_SENSE=1, GLTSD=1, RLEC=0 */
        resp[3] = 0x12;         /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
        /* Byte 4:  VS=0, RAC=0, UA_INT=0, SWP=0 */
        resp[5] = 0x40;         /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
        /* resp[6] and [7] are obsolete, thus zero */
        resp[8] = 0xFF;         /* Busy timeout period = 0xffff */
        resp[9] = 0xFF;
        /* Bytes 10,11: Extended selftest completion time = 0x0000 */

        return 0;
}

static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr,
                                        u8 *resp, int len)
{
        int res = 0;
        int nvme_sc;
        u32 feature_resp;
        u8 vwc;

        if (len < MODE_PAGE_CACHING_LEN)
                return -EINVAL;

        nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, NULL, 0,
                                                                &feature_resp);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                return res;

        vwc = feature_resp & 0x00000001;

        resp[0] = MODE_PAGE_CACHING;
        resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
        resp[2] = vwc << 2;
        return 0;
}

static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr, u8 *resp,
                                        int len)
{
        if (len < MODE_PAGE_POW_CND_LEN)
                return -EINVAL;

        resp[0] = MODE_PAGE_POWER_CONDITION;
        resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
        /* All other bytes are zero */

        return 0;
}

static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr, u8 *resp,
                                        int len)
{
        if (len < MODE_PAGE_INF_EXC_LEN)
                return -EINVAL;

        resp[0] = MODE_PAGE_INFO_EXCEP;
        resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
        resp[2] = 0x88;
        /* All other bytes are zero */

        return 0;
}

static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                     u8 *resp, int len)
{
        int res;
        u16 mode_pages_offset_1 = 0;
        u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;

        mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
        mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
        mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;

        res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
                                        MODE_PAGE_CACHING_LEN);
        if (res)
                return res;
        res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
                                        MODE_PAGE_CONTROL_LEN);
        if (res)
                return res;
        res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
                                        MODE_PAGE_POW_CND_LEN);
        if (res)
                return res;
        return nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
                                        MODE_PAGE_INF_EXC_LEN);
}

static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
{
        if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
                /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
                return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
        } else {
                return 0;
        }
}
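
/*
 * E.g. a MODE SENSE CDB with DBD = 0 (descriptors enabled) and LLBAA = 1
 * yields 8 * (1 + 1) * 1 = 16 bytes, the size of a single long-LBA block
 * descriptor; with LLBAA = 0 it is the 8-byte short descriptor instead.
 */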

static int nvme_trans_mode_page_create(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr, u8 *cmd,
                                        u16 alloc_len, u8 cdb10,
                                        int (*mode_page_fill_func)
                                        (struct nvme_ns *,
                                        struct sg_io_hdr *hdr, u8 *, int),
                                        u16 mode_pages_tot_len)
{
        int res;
        int xfer_len;
        u8 *response;
        u8 dbd, llbaa;
        u16 resp_size;
        int mph_size;
        u16 mode_pages_offset_1;
        u16 blk_desc_len, blk_desc_offset, mode_data_length;

        dbd = (cmd[1] & MODE_SENSE_DBD_MASK) >> MODE_SENSE_DBD_SHIFT;
        llbaa = (cmd[1] & MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT;
        mph_size = cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE;

        blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);

        resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
        /* Refer spc4r34 Table 440 for calculation of Mode data Length field */
        mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;

        blk_desc_offset = mph_size;
        mode_pages_offset_1 = blk_desc_offset + blk_desc_len;

        response = kzalloc(resp_size, GFP_KERNEL);
        if (response == NULL) {
                res = -ENOMEM;
                goto out_mem;
        }

        res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
                                        llbaa, mode_data_length, blk_desc_len);
        if (res)
                goto out_free;
        if (blk_desc_len > 0) {
                res = nvme_trans_fill_blk_desc(ns, hdr,
                                               &response[blk_desc_offset],
                                               blk_desc_len, llbaa);
                if (res)
                        goto out_free;
        }
        res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
                                        mode_pages_tot_len);
        if (res)
                goto out_free;

        xfer_len = min(alloc_len, resp_size);
        res = nvme_trans_copy_to_user(hdr, response, xfer_len);

 out_free:
        kfree(response);
 out_mem:
        return res;
}
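
/*
 * Sizing example for the helper above: MODE SENSE(10) for the caching page
 * with block descriptors enabled (dbd = 0, llbaa = 0) gives mph_size = 8 and
 * blk_desc_len = 8, so with mode_pages_tot_len = MODE_PAGE_CACHING_LEN (20)
 * the response is resp_size = 8 + 8 + 20 = 36 bytes and mode_data_length =
 * 3 + 3 + 8 + 20 = 34.
 */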

/* Read Capacity Helper Functions */

static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
                                                                u8 cdb16)
{
        u8 flbas;
        u32 lba_length;
        u64 rlba;
        u8 prot_en;
        u8 p_type_lut[4] = {0, 0, 1, 2};
        __be64 tmp_rlba;
        __be32 tmp_rlba_32;
        __be32 tmp_len;

        flbas = (id_ns->flbas) & 0x0F;
        lba_length = (1 << (id_ns->lbaf[flbas].ds));
        rlba = le64_to_cpup(&id_ns->nsze) - 1;
        (id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0);

        if (!cdb16) {
                if (rlba > 0xFFFFFFFF)
                        rlba = 0xFFFFFFFF;
                tmp_rlba_32 = cpu_to_be32(rlba);
                tmp_len = cpu_to_be32(lba_length);
                memcpy(response, &tmp_rlba_32, sizeof(u32));
                memcpy(&response[4], &tmp_len, sizeof(u32));
        } else {
                tmp_rlba = cpu_to_be64(rlba);
                tmp_len = cpu_to_be32(lba_length);
                memcpy(response, &tmp_rlba, sizeof(u64));
                memcpy(&response[8], &tmp_len, sizeof(u32));
                response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
                /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
                /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
                /* Bytes 16-31 - Reserved */
        }
}
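
/*
 * Note the clamp above: READ CAPACITY(10) can only carry a 32-bit returned
 * LBA, so a namespace larger than 2^32 blocks reports 0xFFFFFFFF and the
 * initiator is expected to retry with READ CAPACITY(16) (cdb16 != 0), whose
 * response also carries the protection-enable/type byte at offset 12.
 */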

/* Start Stop Unit Helper Functions */

static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                        u8 buffer_id)
{
        struct nvme_command c;
        int nvme_sc;

        memset(&c, 0, sizeof(c));
        c.common.opcode = nvme_admin_activate_fw;
        c.common.cdw10[0] = cpu_to_le32(buffer_id | NVME_FWACT_REPL_ACTV);

        nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
        return nvme_trans_status_code(hdr, nvme_sc);
}

static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                        u8 opcode, u32 tot_len, u32 offset,
                                        u8 buffer_id)
{
        int nvme_sc;
        struct nvme_command c;

        if (hdr->iovec_count > 0) {
                /* Assuming SGL is not allowed for this command */
                return nvme_trans_completion(hdr,
                                        SAM_STAT_CHECK_CONDITION,
                                        ILLEGAL_REQUEST,
                                        SCSI_ASC_INVALID_CDB,
                                        SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
        }

        memset(&c, 0, sizeof(c));
        c.common.opcode = nvme_admin_download_fw;
        c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
        c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);

        nvme_sc = nvme_submit_user_cmd(ns->ctrl->admin_q, &c,
                        hdr->dxferp, tot_len, NULL, 0);
        return nvme_trans_status_code(hdr, nvme_sc);
}
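
/*
 * numd above is the zero's-based dword count required by the NVMe Firmware
 * Image Download command: an 8192-byte image gives numd = 8192 / 4 - 1 =
 * 2047, and offset is likewise expressed in dwords, not bytes.
 */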

/* Mode Select Helper Functions */

static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
                                                u16 *bd_len, u8 *llbaa)
{
        if (cdb10) {
                /* 10 Byte CDB */
                *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
                        parm_list[MODE_SELECT_10_BD_OFFSET + 1];
                *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
                                MODE_SELECT_10_LLBAA_MASK;
        } else {
                /* 6 Byte CDB */
                *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
        }
}
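
/*
 * E.g. for MODE SELECT(10) the parameter list header is 8 bytes: bytes 6-7
 * hold the big-endian block descriptor length and bit 0 of byte 4 is
 * LONGLBA, so a header of 00 00 00 00 01 00 00 10 yields bd_len = 16 and
 * llbaa = 1.  The 6-byte variant has only a one-byte length at offset 3.
 */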
1280
1281 static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
1282                                         u16 idx, u16 bd_len, u8 llbaa)
1283 {
1284         /* Store block descriptor info if a FORMAT UNIT comes later */
1285         /* TODO Saving 1st BD info; what to do if multiple BD received? */
1286         if (llbaa == 0) {
1287                 /* Standard Block Descriptor - spc4r34 7.5.5.1 */
1288                 ns->mode_select_num_blocks =
1289                                 (parm_list[idx + 1] << 16) +
1290                                 (parm_list[idx + 2] << 8) +
1291                                 (parm_list[idx + 3]);
1292
1293                 ns->mode_select_block_len =
1294                                 (parm_list[idx + 5] << 16) +
1295                                 (parm_list[idx + 6] << 8) +
1296                                 (parm_list[idx + 7]);
1297         } else {
1298                 /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
1299                 ns->mode_select_num_blocks =
1300                                 (((u64)parm_list[idx + 0]) << 56) +
1301                                 (((u64)parm_list[idx + 1]) << 48) +
1302                                 (((u64)parm_list[idx + 2]) << 40) +
1303                                 (((u64)parm_list[idx + 3]) << 32) +
1304                                 (((u64)parm_list[idx + 4]) << 24) +
1305                                 (((u64)parm_list[idx + 5]) << 16) +
1306                                 (((u64)parm_list[idx + 6]) << 8) +
1307                                 ((u64)parm_list[idx + 7]);
1308
1309                 ns->mode_select_block_len =
1310                                 (parm_list[idx + 12] << 24) +
1311                                 (parm_list[idx + 13] << 16) +
1312                                 (parm_list[idx + 14] << 8) +
1313                                 (parm_list[idx + 15]);
1314         }
1315 }
1316
1317 static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1318                                         u8 *mode_page, u8 page_code)
1319 {
1320         int res = 0;
1321         int nvme_sc;
1322         unsigned dword11;
1323
1324         switch (page_code) {
1325         case MODE_PAGE_CACHING:
1326                 dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
1327                 nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC,
1328                                             dword11, NULL, 0, NULL);
1329                 res = nvme_trans_status_code(hdr, nvme_sc);
1330                 break;
1331         case MODE_PAGE_CONTROL:
1332                 break;
1333         case MODE_PAGE_POWER_CONDITION:
1334                 /* Verify the OS is not trying to set timers */
1335                 if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
1336                         res = nvme_trans_completion(hdr,
1337                                                 SAM_STAT_CHECK_CONDITION,
1338                                                 ILLEGAL_REQUEST,
1339                                                 SCSI_ASC_INVALID_PARAMETER,
1340                                                 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1342                 }
1343                 break;
1344         default:
1345                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1346                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1347                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1348                 break;
1349         }
1350
1351         return res;
1352 }
1353
1354 static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1355                                         u8 *cmd, u16 parm_list_len, u8 pf,
1356                                         u8 sp, u8 cdb10)
1357 {
1358         int res;
1359         u8 *parm_list;
1360         u16 bd_len;
1361         u8 llbaa = 0;
1362         u16 index, saved_index;
1363         u8 page_code;
1364         u16 mp_size;
1365
1366         /* Get parm list from data-in/out buffer */
1367         parm_list = kmalloc(parm_list_len, GFP_KERNEL);
1368         if (parm_list == NULL) {
1369                 res = -ENOMEM;
1370                 goto out;
1371         }
1372
1373         res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
1374         if (res)
1375                 goto out_mem;
1376
1377         nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
1378         index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);
1379
1380         if (bd_len != 0) {
1381                 /* Block Descriptors present, parse */
1382                 nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
1383                 index += bd_len;
1384         }
1385         saved_index = index;
1386
1387         /* Multiple mode pages may be present; iterate through all */
1388         /* 1st pass: don't issue NVMe commands, only check for CDB errors */
1389         do {
1390                 page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
1391                 mp_size = parm_list[index + 1] + 2;
1392                 if ((page_code != MODE_PAGE_CACHING) &&
1393                     (page_code != MODE_PAGE_CONTROL) &&
1394                     (page_code != MODE_PAGE_POWER_CONDITION)) {
1395                         res = nvme_trans_completion(hdr,
1396                                                 SAM_STAT_CHECK_CONDITION,
1397                                                 ILLEGAL_REQUEST,
1398                                                 SCSI_ASC_INVALID_CDB,
1399                                                 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1400                         goto out_mem;
1401                 }
1402                 index += mp_size;
1403         } while (index < parm_list_len);
1404
1405         /* 2nd pass: issue the NVMe commands */
1406         index = saved_index;
1407         do {
1408                 page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
1409                 mp_size = parm_list[index + 1] + 2;
1410                 res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
1411                                                                 page_code);
1412                 if (res)
1413                         break;
1414                 index += mp_size;
1415         } while (index < parm_list_len);
1416
1417  out_mem:
1418         kfree(parm_list);
1419  out:
1420         return res;
1421 }
1422
1423 /* Format Unit Helper Functions */
1424
1425 static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
1426                                              struct sg_io_hdr *hdr)
1427 {
1428         int res = 0;
1429         int nvme_sc;
1430         u8 flbas;
1431
1432         /*
1433          * SCSI expects a MODE SELECT to have been issued prior to a
1434          * FORMAT UNIT, with the block size and count taken from its
1435          * block descriptor. If no MODE SELECT has been issued, FORMAT
1436          * shall use the current values for both.
1437          */
1438
1439         if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
1440                 struct nvme_id_ns *id_ns;
1441
1442                 nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
1443                 res = nvme_trans_status_code(hdr, nvme_sc);
1444                 if (res)
1445                         return res;
1446
1447                 if (ns->mode_select_num_blocks == 0)
1448                         ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
1449                 if (ns->mode_select_block_len == 0) {
1450                         flbas = (id_ns->flbas) & 0x0F;
1451                         ns->mode_select_block_len =
1452                                                 (1 << (id_ns->lbaf[flbas].ds));
1453                 }
1454
1455                 kfree(id_ns);
1456         }
1457
1458         return 0;
1459 }
1460
1461 static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
1462                                         u8 format_prot_info, u8 *nvme_pf_code)
1463 {
1464         int res;
1465         u8 *parm_list;
1466         u8 pf_usage, pf_code;
1467
1468         parm_list = kmalloc(len, GFP_KERNEL);
1469         if (parm_list == NULL) {
1470                 res = -ENOMEM;
1471                 goto out;
1472         }
1473         res = nvme_trans_copy_from_user(hdr, parm_list, len);
1474         if (res)
1475                 goto out_mem;
1476
1477         if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
1478                                 FORMAT_UNIT_IMMED_MASK) != 0) {
1479                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1480                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1481                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1482                 goto out_mem;
1483         }
1484
1485         if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
1486             (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
1487                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1488                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1489                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1490                 goto out_mem;
1491         }
1492         pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
1493                         FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
1494         pf_code = (pf_usage << 2) | format_prot_info;
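        /*
         * The valid (pf_usage, FMTPINFO) combinations map onto NVMe
         * protection information types as follows (a sketch of the
         * SCSI-NVMe translation rules; all other codes are rejected):
         *
         *   pf_usage  fmtpinfo  pf_code  NVMe PI
         *      000       00        0     disabled
         *      000       10        2     Type 1
         *      000       11        3     Type 2
         *      001       11        7     Type 3
         */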
1495         switch (pf_code) {
1496         case 0:
1497                 *nvme_pf_code = 0;
1498                 break;
1499         case 2:
1500                 *nvme_pf_code = 1;
1501                 break;
1502         case 3:
1503                 *nvme_pf_code = 2;
1504                 break;
1505         case 7:
1506                 *nvme_pf_code = 3;
1507                 break;
1508         default:
1509                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1510                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1511                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1512                 break;
1513         }
1514
1515  out_mem:
1516         kfree(parm_list);
1517  out:
1518         return res;
1519 }
1520
1521 static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1522                                    u8 prot_info)
1523 {
1524         int res;
1525         int nvme_sc;
1526         struct nvme_id_ns *id_ns;
1527         u8 i;
1528         u8 nlbaf;
1529         u8 selected_lbaf = 0xFF;
1530         u32 cdw10 = 0;
1531         struct nvme_command c;
1532
1533         /* Find the LBA format in id_ns matching the requested size, for cdw10 */
1534         nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
1535         res = nvme_trans_status_code(hdr, nvme_sc);
1536         if (res)
1537                 return res;
1538
1539         nlbaf = id_ns->nlbaf;
1540
1541         for (i = 0; i < nlbaf; i++) {
1542                 if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
1543                         selected_lbaf = i;
1544                         break;
1545                 }
1546         }
1547         if (selected_lbaf > 0x0F) {
1548                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1549                                 ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
1550                                 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1551                 goto out_free_id;
1552         }
1553         if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
1554                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1555                                 ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
1556                                 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1557                 goto out_free_id;
1558         }
1557
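        /*
         * Format NVM cdw10 layout (per the NVMe 1.x spec, an assumption
         * worth checking against the target spec revision): bits 3:0
         * select the LBA format, bits 7:5 the protection information
         * type; metadata and secure-erase settings are left at zero.
         */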
1558         cdw10 |= prot_info << 5;
1559         cdw10 |= selected_lbaf & 0x0F;
1560         memset(&c, 0, sizeof(c));
1561         c.format.opcode = nvme_admin_format_nvm;
1562         c.format.nsid = cpu_to_le32(ns->ns_id);
1563         c.format.cdw10 = cpu_to_le32(cdw10);
1564
1565         nvme_sc = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL, 0);
1566         res = nvme_trans_status_code(hdr, nvme_sc);
1567
1568  out_free_id:
1569         kfree(id_ns);
1570         return res;
1570 }
1571
1572 static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
1573                                         struct nvme_trans_io_cdb *cdb_info,
1574                                         u32 max_blocks)
1575 {
1576         /* If using iovecs, send one nvme command per vector */
1577         if (hdr->iovec_count > 0)
1578                 return hdr->iovec_count;
1579         else if (cdb_info->xfer_len > max_blocks)
1580                 return DIV_ROUND_UP(cdb_info->xfer_len, max_blocks);
1581         else
1582                 return 1;
1583 }
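/*
 * Worked example for the non-iovec path: a READ(10) of 1024 blocks on a
 * device whose max_blocks is 256 yields DIV_ROUND_UP(1024, 256) = 4 NVMe
 * commands; nvme_trans_do_nvme_io() below then issues 4 reads of 256
 * blocks each at slba offsets 0, 256, 512 and 768.
 */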
1584
1585 static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
1586                                         struct nvme_trans_io_cdb *cdb_info)
1587 {
1588         u16 control = 0;
1589
1590         /* When Protection information support is added, implement here */
1591
1592         if (cdb_info->fua > 0)
1593                 control |= NVME_RW_FUA;
1594
1595         return control;
1596 }
1597
1598 static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1599                                 struct nvme_trans_io_cdb *cdb_info, u8 is_write)
1600 {
1601         int nvme_sc = NVME_SC_SUCCESS;
1602         u32 num_cmds;
1603         u64 unit_len;
1604         u64 unit_num_blocks;    /* Number of blocks to xfer in each nvme cmd */
1605         u32 retcode;
1606         u32 i = 0;
1607         u64 nvme_offset = 0;
1608         void __user *next_mapping_addr;
1609         struct nvme_command c;
1610         u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
1611         u16 control;
1612         u32 max_blocks = queue_max_hw_sectors(ns->queue) >> (ns->lba_shift - 9);
1613
1614         num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
1615
1616         /*
1617          * This loop handles two cases.
1618          * First, when an SGL is used in the form of an iovec list:
1619          *   - Use iov_base as the next mapping address for the nvme command
1620          *   - Use iov_len as the data transfer length for the command.
1621          * Second, when we have a single buffer
1622          *   - If larger than max_blocks, split into chunks, offset
1623          *        each nvme command accordingly.
1624          */
1625         for (i = 0; i < num_cmds; i++) {
1626                 memset(&c, 0, sizeof(c));
1627                 if (hdr->iovec_count > 0) {
1628                         struct sg_iovec sgl;
1629
1630                         retcode = copy_from_user(&sgl, hdr->dxferp +
1631                                         i * sizeof(struct sg_iovec),
1632                                         sizeof(struct sg_iovec));
1633                         if (retcode)
1634                                 return -EFAULT;
1635                         unit_len = sgl.iov_len;
1636                         unit_num_blocks = unit_len >> ns->lba_shift;
1637                         next_mapping_addr = sgl.iov_base;
1638                 } else {
1639                         unit_num_blocks = min((u64)max_blocks,
1640                                         (cdb_info->xfer_len - nvme_offset));
1641                         unit_len = unit_num_blocks << ns->lba_shift;
1642                         next_mapping_addr = hdr->dxferp +
1643                                         ((1 << ns->lba_shift) * nvme_offset);
1644                 }
1645
1646                 c.rw.opcode = opcode;
1647                 c.rw.nsid = cpu_to_le32(ns->ns_id);
1648                 c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
1649                 c.rw.length = cpu_to_le16(unit_num_blocks - 1);
1650                 control = nvme_trans_io_get_control(ns, cdb_info);
1651                 c.rw.control = cpu_to_le16(control);
1652
1653                 if (get_capacity(ns->disk) - unit_num_blocks <
1654                                 cdb_info->lba + nvme_offset) {
1655                         nvme_sc = NVME_SC_LBA_RANGE;
1656                         break;
1657                 }
1658                 nvme_sc = nvme_submit_user_cmd(ns->queue, &c,
1659                                 next_mapping_addr, unit_len, NULL, 0);
1660                 if (nvme_sc)
1661                         break;
1662
1663                 nvme_offset += unit_num_blocks;
1664         }
1665
1666         return nvme_trans_status_code(hdr, nvme_sc);
1667 }
1668
1669
1670 /* SCSI Command Translation Functions */
1671
1672 static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
1673                                                         u8 *cmd)
1674 {
1675         int res = 0;
1676         struct nvme_trans_io_cdb cdb_info = { 0, };
1677         u8 opcode = cmd[0];
1678         u64 xfer_bytes;
1679         u64 sum_iov_len = 0;
1680         struct sg_iovec sgl;
1681         int i;
1682         size_t not_copied;
1683
1684         /*
1685          * The FUA and WPROTECT fields are not supported in 6-byte CDBs,
1686          * but sit in the same place in all larger CDBs.
1687          */
1688         switch (opcode) {
1689         case WRITE_6:
1690         case READ_6:
1691                 break;
1692         default:
1693                 cdb_info.fua = cmd[1] & 0x8;
1694                 cdb_info.prot_info = (cmd[1] & 0xe0) >> 5;
1695                 if (cdb_info.prot_info && !ns->pi_type) {
1696                         return nvme_trans_completion(hdr,
1697                                         SAM_STAT_CHECK_CONDITION,
1698                                         ILLEGAL_REQUEST,
1699                                         SCSI_ASC_INVALID_CDB,
1700                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1701                 }
1702         }
1703
1704         switch (opcode) {
1705         case WRITE_6:
1706         case READ_6:
1707                 cdb_info.lba = get_unaligned_be24(&cmd[1]) & 0x1fffff; /* 21-bit LBA */
1708                 cdb_info.xfer_len = cmd[4];
1709                 if (cdb_info.xfer_len == 0)
1710                         cdb_info.xfer_len = 256;
1711                 break;
1712         case WRITE_10:
1713         case READ_10:
1714                 cdb_info.lba = get_unaligned_be32(&cmd[2]);
1715                 cdb_info.xfer_len = get_unaligned_be16(&cmd[7]);
1716                 break;
1717         case WRITE_12:
1718         case READ_12:
1719                 cdb_info.lba = get_unaligned_be32(&cmd[2]);
1720                 cdb_info.xfer_len = get_unaligned_be32(&cmd[6]);
1721                 break;
1722         case WRITE_16:
1723         case READ_16:
1724                 cdb_info.lba = get_unaligned_be64(&cmd[2]);
1725                 cdb_info.xfer_len = get_unaligned_be32(&cmd[10]);
1726                 break;
1727         default:
1728                 /* Will never really reach here */
1729                 res = -EIO;
1730                 goto out;
1731         }
1732
1733         /* Calculate total length of transfer (in bytes) */
1734         if (hdr->iovec_count > 0) {
1735                 for (i = 0; i < hdr->iovec_count; i++) {
1736                         not_copied = copy_from_user(&sgl, hdr->dxferp +
1737                                                 i * sizeof(struct sg_iovec),
1738                                                 sizeof(struct sg_iovec));
1739                         if (not_copied)
1740                                 return -EFAULT;
1741                         sum_iov_len += sgl.iov_len;
1742                         /* IO vector sizes should be multiples of block size */
1743                         if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
1744                                 res = nvme_trans_completion(hdr,
1745                                                 SAM_STAT_CHECK_CONDITION,
1746                                                 ILLEGAL_REQUEST,
1747                                                 SCSI_ASC_INVALID_PARAMETER,
1748                                                 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1749                                 goto out;
1750                         }
1751                 }
1752         } else {
1753                 sum_iov_len = hdr->dxfer_len;
1754         }
1755
1756         /* As per the sg ioctl HOWTO, if the lengths differ, use the lower one */
1757         xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);
1758
1759         /* If the block count and actual data buffer size don't match, error out */
1760         if (xfer_bytes != ((u64)cdb_info.xfer_len << ns->lba_shift)) {
1761                 res = -EINVAL;
1762                 goto out;
1763         }
1764
1765         /* Check for 0 length transfer - it is not illegal */
1766         if (cdb_info.xfer_len == 0)
1767                 goto out;
1768
1769         /* Send NVMe IO Command(s) */
1770         res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
1771
1774  out:
1775         return res;
1776 }
1777
1778 static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1779                                                         u8 *cmd)
1780 {
1781         int res = 0;
1782         u8 evpd;
1783         u8 page_code;
1784         int alloc_len;
1785         u8 *inq_response;
1786
1787         evpd = cmd[1] & 0x01;
1788         page_code = cmd[2];
1789         alloc_len = get_unaligned_be16(&cmd[3]);
1790
1791         inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
1792                                 GFP_KERNEL);
1793         if (inq_response == NULL) {
1794                 res = -ENOMEM;
1795                 goto out_mem;
1796         }
1797
1798         if (evpd == 0) {
1799                 if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
1800                         res = nvme_trans_standard_inquiry_page(ns, hdr,
1801                                                 inq_response, alloc_len);
1802                 } else {
1803                         res = nvme_trans_completion(hdr,
1804                                                 SAM_STAT_CHECK_CONDITION,
1805                                                 ILLEGAL_REQUEST,
1806                                                 SCSI_ASC_INVALID_CDB,
1807                                                 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1808                 }
1809         } else {
1810                 switch (page_code) {
1811                 case VPD_SUPPORTED_PAGES:
1812                         res = nvme_trans_supported_vpd_pages(ns, hdr,
1813                                                 inq_response, alloc_len);
1814                         break;
1815                 case VPD_SERIAL_NUMBER:
1816                         res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
1817                                                                 alloc_len);
1818                         break;
1819                 case VPD_DEVICE_IDENTIFIERS:
1820                         res = nvme_trans_device_id_page(ns, hdr, inq_response,
1821                                                                 alloc_len);
1822                         break;
1823                 case VPD_EXTENDED_INQUIRY:
1824                         res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
1825                         break;
1826                 case VPD_BLOCK_LIMITS:
1827                         res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
1828                                                                 alloc_len);
1829                         break;
1830                 case VPD_BLOCK_DEV_CHARACTERISTICS:
1831                         res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
1832                         break;
1833                 default:
1834                         res = nvme_trans_completion(hdr,
1835                                                 SAM_STAT_CHECK_CONDITION,
1836                                                 ILLEGAL_REQUEST,
1837                                                 SCSI_ASC_INVALID_CDB,
1838                                                 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1839                         break;
1840                 }
1841         }
1842         kfree(inq_response);
1843  out_mem:
1844         return res;
1845 }
1846
1847 static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1848                                                         u8 *cmd)
1849 {
1850         int res;
1851         u16 alloc_len;
1852         u8 pc;
1853         u8 page_code;
1854
1855         if (cmd[1] != LOG_SENSE_CDB_SP_NOT_ENABLED) {
1856                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1857                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1858                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1859                 goto out;
1860         }
1861
1862         page_code = cmd[2] & LOG_SENSE_CDB_PAGE_CODE_MASK;
1863         pc = (cmd[2] & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
1864         if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
1865                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1866                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1867                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1868                 goto out;
1869         }
1870         alloc_len = get_unaligned_be16(&cmd[7]);
1871         switch (page_code) {
1872         case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
1873                 res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
1874                 break;
1875         case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
1876                 res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
1877                 break;
1878         case LOG_PAGE_TEMPERATURE_PAGE:
1879                 res = nvme_trans_log_temperature(ns, hdr, alloc_len);
1880                 break;
1881         default:
1882                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1883                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1884                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1885                 break;
1886         }
1887
1888  out:
1889         return res;
1890 }
1891
1892 static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1893                                                         u8 *cmd)
1894 {
1895         u8 cdb10 = 0;
1896         u16 parm_list_len;
1897         u8 page_format;
1898         u8 save_pages;
1899
1900         page_format = cmd[1] & MODE_SELECT_CDB_PAGE_FORMAT_MASK;
1901         save_pages = cmd[1] & MODE_SELECT_CDB_SAVE_PAGES_MASK;
1902
1903         if (cmd[0] == MODE_SELECT) {
1904                 parm_list_len = cmd[4];
1905         } else {
1906                 parm_list_len = cmd[7];
1907                 cdb10 = 1;
1908         }
1909
1910         if (parm_list_len != 0) {
1911                 /*
1912                  * According to SPC-4 r24, a parameter list length field of 0
1913                  * shall not be considered an error
1914                  */
1915                 return nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
1916                                                 page_format, save_pages, cdb10);
1917         }
1918
1919         return 0;
1920 }
1921
1922 static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1923                                                         u8 *cmd)
1924 {
1925         int res = 0;
1926         u16 alloc_len;
1927         u8 cdb10 = 0;
1928
1929         if (cmd[0] == MODE_SENSE) {
1930                 alloc_len = cmd[4];
1931         } else {
1932                 alloc_len = get_unaligned_be16(&cmd[7]);
1933                 cdb10 = 1;
1934         }
1935
1936         if ((cmd[2] & MODE_SENSE_PAGE_CONTROL_MASK) !=
1937                         MODE_SENSE_PC_CURRENT_VALUES) {
1938                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1939                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1940                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1941                 goto out;
1942         }
1943
1944         switch (cmd[2] & MODE_SENSE_PAGE_CODE_MASK) {
1945         case MODE_PAGE_CACHING:
1946                 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
1947                                                 cdb10,
1948                                                 &nvme_trans_fill_caching_page,
1949                                                 MODE_PAGE_CACHING_LEN);
1950                 break;
1951         case MODE_PAGE_CONTROL:
1952                 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
1953                                                 cdb10,
1954                                                 &nvme_trans_fill_control_page,
1955                                                 MODE_PAGE_CONTROL_LEN);
1956                 break;
1957         case MODE_PAGE_POWER_CONDITION:
1958                 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
1959                                                 cdb10,
1960                                                 &nvme_trans_fill_pow_cnd_page,
1961                                                 MODE_PAGE_POW_CND_LEN);
1962                 break;
1963         case MODE_PAGE_INFO_EXCEP:
1964                 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
1965                                                 cdb10,
1966                                                 &nvme_trans_fill_inf_exc_page,
1967                                                 MODE_PAGE_INF_EXC_LEN);
1968                 break;
1969         case MODE_PAGE_RETURN_ALL:
1970                 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
1971                                                 cdb10,
1972                                                 &nvme_trans_fill_all_pages,
1973                                                 MODE_PAGE_ALL_LEN);
1974                 break;
1975         default:
1976                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1977                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1978                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1979                 break;
1980         }
1981
1982  out:
1983         return res;
1984 }
1985
1986 static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1987                                                         u8 *cmd, u8 cdb16)
1988 {
1989         int res;
1990         int nvme_sc;
1991         u32 alloc_len;
1992         u32 resp_size;
1993         u32 xfer_len;
1994         struct nvme_id_ns *id_ns;
1995         u8 *response;
1996
1997         if (cdb16) {
1998                 alloc_len = get_unaligned_be32(&cmd[10]);
1999                 resp_size = READ_CAP_16_RESP_SIZE;
2000         } else {
2001                 alloc_len = READ_CAP_10_RESP_SIZE;
2002                 resp_size = READ_CAP_10_RESP_SIZE;
2003         }
2004
2005         nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
2006         res = nvme_trans_status_code(hdr, nvme_sc);
2007         if (res)
2008                 return res;
2009
2010         response = kzalloc(resp_size, GFP_KERNEL);
2011         if (response == NULL) {
2012                 res = -ENOMEM;
2013                 goto out_free_id;
2014         }
2015         nvme_trans_fill_read_cap(response, id_ns, cdb16);
2016
2017         xfer_len = min(alloc_len, resp_size);
2018         res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2019
2020         kfree(response);
2021  out_free_id:
2022         kfree(id_ns);
2023         return res;
2024 }
2025
2026 static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2027                                                         u8 *cmd)
2028 {
2029         int res;
2030         int nvme_sc;
2031         u32 alloc_len, xfer_len, resp_size;
2032         u8 *response;
2033         struct nvme_id_ctrl *id_ctrl;
2034         u32 ll_length, lun_id;
2035         u32 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET; /* can exceed 255 */
2036         __be32 tmp_len;
2037
2038         switch (cmd[2]) {
2039         default:
2040                 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2041                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2042                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2043         case ALL_LUNS_RETURNED:
2044         case ALL_WELL_KNOWN_LUNS_RETURNED:
2045         case RESTRICTED_LUNS_RETURNED:
2046                 nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
2047                 res = nvme_trans_status_code(hdr, nvme_sc);
2048                 if (res)
2049                         return res;
2050
2051                 ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
2052                 resp_size = ll_length + LUN_DATA_HEADER_SIZE;
2053
2054                 alloc_len = get_unaligned_be32(&cmd[6]);
2055                 if (alloc_len < resp_size) {
2056                         res = nvme_trans_completion(hdr,
2057                                         SAM_STAT_CHECK_CONDITION,
2058                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2059                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2060                         goto out_free_id;
2061                 }
2062
2063                 response = kzalloc(resp_size, GFP_KERNEL);
2064                 if (response == NULL) {
2065                         res = -ENOMEM;
2066                         goto out_free_id;
2067                 }
2068
2069                 /* The first LUN ID will always be 0 per the SAM spec */
2070                 for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
2071                         /*
2072                          * Set the LUN Id and then increment to the next LUN
2073                          * location in the parameter data.
2074                          */
2075                         __be64 tmp_id = cpu_to_be64(lun_id);
2076                         memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
2077                         lun_id_offset += LUN_ENTRY_SIZE;
2078                 }
2079                 tmp_len = cpu_to_be32(ll_length);
2080                 memcpy(response, &tmp_len, sizeof(u32));
2081         }
2082
2083         xfer_len = min(alloc_len, resp_size);
2084         res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2085
2086         kfree(response);
2087  out_free_id:
2088         kfree(id_ctrl);
2089         return res;
2090 }
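/*
 * Resulting REPORT LUNS parameter data for a controller with nn = 2
 * (a sketch):
 *
 *   bytes 0-3    LUN list length = 16 (2 entries * LUN_ENTRY_SIZE)
 *   bytes 4-7    reserved
 *   bytes 8-15   LUN 0 (big endian)
 *   bytes 16-23  LUN 1 (big endian)
 */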
2091
2092 static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2093                                                         u8 *cmd)
2094 {
2095         int res;
2096         u8 alloc_len, xfer_len, resp_size;
2097         u8 desc_format;
2098         u8 *response;
2099
2100         desc_format = cmd[1] & 0x01;
2101         alloc_len = cmd[4];
2102
2103         resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
2104                                         (FIXED_FMT_SENSE_DATA_SIZE));
2105         response = kzalloc(resp_size, GFP_KERNEL);
2106         if (response == NULL) {
2107                 res = -ENOMEM;
2108                 goto out;
2109         }
2110
2111         if (desc_format) {
2112                 /* Descriptor Format Sense Data */
2113                 response[0] = DESC_FORMAT_SENSE_DATA;
2114                 response[1] = NO_SENSE;
2115                 /* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
2116                 response[2] = SCSI_ASC_NO_SENSE;
2117                 response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2118                 /* SDAT_OVFL = 0 | Additional Sense Length = 0 */
2119         } else {
2120                 /* Fixed Format Sense Data */
2121                 response[0] = FIXED_SENSE_DATA;
2122                 /* Byte 1 = Obsolete */
2123                 response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
2124                 /* Bytes 3-6 - Information - set to zero */
2125                 response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
2126                 /* Bytes 8-11 - Cmd Specific Information - set to zero */
2127                 response[12] = SCSI_ASC_NO_SENSE;
2128                 response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2129                 /* Byte 14 = Field Replaceable Unit Code = 0 */
2130                 /* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
2131         }
2132
2133         xfer_len = min(alloc_len, resp_size);
2134         res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2135
2136         kfree(response);
2137  out:
2138         return res;
2139 }
2140
2141 static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
2142                                         struct sg_io_hdr *hdr)
2143 {
2144         int nvme_sc;
2145         struct nvme_command c;
2146
2147         memset(&c, 0, sizeof(c));
2148         c.common.opcode = nvme_cmd_flush;
2149         c.common.nsid = cpu_to_le32(ns->ns_id);
2150
2151         nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
2152         return nvme_trans_status_code(hdr, nvme_sc);
2153 }
2154
2155 static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2156                                                         u8 *cmd)
2157 {
2158         int res;
2159         u8 parm_hdr_len = 0;
2160         u8 nvme_pf_code = 0;
2161         u8 format_prot_info, long_list, format_data;
2162
2163         format_prot_info = (cmd[1] & 0xc0) >> 6;
2164         long_list = cmd[1] & 0x20;
2165         format_data = cmd[1] & 0x10;
2166
2167         if (format_data != 0) {
2168                 if (format_prot_info != 0) {
2169                         if (long_list == 0)
2170                                 parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
2171                         else
2172                                 parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
2173                 }
2174         } else if (format_prot_info != 0) {
2175                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2176                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2177                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2178                 goto out;
2179         }
2180
2181         /* Get parm header from data-in/out buffer */
2182         /*
2183          * According to the translation spec, the only fields in the parameter
2184          * list we are concerned with are in the header. So allocate only that.
2185          */
2186         if (parm_hdr_len > 0) {
2187                 res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
2188                                         format_prot_info, &nvme_pf_code);
2189                 if (res)
2190                         goto out;
2191         }
2192
2193         /* Attempt to activate any previously downloaded firmware image */
2194         res = nvme_trans_send_activate_fw_cmd(ns, hdr, 0);
2195
2196         /* Determine Block size and count and send format command */
2197         res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
2198         if (res)
2199                 goto out;
2200
2201         res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
2202
2203  out:
2204         return res;
2205 }
2206
2207 static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
2208                                         struct sg_io_hdr *hdr,
2209                                         u8 *cmd)
2210 {
2211         if (!nvme_ctrl_ready(ns->ctrl))
2212                 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2213                                             NOT_READY, SCSI_ASC_LUN_NOT_READY,
2214                                             SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2215         else
2216                 return nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
2217 }
2218
2219 static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2220                                                         u8 *cmd)
2221 {
2222         int res = 0;
2223         u32 buffer_offset, parm_list_length;
2224         u8 buffer_id, mode;
2225
2226         parm_list_length = get_unaligned_be24(&cmd[6]);
2227         if (parm_list_length % BYTES_TO_DWORDS != 0) {
2228                 /* NVMe expects the firmware image to be a whole number of dwords */
2229                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2230                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2231                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2232                 goto out;
2233         }
2234         buffer_id = cmd[2];
2235         if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
2236                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2237                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2238                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2239                 goto out;
2240         }
2241         mode = cmd[1] & 0x1f;
2242         buffer_offset = get_unaligned_be24(&cmd[3]);
2243
2244         switch (mode) {
2245         case DOWNLOAD_SAVE_ACTIVATE:
2246                 res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
2247                                                 parm_list_length, buffer_offset,
2248                                                 buffer_id);
2249                 if (res)
2250                         goto out;
2251                 res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
2252                 break;
2253         case DOWNLOAD_SAVE_DEFER_ACTIVATE:
2254                 res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
2255                                                 parm_list_length, buffer_offset,
2256                                                 buffer_id);
2257                 break;
2258         case ACTIVATE_DEFERRED_MICROCODE:
2259                 res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
2260                 break;
2261         default:
2262                 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2263                                         ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2264                                         SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2265                 break;
2266         }
2267
2268  out:
2269         return res;
2270 }
2271
2272 struct scsi_unmap_blk_desc {
2273         __be64  slba;
2274         __be32  nlb;
2275         u32     resv;
2276 };
2277
2278 struct scsi_unmap_parm_list {
2279         __be16  unmap_data_len;
2280         __be16  unmap_blk_desc_data_len;
2281         u32     resv;
2282         struct scsi_unmap_blk_desc desc[];
2283 };
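/*
 * Example parameter list for a single 8-block UNMAP at LBA 0x1000 (all
 * fields big endian, matching the __be annotations above):
 *
 *   unmap_data_len          = 22   (bytes following this field)
 *   unmap_blk_desc_data_len = 16   (one 16-byte descriptor)
 *   desc[0].slba            = 0x1000
 *   desc[0].nlb             = 8
 *
 * nvme_trans_unmap() below turns each descriptor into one
 * struct nvme_dsm_range of a Dataset Management (deallocate) command.
 */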
2284
2285 static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2286                                                         u8 *cmd)
2287 {
2288         struct scsi_unmap_parm_list *plist;
2289         struct nvme_dsm_range *range;
2290         struct nvme_command c;
2291         int i, nvme_sc, res;
2292         u16 ndesc, list_len;
2293
2294         list_len = get_unaligned_be16(&cmd[7]);
2295         if (!list_len)
2296                 return -EINVAL;
2297
2298         plist = kmalloc(list_len, GFP_KERNEL);
2299         if (!plist)
2300                 return -ENOMEM;
2301
2302         res = nvme_trans_copy_from_user(hdr, plist, list_len);
2303         if (res)
2304                 goto out;
2305
2306         ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
2307         if (!ndesc || ndesc > 256) {
2308                 res = -EINVAL;
2309                 goto out;
2310         }
2311
2312         range = kcalloc(ndesc, sizeof(*range), GFP_KERNEL);
2313         if (!range) {
2314                 res = -ENOMEM;
2315                 goto out;
2316         }
2317
2318         for (i = 0; i < ndesc; i++) {
2319                 range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
2320                 range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
2321                 range[i].cattr = 0;
2322         }
2323
2324         memset(&c, 0, sizeof(c));
2325         c.dsm.opcode = nvme_cmd_dsm;
2326         c.dsm.nsid = cpu_to_le32(ns->ns_id);
2327         c.dsm.nr = cpu_to_le32(ndesc - 1);
2328         c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
2329
2330         nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, range,
2331                         ndesc * sizeof(*range));
2332         res = nvme_trans_status_code(hdr, nvme_sc);
2333
2334         kfree(range);
2335  out:
2336         kfree(plist);
2337         return res;
2338 }
2339
2340 static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
2341 {
2342         u8 cmd[16];
2343         int retcode;
2344         unsigned int opcode;
2345
2346         if (hdr->cmdp == NULL)
2347                 return -EMSGSIZE;
2348         if (hdr->cmd_len > sizeof(cmd))
2349                 return -EINVAL;
2350         if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
2351                 return -EFAULT;
2352
2353         /*
2354          * Prime the hdr with good status for scsi commands that don't require
2355          * an nvme command for translation.
2356          */
2357         retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
2358         if (retcode)
2359                 return retcode;
2360
2361         opcode = cmd[0];
2362
2363         switch (opcode) {
2364         case READ_6:
2365         case READ_10:
2366         case READ_12:
2367         case READ_16:
2368                 retcode = nvme_trans_io(ns, hdr, 0, cmd);
2369                 break;
2370         case WRITE_6:
2371         case WRITE_10:
2372         case WRITE_12:
2373         case WRITE_16:
2374                 retcode = nvme_trans_io(ns, hdr, 1, cmd);
2375                 break;
2376         case INQUIRY:
2377                 retcode = nvme_trans_inquiry(ns, hdr, cmd);
2378                 break;
2379         case LOG_SENSE:
2380                 retcode = nvme_trans_log_sense(ns, hdr, cmd);
2381                 break;
2382         case MODE_SELECT:
2383         case MODE_SELECT_10:
2384                 retcode = nvme_trans_mode_select(ns, hdr, cmd);
2385                 break;
2386         case MODE_SENSE:
2387         case MODE_SENSE_10:
2388                 retcode = nvme_trans_mode_sense(ns, hdr, cmd);
2389                 break;
2390         case READ_CAPACITY:
2391                 retcode = nvme_trans_read_capacity(ns, hdr, cmd, 0);
2392                 break;
2393         case SERVICE_ACTION_IN_16:
2394                 switch (cmd[1]) {
2395                 case SAI_READ_CAPACITY_16:
2396                         retcode = nvme_trans_read_capacity(ns, hdr, cmd, 1);
2397                         break;
2398                 default:
2399                         goto out;
2400                 }
2401                 break;
2402         case REPORT_LUNS:
2403                 retcode = nvme_trans_report_luns(ns, hdr, cmd);
2404                 break;
2405         case REQUEST_SENSE:
2406                 retcode = nvme_trans_request_sense(ns, hdr, cmd);
2407                 break;
2408         case SYNCHRONIZE_CACHE:
2409                 retcode = nvme_trans_synchronize_cache(ns, hdr);
2410                 break;
2411         case FORMAT_UNIT:
2412                 retcode = nvme_trans_format_unit(ns, hdr, cmd);
2413                 break;
2414         case TEST_UNIT_READY:
2415                 retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
2416                 break;
2417         case WRITE_BUFFER:
2418                 retcode = nvme_trans_write_buffer(ns, hdr, cmd);
2419                 break;
2420         case UNMAP:
2421                 retcode = nvme_trans_unmap(ns, hdr, cmd);
2422                 break;
2423         default:
2424  out:
2425                 retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2426                                 ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
2427                                 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2428                 break;
2429         }
2430         return retcode;
2431 }
2432
2433 int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
2434 {
2435         struct sg_io_hdr hdr;
2436         int retcode;
2437
2438         if (!capable(CAP_SYS_ADMIN))
2439                 return -EACCES;
2440         if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
2441                 return -EFAULT;
2442         if (hdr.interface_id != 'S')
2443                 return -EINVAL;
2444
2445         /*
2446          * A positive return code means a NVMe status, which has been
2447          * translated to sense data.
2448          */
2449         retcode = nvme_scsi_translate(ns, &hdr);
2450         if (retcode < 0)
2451                 return retcode;
2452         if (copy_to_user(u_hdr, &hdr, sizeof(hdr)))
2453                 return -EFAULT;
2454         return 0;
2455 }
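/*
 * A minimal userspace sketch of driving this entry point (illustration
 * only, not part of the driver): it assumes fd refers to an NVMe block
 * device node and that the caller holds CAP_SYS_ADMIN, as checked above.
 * The example issues a standard INQUIRY through the SG_IO ioctl:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	int nvme_sgio_inquiry(int fd, unsigned char *buf, unsigned short len)
 *	{
 *		unsigned char cdb[6] = { 0x12, 0, 0, len >> 8, len & 0xff, 0 };
 *		unsigned char sense[32];
 *		struct sg_io_hdr hdr;
 *
 *		memset(&hdr, 0, sizeof(hdr));
 *		hdr.interface_id = 'S';		// checked by nvme_sg_io()
 *		hdr.cmd_len = sizeof(cdb);
 *		hdr.cmdp = cdb;
 *		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *		hdr.dxfer_len = len;
 *		hdr.dxferp = buf;
 *		hdr.mx_sb_len = sizeof(sense);
 *		hdr.sbp = sense;
 *		hdr.timeout = 1000;		// milliseconds
 *		return ioctl(fd, SG_IO, &hdr);
 *	}
 */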
2456
2457 int nvme_sg_get_version_num(int __user *ip)
2458 {
2459         return put_user(sg_version_num, ip);
2460 }