2 * Copyright (c) 2009, Microsoft Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 * K. Y. Srinivasan <kys@microsoft.com>
23 #include <linux/kernel.h>
24 #include <linux/wait.h>
25 #include <linux/sched.h>
26 #include <linux/completion.h>
27 #include <linux/string.h>
29 #include <linux/delay.h>
30 #include <linux/init.h>
31 #include <linux/slab.h>
32 #include <linux/module.h>
33 #include <linux/device.h>
34 #include <linux/hyperv.h>
35 #include <linux/blkdev.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_cmnd.h>
38 #include <scsi/scsi_host.h>
39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_tcq.h>
41 #include <scsi/scsi_eh.h>
42 #include <scsi/scsi_devinfo.h>
43 #include <scsi/scsi_dbg.h>
46 * All wire protocol details (storage protocol between the guest and the host)
47 * are consolidated here.
49 * Begin protocol definitions.
55 * V1 RC < 2008/1/31: 1.0
56 * V1 RC > 2008/1/31: 2.0
63 #define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
						(((MINOR_) & 0xff)))
66 #define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0)
67 #define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2)
68 #define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1)
69 #define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0)
70 #define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2)
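/*
 * For example, VMSTOR_PROTO_VERSION_WIN8 = VMSTOR_PROTO_VERSION(5, 1)
 * packs to 0x0501: the major version lands in the high byte and the
 * minor version in the low byte of the 16-bit value sent to the host.
 */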
72 /* Packet structure describing virtual storage requests. */
73 enum vstor_packet_operation {
74 VSTOR_OPERATION_COMPLETE_IO = 1,
75 VSTOR_OPERATION_REMOVE_DEVICE = 2,
76 VSTOR_OPERATION_EXECUTE_SRB = 3,
77 VSTOR_OPERATION_RESET_LUN = 4,
78 VSTOR_OPERATION_RESET_ADAPTER = 5,
79 VSTOR_OPERATION_RESET_BUS = 6,
80 VSTOR_OPERATION_BEGIN_INITIALIZATION = 7,
81 VSTOR_OPERATION_END_INITIALIZATION = 8,
82 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
83 VSTOR_OPERATION_QUERY_PROPERTIES = 10,
84 VSTOR_OPERATION_ENUMERATE_BUS = 11,
85 VSTOR_OPERATION_FCHBA_DATA = 12,
86 VSTOR_OPERATION_CREATE_SUB_CHANNELS = 13,
87 VSTOR_OPERATION_MAXIMUM = 13
91 * WWN packet for Fibre Channel HBA
94 struct hv_fc_wwn_packet {
98 u8 primary_port_wwn[8];
99 u8 primary_node_wwn[8];
100 u8 secondary_port_wwn[8];
101 u8 secondary_node_wwn[8];
110 #define SRB_FLAGS_QUEUE_ACTION_ENABLE 0x00000002
111 #define SRB_FLAGS_DISABLE_DISCONNECT 0x00000004
112 #define SRB_FLAGS_DISABLE_SYNCH_TRANSFER 0x00000008
113 #define SRB_FLAGS_BYPASS_FROZEN_QUEUE 0x00000010
114 #define SRB_FLAGS_DISABLE_AUTOSENSE 0x00000020
115 #define SRB_FLAGS_DATA_IN 0x00000040
116 #define SRB_FLAGS_DATA_OUT 0x00000080
117 #define SRB_FLAGS_NO_DATA_TRANSFER 0x00000000
118 #define SRB_FLAGS_UNSPECIFIED_DIRECTION (SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT)
119 #define SRB_FLAGS_NO_QUEUE_FREEZE 0x00000100
120 #define SRB_FLAGS_ADAPTER_CACHE_ENABLE 0x00000200
121 #define SRB_FLAGS_FREE_SENSE_BUFFER 0x00000400
124 * This flag indicates the request is part of the workflow for processing a D3.
126 #define SRB_FLAGS_D3_PROCESSING 0x00000800
127 #define SRB_FLAGS_IS_ACTIVE 0x00010000
128 #define SRB_FLAGS_ALLOCATED_FROM_ZONE 0x00020000
129 #define SRB_FLAGS_SGLIST_FROM_POOL 0x00040000
130 #define SRB_FLAGS_BYPASS_LOCKED_QUEUE 0x00080000
131 #define SRB_FLAGS_NO_KEEP_AWAKE 0x00100000
132 #define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE 0x00200000
133 #define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT 0x00400000
134 #define SRB_FLAGS_DONT_START_NEXT_PACKET 0x00800000
135 #define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000
136 #define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000
140 * Platform neutral description of a scsi request -
141 * this remains the same across the wire regardless of 32/64 bit
142 * note: it's patterned off the SCSI_PASS_THROUGH structure
144 #define STORVSC_MAX_CMD_LEN 0x10
146 #define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14
147 #define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12
149 #define STORVSC_SENSE_BUFFER_SIZE 0x14
150 #define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
153 * Sense buffer size changed in win8; have a run-time
154 * variable to track the size we should use.
156 static int sense_buffer_size;
159 * The size of the vmscsi_request has changed in win8. The
160 * additional size is because of new elements added to the
161 * structure. These elements are valid only when we are talking
 * to a Win8 host.
163 * Track the correction to size we need to apply.
166 static int vmscsi_size_delta;
167 static int vmstor_proto_version;
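/*
 * sense_buffer_size, vmscsi_size_delta and vmstor_proto_version are all
 * filled in by storvsc_channel_init() from the vmstor_protocols[] entry
 * that the host accepts during version negotiation.
 */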
169 struct vmscsi_win8_extension {
171 * The following were added in Windows 8
181 struct vmscsi_request {
192 u8 sense_info_length;
196 u32 data_transfer_length;
199 u8 cdb[STORVSC_MAX_CMD_LEN];
200 u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
201 u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
204 * The following was added in win8.
206 struct vmscsi_win8_extension win8_extension;
208 } __attribute__((packed));
212 * The list of storage protocols in order of preference.
214 struct vmstor_protocol {
215 int protocol_version;
216 int sense_buffer_size;
217 int vmscsi_size_delta;
221 static const struct vmstor_protocol vmstor_protocols[] = {
223 VMSTOR_PROTO_VERSION_WIN10,
224 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
228 VMSTOR_PROTO_VERSION_WIN8_1,
229 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
233 VMSTOR_PROTO_VERSION_WIN8,
234 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
238 VMSTOR_PROTO_VERSION_WIN7,
239 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
240 sizeof(struct vmscsi_win8_extension),
243 VMSTOR_PROTO_VERSION_WIN6,
244 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
245 sizeof(struct vmscsi_win8_extension),
251 * This structure is sent during the initialization phase to get the different
252 * properties of the channel.
255 #define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL 0x1
257 struct vmstorage_channel_properties {
263 u32 max_transfer_bytes;
268 /* This structure is sent during the storage protocol negotiations. */
269 struct vmstorage_protocol_version {
270 /* Major (MSW) and minor (LSW) version numbers. */
274 * Revision number is auto-incremented whenever this file is changed
275 * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
276 * definitely indicate incompatibility--but it does indicate mismatched
 * builds.
278 * This is only used on the Windows side. Just set it to 0.
283 /* Channel Property Flags */
284 #define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1
285 #define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2
287 struct vstor_packet {
288 /* Requested operation type */
289 enum vstor_packet_operation operation;
291 /* Flags - see below for values */
294 /* Status of the request returned from the server side. */
297 /* Data payload area */
300 * Structure used to forward SCSI commands from the
301 * client to the server.
303 struct vmscsi_request vm_srb;
305 /* Structure used to query channel properties. */
306 struct vmstorage_channel_properties storage_channel_properties;
308 /* Used during version negotiations. */
309 struct vmstorage_protocol_version version;
311 /* Fibre channel address packet */
312 struct hv_fc_wwn_packet wwn_packet;
314 /* Number of sub-channels to create */
315 u16 sub_channel_count;
317 /* This will be the maximum of the union members */
325 * This flag indicates that the server should send back a completion for this
329 #define REQUEST_COMPLETION_FLAG 0x1
331 /* Matches Windows-end */
332 enum storvsc_request_type {
339 * SRB status codes and masks; a subset of the codes used here.
342 #define SRB_STATUS_AUTOSENSE_VALID 0x80
343 #define SRB_STATUS_INVALID_LUN 0x20
344 #define SRB_STATUS_SUCCESS 0x01
345 #define SRB_STATUS_ABORTED 0x02
346 #define SRB_STATUS_ERROR 0x04
349 * This is the end of Protocol specific defines.
352 static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
353 static u32 max_outstanding_req_per_channel;
355 static int storvsc_vcpus_per_sub_channel = 4;
357 module_param(storvsc_ringbuffer_size, int, S_IRUGO);
358 MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
360 module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
361 MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
363 * Timeout in seconds for all devices managed by this driver.
365 static int storvsc_timeout = 180;
367 static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
370 static void storvsc_on_channel_callback(void *context);
372 #define STORVSC_MAX_LUNS_PER_TARGET 255
373 #define STORVSC_MAX_TARGETS 2
374 #define STORVSC_MAX_CHANNELS 8
376 #define STORVSC_FC_MAX_LUNS_PER_TARGET 255
377 #define STORVSC_FC_MAX_TARGETS 128
378 #define STORVSC_FC_MAX_CHANNELS 8
380 #define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
381 #define STORVSC_IDE_MAX_TARGETS 1
382 #define STORVSC_IDE_MAX_CHANNELS 1
384 struct storvsc_cmd_request {
385 struct scsi_cmnd *cmd;
387 unsigned int bounce_sgl_count;
388 struct scatterlist *bounce_sgl;
390 struct hv_device *device;
392 /* Synchronize the request/response if needed */
393 struct completion wait_event;
395 struct vmbus_channel_packet_multipage_buffer mpb;
396 struct vmbus_packet_mpb_array *payload;
399 struct vstor_packet vstor_packet;
403 /* A storvsc device is a device object that contains a vmbus channel */
404 struct storvsc_device {
405 struct hv_device *device;
409 bool open_sub_channel;
410 atomic_t num_outstanding_req;
411 struct Scsi_Host *host;
413 wait_queue_head_t waiting_to_drain;
416 * Each unique Port/Path/Target represents 1 channel, i.e. a SCSI
417 * controller. In reality, the pathid and targetid are always 0
418 * and the port is set by us
420 unsigned int port_number;
421 unsigned char path_id;
422 unsigned char target_id;
425 * Max I/O the device can support.
427 u32 max_transfer_bytes;
428 /* Used for vsc/vsp channel reset process */
429 struct storvsc_cmd_request init_request;
430 struct storvsc_cmd_request reset_request;
433 struct hv_host_device {
434 struct hv_device *dev;
437 unsigned char target;
440 struct storvsc_scan_work {
441 struct work_struct work;
442 struct Scsi_Host *host;
446 static void storvsc_device_scan(struct work_struct *work)
448 struct storvsc_scan_work *wrk;
450 struct scsi_device *sdev;
452 wrk = container_of(work, struct storvsc_scan_work, work);
455 sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
458 scsi_rescan_device(&sdev->sdev_gendev);
459 scsi_device_put(sdev);
465 static void storvsc_host_scan(struct work_struct *work)
467 struct storvsc_scan_work *wrk;
468 struct Scsi_Host *host;
469 struct scsi_device *sdev;
471 wrk = container_of(work, struct storvsc_scan_work, work);
475 * Before scanning the host, first check to see if any of the
476 * currently known devices have been hot removed. We issue a
477 * "unit ready" command against all currently known devices.
478 * This I/O will result in an error for devices that have been
479 * removed. As part of handling the I/O error, we remove the device.
481 * When a LUN is added or removed, the host sends us a signal to
482 * scan the host. Thus we are forced to discover the LUNs that
483 * may have been removed this way.
485 mutex_lock(&host->scan_mutex);
486 shost_for_each_device(sdev, host)
487 scsi_test_unit_ready(sdev, 1, 1, NULL);
488 mutex_unlock(&host->scan_mutex);
490 * Now scan the host to discover LUNs that may have been added.
492 scsi_scan_host(host);
497 static void storvsc_remove_lun(struct work_struct *work)
499 struct storvsc_scan_work *wrk;
500 struct scsi_device *sdev;
502 wrk = container_of(work, struct storvsc_scan_work, work);
503 if (!scsi_host_get(wrk->host))
506 sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
509 scsi_remove_device(sdev);
510 scsi_device_put(sdev);
512 scsi_host_put(wrk->host);
520 * We can get incoming messages from the host that are not in response to
521 * messages that we have sent out. An example of this would be messages
522 * received by the guest to notify dynamic addition/removal of LUNs. To
523 * deal with potential race conditions where the driver may be in the
524 * midst of being unloaded when we might receive an unsolicited message
525 * from the host, we have implemented a mechanism to guarantee sequential
 * message processing:
528 * 1) Once the device is marked as being destroyed, we will fail all
 * outgoing messages.
530 * 2) We permit incoming messages when the device is being destroyed,
531 * only to properly account for messages already sent out.
534 static inline struct storvsc_device *get_out_stor_device(
535 struct hv_device *device)
537 struct storvsc_device *stor_device;
539 stor_device = hv_get_drvdata(device);
541 if (stor_device && stor_device->destroy)
548 static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
550 dev->drain_notify = true;
551 wait_event(dev->waiting_to_drain,
552 atomic_read(&dev->num_outstanding_req) == 0);
553 dev->drain_notify = false;
556 static inline struct storvsc_device *get_in_stor_device(
557 struct hv_device *device)
559 struct storvsc_device *stor_device;
561 stor_device = hv_get_drvdata(device);
567 * If the device is being destroyed, allow incoming
568 * traffic only to cleanup outstanding requests.
571 if (stor_device->destroy &&
572 (atomic_read(&stor_device->num_outstanding_req) == 0))
580 static void destroy_bounce_buffer(struct scatterlist *sgl,
581 unsigned int sg_count)
584 struct page *page_buf;
586 for (i = 0; i < sg_count; i++) {
587 page_buf = sg_page((&sgl[i]));
588 if (page_buf != NULL)
589 __free_page(page_buf);
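/*
 * do_bounce_buffer() checks whether the scatterlist is fully page-aligned.
 * It returns -1 when no bounce buffer is needed; any other value is the
 * index of the first entry with a hole (see the caller in
 * storvsc_queuecommand()).
 */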
595 static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
599 /* No need to check */
603 /* We have at least 2 sg entries */
604 for (i = 0; i < sg_count; i++) {
606 /* make sure 1st one does not have hole */
607 if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
609 } else if (i == sg_count - 1) {
610 /* make sure last one does not have hole */
611 if (sgl[i].offset != 0)
614 /* make sure no hole in the middle */
615 if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
622 static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
623 unsigned int sg_count,
629 struct scatterlist *bounce_sgl;
630 struct page *page_buf;
631 unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
633 num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
635 bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
639 sg_init_table(bounce_sgl, num_pages);
640 for (i = 0; i < num_pages; i++) {
641 page_buf = alloc_page(GFP_ATOMIC);
644 sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
650 destroy_bounce_buffer(bounce_sgl, num_pages);
654 /* Assume the original sgl has enough room */
655 static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
656 struct scatterlist *bounce_sgl,
657 unsigned int orig_sgl_count,
658 unsigned int bounce_sgl_count)
662 unsigned long src, dest;
663 unsigned int srclen, destlen, copylen;
664 unsigned int total_copied = 0;
665 unsigned long bounce_addr = 0;
666 unsigned long dest_addr = 0;
668 struct scatterlist *cur_dest_sgl;
669 struct scatterlist *cur_src_sgl;
671 local_irq_save(flags);
672 cur_dest_sgl = orig_sgl;
673 cur_src_sgl = bounce_sgl;
674 for (i = 0; i < orig_sgl_count; i++) {
675 dest_addr = (unsigned long)
676 kmap_atomic(sg_page(cur_dest_sgl)) +
677 cur_dest_sgl->offset;
679 destlen = cur_dest_sgl->length;
681 if (bounce_addr == 0)
682 bounce_addr = (unsigned long)kmap_atomic(
683 sg_page(cur_src_sgl));
686 src = bounce_addr + cur_src_sgl->offset;
687 srclen = cur_src_sgl->length - cur_src_sgl->offset;
689 copylen = min(srclen, destlen);
690 memcpy((void *)dest, (void *)src, copylen);
692 total_copied += copylen;
693 cur_src_sgl->offset += copylen;
697 if (cur_src_sgl->offset == cur_src_sgl->length) {
699 kunmap_atomic((void *)bounce_addr);
703 * It is possible that the number of elements
704 * in the bounce buffer may not be equal to
705 * the number of elements in the original
706 * scatter list. Handle this correctly.
709 if (j == bounce_sgl_count) {
711 * We are done; cleanup and return.
713 kunmap_atomic((void *)(dest_addr -
714 cur_dest_sgl->offset));
715 local_irq_restore(flags);
719 /* if we need to use another bounce buffer */
720 if (destlen || i != orig_sgl_count - 1) {
721 cur_src_sgl = sg_next(cur_src_sgl);
722 bounce_addr = (unsigned long)
724 sg_page(cur_src_sgl));
726 } else if (destlen == 0 && i == orig_sgl_count - 1) {
727 /* unmap the last bounce that is < PAGE_SIZE */
728 kunmap_atomic((void *)bounce_addr);
732 kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
733 cur_dest_sgl = sg_next(cur_dest_sgl);
736 local_irq_restore(flags);
741 /* Assume the bounce_sgl has enough room, i.e. it came from create_bounce_buffer() */
742 static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
743 struct scatterlist *bounce_sgl,
744 unsigned int orig_sgl_count)
748 unsigned long src, dest;
749 unsigned int srclen, destlen, copylen;
750 unsigned int total_copied = 0;
751 unsigned long bounce_addr = 0;
752 unsigned long src_addr = 0;
754 struct scatterlist *cur_src_sgl;
755 struct scatterlist *cur_dest_sgl;
757 local_irq_save(flags);
759 cur_src_sgl = orig_sgl;
760 cur_dest_sgl = bounce_sgl;
762 for (i = 0; i < orig_sgl_count; i++) {
763 src_addr = (unsigned long)
764 kmap_atomic(sg_page(cur_src_sgl)) +
767 srclen = cur_src_sgl->length;
769 if (bounce_addr == 0)
770 bounce_addr = (unsigned long)
771 kmap_atomic(sg_page(cur_dest_sgl));
774 /* assume bounce offset always == 0 */
775 dest = bounce_addr + cur_dest_sgl->length;
776 destlen = PAGE_SIZE - cur_dest_sgl->length;
778 copylen = min(srclen, destlen);
779 memcpy((void *)dest, (void *)src, copylen);
781 total_copied += copylen;
782 cur_dest_sgl->length += copylen;
786 if (cur_dest_sgl->length == PAGE_SIZE) {
787 /* full..move to next entry */
788 kunmap_atomic((void *)bounce_addr);
793 /* if we need to use another bounce buffer */
794 if (srclen && bounce_addr == 0) {
795 cur_dest_sgl = sg_next(cur_dest_sgl);
796 bounce_addr = (unsigned long)
798 sg_page(cur_dest_sgl));
803 kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
804 cur_src_sgl = sg_next(cur_src_sgl);
808 kunmap_atomic((void *)bounce_addr);
810 local_irq_restore(flags);
815 static void handle_sc_creation(struct vmbus_channel *new_sc)
817 struct hv_device *device = new_sc->primary_channel->device_obj;
818 struct storvsc_device *stor_device;
819 struct vmstorage_channel_properties props;
821 stor_device = get_out_stor_device(device);
825 if (stor_device->open_sub_channel == false)
828 memset(&props, 0, sizeof(struct vmstorage_channel_properties));
831 storvsc_ringbuffer_size,
832 storvsc_ringbuffer_size,
834 sizeof(struct vmstorage_channel_properties),
835 storvsc_on_channel_callback, new_sc);
838 static void handle_multichannel_storage(struct hv_device *device, int max_chns)
840 struct storvsc_device *stor_device;
841 int num_cpus = num_online_cpus();
843 struct storvsc_cmd_request *request;
844 struct vstor_packet *vstor_packet;
847 num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
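	/* Never ask for more sub-channels than there are online CPUs. */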
848 stor_device = get_out_stor_device(device);
852 request = &stor_device->init_request;
853 vstor_packet = &request->vstor_packet;
855 stor_device->open_sub_channel = true;
857 * Establish a handler for dealing with subchannels.
859 vmbus_set_sc_create_callback(device->channel, handle_sc_creation);
862 * Check to see if sub-channels have already been created. This
863 * can happen when this driver is re-loaded after unloading.
866 if (vmbus_are_subchannels_present(device->channel))
869 stor_device->open_sub_channel = false;
871 * Request the host to create sub-channels.
873 memset(request, 0, sizeof(struct storvsc_cmd_request));
874 init_completion(&request->wait_event);
875 vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS;
876 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
877 vstor_packet->sub_channel_count = num_sc;
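	/*
	 * The payload size passed to vmbus_sendpacket() below is shrunk by
	 * vmscsi_size_delta so that pre-Win8 hosts, which know nothing about
	 * the win8_extension fields, see exactly the packet size they expect.
	 */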
879 ret = vmbus_sendpacket(device->channel, vstor_packet,
880 (sizeof(struct vstor_packet) -
882 (unsigned long)request,
884 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
889 t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
893 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
894 vstor_packet->status != 0)
898 * Now that we created the sub-channels, invoke the check; this
899 * may trigger the callback.
901 stor_device->open_sub_channel = true;
902 vmbus_are_subchannels_present(device->channel);
905 static int storvsc_channel_init(struct hv_device *device)
907 struct storvsc_device *stor_device;
908 struct storvsc_cmd_request *request;
909 struct vstor_packet *vstor_packet;
912 bool process_sub_channels = false;
914 stor_device = get_out_stor_device(device);
918 request = &stor_device->init_request;
919 vstor_packet = &request->vstor_packet;
922 * Now, initiate the vsc/vsp initialization protocol on the open
 * channel.
925 memset(request, 0, sizeof(struct storvsc_cmd_request));
926 init_completion(&request->wait_event);
927 vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
928 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
930 ret = vmbus_sendpacket(device->channel, vstor_packet,
931 (sizeof(struct vstor_packet) -
933 (unsigned long)request,
935 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
939 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
945 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
946 vstor_packet->status != 0)
950 for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) {
951 /* reuse the packet for version range supported */
952 memset(vstor_packet, 0, sizeof(struct vstor_packet));
953 vstor_packet->operation =
954 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
955 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
957 vstor_packet->version.major_minor =
958 vmstor_protocols[i].protocol_version;
961 * The revision number is only used in Windows; set it to 0.
963 vstor_packet->version.revision = 0;
965 ret = vmbus_sendpacket(device->channel, vstor_packet,
966 (sizeof(struct vstor_packet) -
968 (unsigned long)request,
970 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
974 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
980 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) {
985 if (vstor_packet->status == 0) {
986 vmstor_proto_version =
987 vmstor_protocols[i].protocol_version;
990 vmstor_protocols[i].sense_buffer_size;
993 vmstor_protocols[i].vmscsi_size_delta;
999 if (vstor_packet->status != 0) {
1005 memset(vstor_packet, 0, sizeof(struct vstor_packet));
1006 vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
1007 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
1009 ret = vmbus_sendpacket(device->channel, vstor_packet,
1010 (sizeof(struct vstor_packet) -
1012 (unsigned long)request,
1014 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1019 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
1025 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
1026 vstor_packet->status != 0)
1030 * Check to see if multi-channel support is there.
1031 * Hosts that implement protocol version of 5.1 and above
1032 * support multi-channel.
1034 max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
1035 if (vmbus_proto_version >= VERSION_WIN8) {
1036 if (vstor_packet->storage_channel_properties.flags &
1037 STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
1038 process_sub_channels = true;
1040 stor_device->max_transfer_bytes =
1041 vstor_packet->storage_channel_properties.max_transfer_bytes;
1043 memset(vstor_packet, 0, sizeof(struct vstor_packet));
1044 vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
1045 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
1047 ret = vmbus_sendpacket(device->channel, vstor_packet,
1048 (sizeof(struct vstor_packet) -
1050 (unsigned long)request,
1052 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1057 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
1063 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
1064 vstor_packet->status != 0)
1067 if (process_sub_channels)
1068 handle_multichannel_storage(device, max_chns);
1075 static void storvsc_handle_error(struct vmscsi_request *vm_srb,
1076 struct scsi_cmnd *scmnd,
1077 struct Scsi_Host *host,
1080 struct storvsc_scan_work *wrk;
1081 void (*process_err_fn)(struct work_struct *work);
1082 bool do_work = false;
1084 switch (vm_srb->srb_status) {
1085 case SRB_STATUS_ERROR:
1087 * If there is an error, offline the device since all
1088 * error recovery strategies would have already been
1089 * deployed on the host side. However, if the command
1090 * was a pass-through command, deal with it appropriately.
1092 switch (scmnd->cmnd[0]) {
1095 set_host_byte(scmnd, DID_PASSTHROUGH);
1098 * On some Windows hosts the TEST_UNIT_READY command can return
1099 * SRB_STATUS_ERROR; let the upper-level code deal with it
1100 * based on the sense information.
1102 case TEST_UNIT_READY:
1105 set_host_byte(scmnd, DID_TARGET_FAILURE);
1108 case SRB_STATUS_INVALID_LUN:
1110 process_err_fn = storvsc_remove_lun;
1112 case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
1113 if ((asc == 0x2a) && (ascq == 0x9)) {
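			/*
			 * ASC 0x2a / ASCQ 0x09 is "capacity data has changed";
			 * rescan the device so the new size is picked up.
			 */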
1115 process_err_fn = storvsc_device_scan;
1117 * Retry the I/O that triggered this.
1119 set_host_byte(scmnd, DID_REQUEUE);
1128 * We need to schedule work to process this error; schedule it.
1130 wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
1132 set_host_byte(scmnd, DID_TARGET_FAILURE);
1137 wrk->lun = vm_srb->lun;
1138 INIT_WORK(&wrk->work, process_err_fn);
1139 schedule_work(&wrk->work);
1143 static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
1145 struct scsi_cmnd *scmnd = cmd_request->cmd;
1146 struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
1147 struct scsi_sense_hdr sense_hdr;
1148 struct vmscsi_request *vm_srb;
1149 struct Scsi_Host *host;
1150 struct storvsc_device *stor_dev;
1151 struct hv_device *dev = host_dev->dev;
1152 u32 payload_sz = cmd_request->payload_sz;
1153 void *payload = cmd_request->payload;
1155 stor_dev = get_in_stor_device(dev);
1156 host = stor_dev->host;
1158 vm_srb = &cmd_request->vstor_packet.vm_srb;
1159 if (cmd_request->bounce_sgl_count) {
1160 if (vm_srb->data_in == READ_TYPE)
1161 copy_from_bounce_buffer(scsi_sglist(scmnd),
1162 cmd_request->bounce_sgl,
1163 scsi_sg_count(scmnd),
1164 cmd_request->bounce_sgl_count);
1165 destroy_bounce_buffer(cmd_request->bounce_sgl,
1166 cmd_request->bounce_sgl_count);
1169 scmnd->result = vm_srb->scsi_status;
1171 if (scmnd->result) {
1172 if (scsi_normalize_sense(scmnd->sense_buffer,
1173 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
1174 scsi_print_sense_hdr(scmnd->device, "storvsc",
1178 if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
1179 storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
1182 scsi_set_resid(scmnd,
1183 cmd_request->payload->range.len -
1184 vm_srb->data_transfer_length);
1186 scmnd->scsi_done(scmnd);
1189 sizeof(struct vmbus_channel_packet_multipage_buffer))
1193 static void storvsc_on_io_completion(struct hv_device *device,
1194 struct vstor_packet *vstor_packet,
1195 struct storvsc_cmd_request *request)
1197 struct storvsc_device *stor_device;
1198 struct vstor_packet *stor_pkt;
1200 stor_device = hv_get_drvdata(device);
1201 stor_pkt = &request->vstor_packet;
1204 * The current SCSI handling on the host side does
1205 * not correctly handle:
1206 * INQUIRY command with page code parameter set to 0x80
1207 * MODE_SENSE command with cmd[2] == 0x1c
1209 * Set up the SRB and SCSI status so this won't be fatal.
1210 * We do this so we can distinguish truly fatal failures
1211 * (srb status == 0x4) and off-line the device in that case.
1214 if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
1215 (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
1216 vstor_packet->vm_srb.scsi_status = 0;
1217 vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
1221 /* Copy over the status...etc */
1222 stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
1223 stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
1224 stor_pkt->vm_srb.sense_info_length =
1225 vstor_packet->vm_srb.sense_info_length;
1228 if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
1229 /* CHECK_CONDITION */
1230 if (vstor_packet->vm_srb.srb_status &
1231 SRB_STATUS_AUTOSENSE_VALID) {
1232 /* autosense data available */
1234 memcpy(request->cmd->sense_buffer,
1235 vstor_packet->vm_srb.sense_data,
1236 vstor_packet->vm_srb.sense_info_length);
1241 stor_pkt->vm_srb.data_transfer_length =
1242 vstor_packet->vm_srb.data_transfer_length;
1244 storvsc_command_completion(request);
1246 if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
1247 stor_device->drain_notify)
1248 wake_up(&stor_device->waiting_to_drain);
1253 static void storvsc_on_receive(struct hv_device *device,
1254 struct vstor_packet *vstor_packet,
1255 struct storvsc_cmd_request *request)
1257 struct storvsc_scan_work *work;
1258 struct storvsc_device *stor_device;
1260 switch (vstor_packet->operation) {
1261 case VSTOR_OPERATION_COMPLETE_IO:
1262 storvsc_on_io_completion(device, vstor_packet, request);
1265 case VSTOR_OPERATION_REMOVE_DEVICE:
1266 case VSTOR_OPERATION_ENUMERATE_BUS:
1267 stor_device = get_in_stor_device(device);
1268 work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
1272 INIT_WORK(&work->work, storvsc_host_scan);
1273 work->host = stor_device->host;
1274 schedule_work(&work->work);
1282 static void storvsc_on_channel_callback(void *context)
1284 struct vmbus_channel *channel = (struct vmbus_channel *)context;
1285 struct hv_device *device;
1286 struct storvsc_device *stor_device;
1289 unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
1290 struct storvsc_cmd_request *request;
1293 if (channel->primary_channel != NULL)
1294 device = channel->primary_channel->device_obj;
1296 device = channel->device_obj;
1298 stor_device = get_in_stor_device(device);
1303 ret = vmbus_recvpacket(channel, packet,
1304 ALIGN((sizeof(struct vstor_packet) -
1305 vmscsi_size_delta), 8),
1306 &bytes_recvd, &request_id);
1307 if (ret == 0 && bytes_recvd > 0) {
1309 request = (struct storvsc_cmd_request *)
1310 (unsigned long)request_id;
1312 if ((request == &stor_device->init_request) ||
1313 (request == &stor_device->reset_request)) {
1315 memcpy(&request->vstor_packet, packet,
1316 (sizeof(struct vstor_packet) -
1317 vmscsi_size_delta));
1318 complete(&request->wait_event);
1320 storvsc_on_receive(device,
1321 (struct vstor_packet *)packet,
1332 static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
1334 struct vmstorage_channel_properties props;
1337 memset(&props, 0, sizeof(struct vmstorage_channel_properties));
1339 ret = vmbus_open(device->channel,
1343 sizeof(struct vmstorage_channel_properties),
1344 storvsc_on_channel_callback, device->channel);
1349 ret = storvsc_channel_init(device);
1354 static int storvsc_dev_remove(struct hv_device *device)
1356 struct storvsc_device *stor_device;
1357 unsigned long flags;
1359 stor_device = hv_get_drvdata(device);
1361 spin_lock_irqsave(&device->channel->inbound_lock, flags);
1362 stor_device->destroy = true;
1363 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
1366 * At this point, all outbound traffic should be disabled. We
1367 * only allow inbound traffic (responses) to proceed so that
1368 * outstanding requests can be completed.
1371 storvsc_wait_to_drain(stor_device);
1374 * Since we have already drained, we don't need to busy wait
1375 * as was done in final_release_stor_device()
1376 * Note that we cannot set the ext pointer to NULL until
1377 * we have drained - to drain the outgoing packets, we need to
1378 * allow incoming packets.
1380 spin_lock_irqsave(&device->channel->inbound_lock, flags);
1381 hv_set_drvdata(device, NULL);
1382 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
1384 /* Close the channel */
1385 vmbus_close(device->channel);
1391 static int storvsc_do_io(struct hv_device *device,
1392 struct storvsc_cmd_request *request)
1394 struct storvsc_device *stor_device;
1395 struct vstor_packet *vstor_packet;
1396 struct vmbus_channel *outgoing_channel;
1399 vstor_packet = &request->vstor_packet;
1400 stor_device = get_out_stor_device(device);
1406 request->device = device;
1408 * Select an appropriate channel to send the request out.
1411 outgoing_channel = vmbus_get_outgoing_channel(device->channel);
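	/*
	 * The outgoing channel is chosen from the primary and any open
	 * sub-channels, typically based on the CPU issuing the request,
	 * so the I/O load is spread across channels.
	 */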
1414 vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
1416 vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
1420 vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
1423 vstor_packet->vm_srb.data_transfer_length =
1424 request->payload->range.len;
1426 vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
1428 if (request->payload->range.len) {
1430 ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
1431 request->payload, request->payload_sz,
1433 (sizeof(struct vstor_packet) -
1435 (unsigned long)request);
1437 ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
1438 (sizeof(struct vstor_packet) -
1440 (unsigned long)request,
1442 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1448 atomic_inc(&stor_device->num_outstanding_req);
1453 static int storvsc_device_configure(struct scsi_device *sdevice)
1456 blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
1458 blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
1460 blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
1462 sdevice->no_write_same = 1;
1465 * Add blist flags to permit the reading of the VPD pages even when
1466 * the target may claim SPC-2 compliance. MSFT targets currently
1467 * claim SPC-2 compliance while they implement post SPC-2 features.
1468 * With this patch we can correctly handle WRITE_SAME_16 issues.
1470 sdevice->sdev_bflags |= msft_blist_flags;
1473 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
1474 * if the device is a MSFT virtual device.
1476 if (!strncmp(sdevice->vendor, "Msft", 4)) {
1477 switch (vmbus_proto_version) {
1479 case VERSION_WIN8_1:
1480 sdevice->scsi_level = SCSI_SPC_3;
1488 static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
1489 sector_t capacity, int *info)
1491 sector_t nsect = capacity;
1492 sector_t cylinders = nsect;
1493 int heads, sectors_pt;
1496 * We are making up these values; let us keep it simple.
heads = 0xff;
1499 sectors_pt = 0x3f; /* Sectors per track */
1500 sector_div(cylinders, heads * sectors_pt);
1501 if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
1505 info[1] = sectors_pt;
1506 info[2] = (int)cylinders;
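	/*
	 * Worked example: a 1 GiB disk has 2097152 512-byte sectors; with the
	 * made-up geometry of 255 heads and 63 sectors per track this gives
	 * 2097152 / (255 * 63) = 130 cylinders, so info[] ends up {255, 63, 130}.
	 */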
1511 static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
1513 struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
1514 struct hv_device *device = host_dev->dev;
1516 struct storvsc_device *stor_device;
1517 struct storvsc_cmd_request *request;
1518 struct vstor_packet *vstor_packet;
1522 stor_device = get_out_stor_device(device);
1526 request = &stor_device->reset_request;
1527 vstor_packet = &request->vstor_packet;
1529 init_completion(&request->wait_event);
1531 vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
1532 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
1533 vstor_packet->vm_srb.path_id = stor_device->path_id;
1535 ret = vmbus_sendpacket(device->channel, vstor_packet,
1536 (sizeof(struct vstor_packet) -
1538 (unsigned long)&stor_device->reset_request,
1540 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1544 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
1546 return TIMEOUT_ERROR;
1550 * At this point, all outstanding requests in the adapter
1551 * should have been flushed out and returned to us.
1552 * There is a potential race here where the host may be in
1553 * the process of responding when we return from here.
1554 * Just wait for all in-transit packets to be accounted for
1555 * before we return from here.
1557 storvsc_wait_to_drain(stor_device);
1563 * The host guarantees to respond to each command, although I/O latencies might
1564 * be unbounded on Azure. Reset the timer unconditionally to give the host a
1565 * chance to perform EH.
1567 static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
1569 return BLK_EH_RESET_TIMER;
1572 static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
1574 bool allowed = true;
1575 u8 scsi_op = scmnd->cmnd[0];
1578 /* the host does not handle WRITE_SAME, log accidental usage */
1581 * smartd sends this command and the host does not handle
1582 * this. So, don't send it.
1585 scmnd->result = ILLEGAL_REQUEST << 16;
1594 static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
1597 struct hv_host_device *host_dev = shost_priv(host);
1598 struct hv_device *dev = host_dev->dev;
1599 struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
1601 struct scatterlist *sgl;
1602 unsigned int sg_count = 0;
1603 struct vmscsi_request *vm_srb;
1604 struct scatterlist *cur_sgl;
1605 struct vmbus_packet_mpb_array *payload;
1609 if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
1611 * On legacy hosts filter unimplemented commands.
1612 * Future hosts are expected to correctly handle
1613 * unsupported commands. Furthermore, it is
1614 * possible that some of the currently
1615 * unsupported commands may be supported in
1616 * future versions of the host.
1618 if (!storvsc_scsi_cmd_ok(scmnd)) {
1619 scmnd->scsi_done(scmnd);
1624 /* Setup the cmd request */
1625 cmd_request->cmd = scmnd;
1627 vm_srb = &cmd_request->vstor_packet.vm_srb;
1628 vm_srb->win8_extension.time_out_value = 60;
1630 vm_srb->win8_extension.srb_flags |=
1631 (SRB_FLAGS_QUEUE_ACTION_ENABLE |
1632 SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
1635 switch (scmnd->sc_data_direction) {
1637 vm_srb->data_in = WRITE_TYPE;
1638 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
1640 case DMA_FROM_DEVICE:
1641 vm_srb->data_in = READ_TYPE;
1642 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
1645 vm_srb->data_in = UNKNOWN_TYPE;
1646 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
1650 * This is DMA_BIDIRECTIONAL or something else we are never
1651 * supposed to see here.
1653 WARN(1, "Unexpected data direction: %d\n",
1654 scmnd->sc_data_direction);
1659 vm_srb->port_number = host_dev->port;
1660 vm_srb->path_id = scmnd->device->channel;
1661 vm_srb->target_id = scmnd->device->id;
1662 vm_srb->lun = scmnd->device->lun;
1664 vm_srb->cdb_length = scmnd->cmd_len;
1666 memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
1668 sgl = (struct scatterlist *)scsi_sglist(scmnd);
1669 sg_count = scsi_sg_count(scmnd);
1671 length = scsi_bufflen(scmnd);
1672 payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
1673 payload_sz = sizeof(cmd_request->mpb);
1676 /* check if we need to bounce the sgl */
1677 if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
1678 cmd_request->bounce_sgl =
1679 create_bounce_buffer(sgl, sg_count,
1682 if (!cmd_request->bounce_sgl)
1683 return SCSI_MLQUEUE_HOST_BUSY;
1685 cmd_request->bounce_sgl_count =
1686 ALIGN(length, PAGE_SIZE) >> PAGE_SHIFT;
1688 if (vm_srb->data_in == WRITE_TYPE)
1689 copy_to_bounce_buffer(sgl,
1690 cmd_request->bounce_sgl, sg_count);
1692 sgl = cmd_request->bounce_sgl;
1693 sg_count = cmd_request->bounce_sgl_count;
1697 if (sg_count > MAX_PAGE_BUFFER_COUNT) {
1699 payload_sz = (sg_count * sizeof(void *) +
1700 sizeof(struct vmbus_packet_mpb_array));
1701 payload = kmalloc(payload_sz, GFP_ATOMIC);
1703 if (cmd_request->bounce_sgl_count)
1704 destroy_bounce_buffer(
1705 cmd_request->bounce_sgl,
1706 cmd_request->bounce_sgl_count);
1708 return SCSI_MLQUEUE_DEVICE_BUSY;
1712 payload->range.len = length;
1713 payload->range.offset = sgl[0].offset;
1716 for (i = 0; i < sg_count; i++) {
1717 payload->range.pfn_array[i] =
1718 page_to_pfn(sg_page((cur_sgl)));
1719 cur_sgl = sg_next(cur_sgl);
1722 } else if (scsi_sglist(scmnd)) {
1723 payload->range.len = length;
1724 payload->range.offset =
1725 virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
1726 payload->range.pfn_array[0] =
1727 virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
1730 cmd_request->payload = payload;
1731 cmd_request->payload_sz = payload_sz;
1733 /* Invokes the vsc to start an IO */
1734 ret = storvsc_do_io(dev, cmd_request);
1736 if (ret == -EAGAIN) {
1739 if (cmd_request->bounce_sgl_count)
1740 destroy_bounce_buffer(cmd_request->bounce_sgl,
1741 cmd_request->bounce_sgl_count);
1743 return SCSI_MLQUEUE_DEVICE_BUSY;
1749 static struct scsi_host_template scsi_driver = {
1750 .module = THIS_MODULE,
1751 .name = "storvsc_host_t",
1752 .cmd_size = sizeof(struct storvsc_cmd_request),
1753 .bios_param = storvsc_get_chs,
1754 .queuecommand = storvsc_queuecommand,
1755 .eh_host_reset_handler = storvsc_host_reset_handler,
1756 .proc_name = "storvsc_host",
1757 .eh_timed_out = storvsc_eh_timed_out,
1758 .slave_configure = storvsc_device_configure,
1761 .use_clustering = ENABLE_CLUSTERING,
1762 /* Make sure we don't get an sg segment that crosses a page boundary */
1763 .dma_boundary = PAGE_SIZE-1,
1773 static const struct hv_vmbus_device_id id_table[] = {
1776 .driver_data = SCSI_GUID
1780 .driver_data = IDE_GUID
1782 /* Fibre Channel GUID */
1785 .driver_data = SFC_GUID
1790 MODULE_DEVICE_TABLE(vmbus, id_table);
1792 static int storvsc_probe(struct hv_device *device,
1793 const struct hv_vmbus_device_id *dev_id)
1796 int num_cpus = num_online_cpus();
1797 struct Scsi_Host *host;
1798 struct hv_host_device *host_dev;
1799 bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
1801 struct storvsc_device *stor_device;
1802 int max_luns_per_target;
1805 int max_sub_channels = 0;
1808 * Based on the Windows host we are running on,
1809 * set state to properly communicate with the host.
1812 if (vmbus_proto_version < VERSION_WIN8) {
1813 sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
1814 vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
1815 max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
1816 max_targets = STORVSC_IDE_MAX_TARGETS;
1817 max_channels = STORVSC_IDE_MAX_CHANNELS;
1819 sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
1820 vmscsi_size_delta = 0;
1821 max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
1822 max_targets = STORVSC_MAX_TARGETS;
1823 max_channels = STORVSC_MAX_CHANNELS;
1825 * On Windows8 and above, we support sub-channels for storage.
1826 * The number of sub-channels offered is based on the number of
1827 * VCPUs in the guest.
1829 max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
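		/*
		 * For example, a guest with 8 VCPUs and the default ratio of 4
		 * gets 2 sub-channels, i.e. 3 channels in total counting the
		 * primary.
		 */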
1832 scsi_driver.can_queue = (max_outstanding_req_per_channel *
1833 (max_sub_channels + 1));
1835 host = scsi_host_alloc(&scsi_driver,
1836 sizeof(struct hv_host_device));
1840 host_dev = shost_priv(host);
1841 memset(host_dev, 0, sizeof(struct hv_host_device));
1843 host_dev->port = host->host_no;
1844 host_dev->dev = device;
1847 stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
1853 stor_device->destroy = false;
1854 stor_device->open_sub_channel = false;
1855 init_waitqueue_head(&stor_device->waiting_to_drain);
1856 stor_device->device = device;
1857 stor_device->host = host;
1858 hv_set_drvdata(device, stor_device);
1860 stor_device->port_number = host->host_no;
1861 ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
1865 host_dev->path = stor_device->path_id;
1866 host_dev->target = stor_device->target_id;
1868 switch (dev_id->driver_data) {
1870 host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
1871 host->max_id = STORVSC_FC_MAX_TARGETS;
1872 host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
1876 host->max_lun = max_luns_per_target;
1877 host->max_id = max_targets;
1878 host->max_channel = max_channels - 1;
1882 host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
1883 host->max_id = STORVSC_IDE_MAX_TARGETS;
1884 host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
1887 /* max cmd length */
1888 host->max_cmd_len = STORVSC_MAX_CMD_LEN;
1891 * set the table size based on the info we got
1894 host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
1896 /* Register the HBA and start the scsi bus scan */
1897 ret = scsi_add_host(host, &device->device);
1902 scsi_scan_host(host);
1904 target = (device->dev_instance.b[5] << 8 |
1905 device->dev_instance.b[4]);
1906 ret = scsi_add_device(host, 0, target, 0);
1908 scsi_remove_host(host);
1916 * Once we have connected with the host, we would need to
1917 * invoke storvsc_dev_remove() to roll back this state and
1918 * this call also frees up the stor_device; hence the jump around
1921 storvsc_dev_remove(device);
1928 scsi_host_put(host);
1932 static int storvsc_remove(struct hv_device *dev)
1934 struct storvsc_device *stor_device = hv_get_drvdata(dev);
1935 struct Scsi_Host *host = stor_device->host;
1937 scsi_remove_host(host);
1938 storvsc_dev_remove(dev);
1939 scsi_host_put(host);
1944 static struct hv_driver storvsc_drv = {
1945 .name = KBUILD_MODNAME,
1946 .id_table = id_table,
1947 .probe = storvsc_probe,
1948 .remove = storvsc_remove,
1951 static int __init storvsc_drv_init(void)
1955 * Divide the ring buffer data size (which is 1 page less
1956 * than the ring buffer size since that page is reserved for
1957 * the ring buffer indices) by the max request size (which is
1958 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
1960 max_outstanding_req_per_channel =
1961 ((storvsc_ringbuffer_size - PAGE_SIZE) /
1962 ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
1963 sizeof(struct vstor_packet) + sizeof(u64) -
		vmscsi_size_delta,
		sizeof(u64)));
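	/*
	 * storvsc_probe() later multiplies this per-channel limit by
	 * (max_sub_channels + 1) to set scsi_driver.can_queue.
	 */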
1967 return vmbus_driver_register(&storvsc_drv);
1970 static void __exit storvsc_drv_exit(void)
1972 vmbus_driver_unregister(&storvsc_drv);
1975 MODULE_LICENSE("GPL");
1976 MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
1977 module_init(storvsc_drv_init);
1978 module_exit(storvsc_drv_exit);