2 #include <linux/spinlock.h>
3 #include <linux/slab.h>
4 #include <linux/blkdev.h>
5 #include <linux/hdreg.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/virtio.h>
9 #include <linux/virtio_blk.h>
10 #include <linux/scatterlist.h>
11 #include <linux/string_helpers.h>
12 #include <scsi/scsi_cmnd.h>
13 #include <linux/idr.h>
/* Expose the bio-vs-request-queue mode switch read-only in sysfs. */
18 module_param(use_bio, bool, S_IRUGO);
/* Allocator of per-device indexes; backs unique minor numbers. */
21 static DEFINE_IDA(vd_index_ida);
/* Workqueue for config-change handling and deferred bio submission. */
23 struct workqueue_struct *virtblk_wq;
/* Per-device state for one virtio-blk device. */
27 struct virtio_device *vdev;
/* Waiters blocked until virtqueue space frees up (see virtblk_add_req). */
29 wait_queue_head_t queue_wait;
31 /* The disk structure for the kernel. */
36 /* Process context for config space updates */
37 struct work_struct config_work;
39 /* Lock for config space updates */
40 struct mutex config_lock;
42 /* When false, the config-change work handler bails out (remove/freeze). */
45 /* What host tells us, plus 2 for header & trailer. */
46 unsigned int sg_elems;
48 /* Ida index - used to track minor number allocations. */
51 /* Scatterlist: can be too big for stack. */
52 struct scatterlist sg[/*sg_elems*/];
/* Per-request descriptor shared by the request-queue and bio paths. */
59 struct virtio_blk_outhdr out_hdr;
/* Filled in by the host for SCSI passthrough (REQ_TYPE_BLOCK_PC) requests. */
60 struct virtio_scsi_inhdr in_hdr;
/* Lets the bio path resubmit the flush/data half from process context. */
61 struct work_struct work;
62 struct virtio_blk *vblk;
/* Flexible array member: per-request scatterlist used by the bio path. */
65 struct scatterlist sg[];
/* Convert the status byte written by the device into a kernel error code. */
75 static inline int virtblk_result(struct virtblk_req *vbr)
77 switch (vbr->status) {
/* Host does not implement this request type. */
80 case VIRTIO_BLK_S_UNSUPP:
/*
 * Allocate a request descriptor from the device's mempool; may return
 * NULL when mempool_alloc fails under @gfp_mask.
 */
87 static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
90 struct virtblk_req *vbr;
92 vbr = mempool_alloc(vblk->pool, gfp_mask);
/* Initialize the per-request sg table at full device capacity. */
98 sg_init_table(vbr->sg, vblk->sg_elems);
/*
 * Queue one request on the virtqueue: @out device-readable segments
 * followed by @in device-writable segments, with @vbr as the token
 * returned by virtqueue_get_buf() on completion.
 */
103 static inline int __virtblk_add_req(struct virtqueue *vq,
104 struct virtblk_req *vbr,
108 return virtqueue_add_buf(vq, vbr->sg, out, in, vbr, GFP_ATOMIC);
/*
 * Add a bio-path request to the virtqueue, sleeping uninterruptibly
 * until space is available, then kick the device.  Runs in process
 * context; takes the request queue's lock around virtqueue access.
 */
111 static void virtblk_add_req(struct virtblk_req *vbr,
112 unsigned int out, unsigned int in)
114 struct virtio_blk *vblk = vbr->vblk;
118 spin_lock_irq(vblk->disk->queue->queue_lock);
/* Retry until the add succeeds; virtblk_done() wakes queue_wait. */
119 while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr,
121 prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
122 TASK_UNINTERRUPTIBLE);
/* Drop the lock while sleeping so completions can make progress. */
124 spin_unlock_irq(vblk->disk->queue->queue_lock);
126 spin_lock_irq(vblk->disk->queue->queue_lock);
128 finish_wait(&vblk->queue_wait, &wait);
/* Notify the host that a new buffer is available. */
131 virtqueue_kick(vblk->vq);
132 spin_unlock_irq(vblk->disk->queue->queue_lock);
/*
 * Submit a bare flush (no data segments) on the bio path: one
 * device-readable header followed by one device-writable status byte.
 */
135 static void virtblk_bio_send_flush(struct virtblk_req *vbr)
137 unsigned int out = 0, in = 0;
/* Mark so virtblk_bio_done() routes completion to the flush handler. */
139 vbr->flags |= VBLK_IS_FLUSH;
140 vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
141 vbr->out_hdr.sector = 0;
142 vbr->out_hdr.ioprio = 0;
143 sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
144 sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));
146 virtblk_add_req(vbr, out, in);
/*
 * Submit a bio's data on the bio path: out_hdr, then the bio's data
 * segments, then the status byte.  Transfer direction follows bi_rw.
 */
149 static void virtblk_bio_send_data(struct virtblk_req *vbr)
151 struct virtio_blk *vblk = vbr->vblk;
152 unsigned int num, out = 0, in = 0;
153 struct bio *bio = vbr->bio;
/* This is (or resumes as) a data request, not a flush. */
155 vbr->flags &= ~VBLK_IS_FLUSH;
156 vbr->out_hdr.type = 0;
157 vbr->out_hdr.sector = bio->bi_sector;
158 vbr->out_hdr.ioprio = bio_prio(bio);
160 sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
/* num = number of data segments mapped from the bio. */
162 num = blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg + out);
/* Status byte goes last, after header and data. */
164 sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
165 sizeof(vbr->status));
168 if (bio->bi_rw & REQ_WRITE) {
/* Write: data segments are device-readable. */
169 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
/* Read: data segments are device-writable. */
172 vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
177 virtblk_add_req(vbr, out, in);
/* Process-context trampoline: send the data half of a FLUSH+data bio. */
180 static void virtblk_bio_send_data_work(struct work_struct *work)
182 struct virtblk_req *vbr;
184 vbr = container_of(work, struct virtblk_req, work);
186 virtblk_bio_send_data(vbr);
/* Process-context trampoline: send the post-flush of a FUA bio. */
189 static void virtblk_bio_send_flush_work(struct work_struct *work)
191 struct virtblk_req *vbr;
193 vbr = container_of(work, struct virtblk_req, work);
195 virtblk_bio_send_flush(vbr);
/*
 * Complete a request submitted via the request queue: copy back SCSI
 * passthrough results or the GET_ID error flag, end the request with
 * the translated status, and return the descriptor to the mempool.
 */
198 static inline void virtblk_request_done(struct virtblk_req *vbr)
200 struct virtio_blk *vblk = vbr->vblk;
201 struct request *req = vbr->req;
202 int error = virtblk_result(vbr);
204 if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
/* SCSI passthrough: propagate residual, sense length and errors. */
205 req->resid_len = vbr->in_hdr.residual;
206 req->sense_len = vbr->in_hdr.sense_len;
207 req->errors = vbr->in_hdr.errors;
208 } else if (req->cmd_type == REQ_TYPE_SPECIAL) {
/* GET_ID request: callers only care whether it failed. */
209 req->errors = (error != 0);
212 __blk_end_request_all(req, error);
213 mempool_free(vbr, vblk->pool);
/*
 * Flush completion on the bio path.  If VBLK_REQ_DATA is still set this
 * was a preflush and the write data must follow (from process context,
 * since we are in the vq callback); otherwise end the bio and free.
 */
216 static inline void virtblk_bio_flush_done(struct virtblk_req *vbr)
218 struct virtio_blk *vblk = vbr->vblk;
220 if (vbr->flags & VBLK_REQ_DATA) {
221 /* Send out the actual write data */
222 INIT_WORK(&vbr->work, virtblk_bio_send_data_work);
223 queue_work(virtblk_wq, &vbr->work);
225 bio_endio(vbr->bio, virtblk_result(vbr));
226 mempool_free(vbr, vblk->pool);
/*
 * Data completion on the bio path.  FUA bios get a post-flush before
 * bio_endio(); VBLK_REQ_DATA is cleared so the flush completion knows
 * to end the bio instead of resending data.
 */
230 static inline void virtblk_bio_data_done(struct virtblk_req *vbr)
232 struct virtio_blk *vblk = vbr->vblk;
234 if (unlikely(vbr->flags & VBLK_REQ_FUA)) {
235 /* Send out a flush before end the bio */
236 vbr->flags &= ~VBLK_REQ_DATA;
237 INIT_WORK(&vbr->work, virtblk_bio_send_flush_work);
238 queue_work(virtblk_wq, &vbr->work);
240 bio_endio(vbr->bio, virtblk_result(vbr));
241 mempool_free(vbr, vblk->pool);
/* Dispatch a completed bio-path request to its flush or data handler. */
245 static inline void virtblk_bio_done(struct virtblk_req *vbr)
247 if (unlikely(vbr->flags & VBLK_IS_FLUSH))
248 virtblk_bio_flush_done(vbr);
250 virtblk_bio_data_done(vbr);
/*
 * Virtqueue completion callback.  Under the queue lock, drain completed
 * buffers and route each to the bio or request-queue completion path;
 * the disable_cb/enable_cb loop closes the race with late interrupts.
 * Finally restart a stopped request queue and wake virtqueue waiters.
 */
253 static void virtblk_done(struct virtqueue *vq)
255 struct virtio_blk *vblk = vq->vdev->priv;
256 bool bio_done = false, req_done = false;
257 struct virtblk_req *vbr;
261 spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
263 virtqueue_disable_cb(vq);
264 while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
266 virtblk_bio_done(vbr);
269 virtblk_request_done(vbr);
/* Re-enable callbacks; loop again if more buffers raced in. */
273 } while (!virtqueue_enable_cb(vq));
274 /* In case queue is stopped waiting for more buffers. */
276 blk_start_queue(vblk->disk->queue);
277 spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
/* Wake bio-path submitters sleeping in virtblk_add_req(). */
280 wake_up(&vblk->queue_wait);
/*
 * Build the sg list for one struct request and queue it on the
 * virtqueue.  Returns false (freeing the descriptor) when allocation or
 * the virtqueue add fails, so the caller can stop the queue and retry
 * once a completion frees space.  Called with the queue lock held
 * (request_fn context), hence GFP_ATOMIC.
 */
283 static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
286 unsigned long num, out = 0, in = 0;
287 struct virtblk_req *vbr;
289 vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
291 /* When another request finishes we'll try again. */
296 if (req->cmd_flags & REQ_FLUSH) {
297 vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
298 vbr->out_hdr.sector = 0;
299 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
301 switch (req->cmd_type) {
/* Normal filesystem read/write. */
303 vbr->out_hdr.type = 0;
304 vbr->out_hdr.sector = blk_rq_pos(vbr->req);
305 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
307 case REQ_TYPE_BLOCK_PC:
308 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
309 vbr->out_hdr.sector = 0;
310 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
312 case REQ_TYPE_SPECIAL:
/* Internal GET_ID request issued by virtblk_get_id(). */
313 vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
314 vbr->out_hdr.sector = 0;
315 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
318 /* We don't put anything else in the queue. */
323 sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
326 * If this is a packet command we need a couple of additional headers.
327 * Behind the normal outhdr we put a segment with the scsi command
328 * block, and before the normal inhdr we put the sense data and an
329 * inhdr with additional status information.
331 if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
332 sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
334 num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
336 if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
337 sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
338 sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
339 sizeof(vbr->in_hdr));
/* Status byte is always the final device-writable segment. */
342 sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
343 sizeof(vbr->status));
346 if (rq_data_dir(vbr->req) == WRITE) {
347 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
350 vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
/* Virtqueue full: free the descriptor and let the caller back off. */
355 if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
357 mempool_free(vbr, vblk->pool);
/*
 * Request-queue strategy function: pull requests off the queue, hand
 * each to do_req(), and kick the device after issuing.  On do_req()
 * failure the queue is stopped until virtblk_done() restarts it.
 */
364 static void virtblk_request(struct request_queue *q)
366 struct virtio_blk *vblk = q->queuedata;
368 unsigned int issued = 0;
370 while ((req = blk_peek_request(q)) != NULL) {
/* sg_elems was sized with 2 spare slots for header + status. */
371 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
373 /* If this request fails, stop queue and wait for something to
374 finish to restart it. */
375 if (!do_req(q, vblk, req)) {
379 blk_start_request(req);
384 virtqueue_kick(vblk->vq);
/*
 * bio-mode (use_bio) entry point: allocate a descriptor, record the
 * FLUSH/FUA/data flags, then start with either a preflush or the data.
 */
387 static void virtblk_make_request(struct request_queue *q, struct bio *bio)
389 struct virtio_blk *vblk = q->queuedata;
390 struct virtblk_req *vbr;
392 BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);
394 vbr = virtblk_alloc_req(vblk, GFP_NOIO);
/* Allocation failed even with GFP_NOIO: fail the bio. */
396 bio_endio(bio, -ENOMEM);
402 if (bio->bi_rw & REQ_FLUSH)
403 vbr->flags |= VBLK_REQ_FLUSH;
404 if (bio->bi_rw & REQ_FUA)
405 vbr->flags |= VBLK_REQ_FUA;
407 vbr->flags |= VBLK_REQ_DATA;
/* Preflush first; completion handlers chain the data/post-flush. */
409 if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
410 virtblk_bio_send_flush(vbr);
412 virtblk_bio_send_data(vbr);
/*
 * Synchronously issue a VIRTIO_BLK_T_GET_ID request (as a
 * REQ_TYPE_SPECIAL request) and copy the device serial into @id_str.
 */
415 /* return id (s/n) string for *disk to *id_str
417 static int virtblk_get_id(struct gendisk *disk, char *id_str)
419 struct virtio_blk *vblk = disk->private_data;
/* Map the caller's buffer so the device can DMA the ID into it. */
424 bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
429 req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
/* REQ_TYPE_SPECIAL makes do_req() tag this as a GET_ID command. */
435 req->cmd_type = REQ_TYPE_SPECIAL;
436 err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
437 blk_put_request(req);
/*
 * Block device ioctl: only generic SCSI ioctls are supported, and only
 * when the host advertises VIRTIO_BLK_F_SCSI.
 */
442 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
443 unsigned int cmd, unsigned long data)
445 struct gendisk *disk = bdev->bd_disk;
446 struct virtio_blk *vblk = disk->private_data;
449 * Only allow the generic SCSI ioctls if the host can support it.
451 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
454 return scsi_cmd_blk_ioctl(bdev, mode, cmd,
455 (void __user *)data);
458 /* We provide getgeo only to please some old bootloader/partitioning tools */
459 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
461 struct virtio_blk *vblk = bd->bd_disk->private_data;
462 struct virtio_blk_geometry vgeo;
465 /* see if the host passed in geometry config */
466 err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
467 offsetof(struct virtio_blk_config, geometry),
/* Host-supplied geometry takes precedence. */
471 geo->heads = vgeo.heads;
472 geo->sectors = vgeo.sectors;
473 geo->cylinders = vgeo.cylinders;
475 /* some standard values, similar to sd */
/* 32 sectors/track; cylinders derived from total capacity. */
477 geo->sectors = 1 << 5;
478 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
/* Block device operations exposed through the gendisk. */
483 static const struct block_device_operations virtblk_fops = {
484 .ioctl = virtblk_ioctl,
485 .owner = THIS_MODULE,
486 .getgeo = virtblk_getgeo,
/* Each device owns 1 << PART_BITS minors; map device index to first minor. */
489 static int index_to_minor(int index)
491 return index << PART_BITS;
/* Inverse of index_to_minor(): recover the device index from a minor. */
494 static int minor_to_index(int minor)
496 return minor >> PART_BITS;
/*
 * sysfs 'serial' attribute: fetch the device ID string into the sysfs
 * page buffer; -EIO (host does not support GET_ID) yields an empty
 * string rather than an error.
 */
499 static ssize_t virtblk_serial_show(struct device *dev,
500 struct device_attribute *attr, char *buf)
502 struct gendisk *disk = dev_to_disk(dev);
505 /* sysfs gives us a PAGE_SIZE buffer */
506 BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
/* The ID is not NUL-terminated by the device; terminate it ourselves. */
508 buf[VIRTIO_BLK_ID_BYTES] = '\0';
509 err = virtblk_get_id(disk, buf);
513 if (err == -EIO) /* Unsupported? Make it empty. */
518 DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
/*
 * Process-context handler for config-space changes (resize): re-read the
 * capacity, truncate it if it overflows sector_t, log the new size,
 * update the gendisk, and emit a RESIZE=1 uevent.  Skipped entirely when
 * config_enable is false (device being removed or frozen).
 */
520 static void virtblk_config_changed_work(struct work_struct *work)
522 struct virtio_blk *vblk =
523 container_of(work, struct virtio_blk, config_work);
524 struct virtio_device *vdev = vblk->vdev;
525 struct request_queue *q = vblk->disk->queue;
526 char cap_str_2[10], cap_str_10[10];
527 char *envp[] = { "RESIZE=1", NULL };
530 mutex_lock(&vblk->config_lock);
531 if (!vblk->config_enable)
534 /* Host must always specify the capacity. */
535 vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
536 &capacity, sizeof(capacity));
538 /* If capacity is too big, truncate with warning. */
539 if ((sector_t)capacity != capacity) {
540 dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
541 (unsigned long long)capacity);
542 capacity = (sector_t)-1;
/* Report the size in both binary and decimal units. */
545 size = capacity * queue_logical_block_size(q);
546 string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
547 string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
549 dev_notice(&vdev->dev,
550 "new size: %llu %d-byte logical blocks (%s/%s)\n",
551 (unsigned long long)capacity,
552 queue_logical_block_size(q),
553 cap_str_10, cap_str_2);
555 set_capacity(vblk->disk, capacity);
556 revalidate_disk(vblk->disk);
/* Let udev and userspace know the device was resized. */
557 kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
559 mutex_unlock(&vblk->config_lock);
/* Config-change interrupt: defer the real work to process context. */
562 static void virtblk_config_changed(struct virtio_device *vdev)
564 struct virtio_blk *vblk = vdev->priv;
566 queue_work(virtblk_wq, &vblk->config_work);
/* Find the device's single request virtqueue with virtblk_done as callback. */
569 static int init_vq(struct virtio_blk *vblk)
573 /* We expect one virtqueue, for output. */
574 vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests");
575 if (IS_ERR(vblk->vq))
576 err = PTR_ERR(vblk->vq);
582 * Legacy naming scheme used for virtio devices. We are stuck with it for
583 * virtio blk but don't ever use it for any new driver.
/*
 * Format "<prefix><letters>" (vda..vdz, vdaa, ...) for @index into @buf.
 * The letters are built least-significant first, then moved into place.
 */
585 static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
587 const int base = 'z' - 'a' + 1;
588 char *begin = buf + strlen(prefix);
589 char *end = buf + buflen;
/* Emit digits right-to-left in bijective base-26. */
599 *--p = 'a' + (index % unit);
600 index = (index / unit) - 1;
601 } while (index >= 0);
/* Shift the digits down next to the prefix, then write the prefix. */
603 memmove(begin, p, end - p);
604 memcpy(buf, prefix, strlen(prefix));
/*
 * Return the current write-cache mode: read 'wce' from config space if
 * VIRTIO_BLK_F_CONFIG_WCE is offered, otherwise fall back to the static
 * VIRTIO_BLK_F_WCE feature bit.
 */
609 static int virtblk_get_cache_mode(struct virtio_device *vdev)
614 err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE,
615 offsetof(struct virtio_blk_config, wce),
618 writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
/*
 * Apply the current cache mode to the block layer: advertise REQ_FLUSH
 * support only in writeback mode, then revalidate the disk.
 */
623 static void virtblk_update_cache_mode(struct virtio_device *vdev)
625 u8 writeback = virtblk_get_cache_mode(vdev);
626 struct virtio_blk *vblk = vdev->priv;
629 blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
631 blk_queue_flush(vblk->disk->queue, 0);
633 revalidate_disk(vblk->disk);
/* Index matches the wce value: 0 = write through, 1 = write back. */
636 static const char *const virtblk_cache_types[] = {
637 "write through", "write back"
/*
 * sysfs 'cache_type' writer: match the input against the table above,
 * write the new wce value into device config space, and re-apply the
 * cache mode to the queue.  Only wired up when the host offers
 * VIRTIO_BLK_F_CONFIG_WCE (hence the BUG_ON).
 */
641 virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
642 const char *buf, size_t count)
644 struct gendisk *disk = dev_to_disk(dev);
645 struct virtio_blk *vblk = disk->private_data;
646 struct virtio_device *vdev = vblk->vdev;
650 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
/* Scan the table backwards; i ends as the matching index. */
651 for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
652 if (sysfs_streq(buf, virtblk_cache_types[i]))
659 vdev->config->set(vdev,
660 offsetof(struct virtio_blk_config, wce),
661 &writeback, sizeof(writeback));
663 virtblk_update_cache_mode(vdev);
/* sysfs 'cache_type' reader: print the current mode's name. */
668 virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
671 struct gendisk *disk = dev_to_disk(dev);
672 struct virtio_blk *vblk = disk->private_data;
673 u8 writeback = virtblk_get_cache_mode(vblk->vdev);
675 BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
676 return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
/* Two variants of the attribute; probe picks RW only with CONFIG_WCE. */
679 static const struct device_attribute dev_attr_cache_type_ro =
680 __ATTR(cache_type, S_IRUGO,
681 virtblk_cache_type_show, NULL);
682 static const struct device_attribute dev_attr_cache_type_rw =
683 __ATTR(cache_type, S_IRUGO|S_IWUSR,
684 virtblk_cache_type_show, virtblk_cache_type_store);
/*
 * Device probe: allocate an index, size and allocate the device struct
 * (with its trailing sg array), create the mempool, gendisk and request
 * queue, apply host-advertised limits and topology from config space,
 * register the disk and its sysfs attributes.  Error paths unwind in
 * reverse order (labels elided between the cleanup statements below).
 */
686 static int virtblk_probe(struct virtio_device *vdev)
688 struct virtio_blk *vblk;
689 struct request_queue *q;
694 u32 v, blk_size, sg_elems, opt_io_size;
696 u8 physical_block_exp, alignment_offset;
/* Reserve a device index; bounded so minors fit in MINORBITS. */
698 err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
704 /* We need to know how many segments before we allocate. */
705 err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
706 offsetof(struct virtio_blk_config, seg_max),
709 /* We need at least one SG element, whatever they say. */
710 if (err || !sg_elems)
713 /* We need an extra sg elements at head and tail. */
/* Struct plus flexible sg array allocated in one block. */
715 vdev->priv = vblk = kmalloc(sizeof(*vblk) +
716 sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
722 init_waitqueue_head(&vblk->queue_wait);
724 vblk->sg_elems = sg_elems;
725 sg_init_table(vblk->sg, vblk->sg_elems);
726 mutex_init(&vblk->config_lock);
728 INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
729 vblk->config_enable = true;
/* bio mode needs room for a per-request scatterlist in the pool. */
735 pool_size = sizeof(struct virtblk_req);
737 pool_size += sizeof(struct scatterlist) * sg_elems;
738 vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
744 /* FIXME: How many partitions? How long is a piece of string? */
745 vblk->disk = alloc_disk(1 << PART_BITS);
751 q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
/* In bio mode, bypass the request queue entirely. */
758 blk_queue_make_request(q, virtblk_make_request);
761 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
763 vblk->disk->major = major;
764 vblk->disk->first_minor = index_to_minor(index);
765 vblk->disk->private_data = vblk;
766 vblk->disk->fops = &virtblk_fops;
767 vblk->disk->driverfs_dev = &vdev->dev;
770 /* configure queue flush support */
771 virtblk_update_cache_mode(vdev);
773 /* If disk is read-only in the host, the guest should obey */
774 if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
775 set_disk_ro(vblk->disk, 1);
777 /* Host must always specify the capacity. */
778 vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
781 /* If capacity is too big, truncate with warning. */
782 if ((sector_t)cap != cap) {
783 dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
784 (unsigned long long)cap);
787 set_capacity(vblk->disk, cap);
789 /* We can handle whatever the host told us to handle. */
790 blk_queue_max_segments(q, vblk->sg_elems-2);
792 /* No need to bounce any requests */
793 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
795 /* No real sector limit. */
796 blk_queue_max_hw_sectors(q, -1U);
798 /* Host can optionally specify maximum segment size and number of
800 err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
801 offsetof(struct virtio_blk_config, size_max),
804 blk_queue_max_segment_size(q, v);
806 blk_queue_max_segment_size(q, -1U);
808 /* Host can optionally specify the block size of the device */
809 err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
810 offsetof(struct virtio_blk_config, blk_size),
813 blk_queue_logical_block_size(q, blk_size);
815 blk_size = queue_logical_block_size(q);
817 /* Use topology information if available */
818 err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
819 offsetof(struct virtio_blk_config, physical_block_exp),
820 &physical_block_exp);
821 if (!err && physical_block_exp)
822 blk_queue_physical_block_size(q,
823 blk_size * (1 << physical_block_exp));
825 err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
826 offsetof(struct virtio_blk_config, alignment_offset),
828 if (!err && alignment_offset)
829 blk_queue_alignment_offset(q, blk_size * alignment_offset);
831 err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
832 offsetof(struct virtio_blk_config, min_io_size),
834 if (!err && min_io_size)
835 blk_queue_io_min(q, blk_size * min_io_size);
837 err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
838 offsetof(struct virtio_blk_config, opt_io_size),
840 if (!err && opt_io_size)
841 blk_queue_io_opt(q, blk_size * opt_io_size);
843 add_disk(vblk->disk);
844 err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
/* cache_type is writable only when the host allows runtime WCE changes. */
848 if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
849 err = device_create_file(disk_to_dev(vblk->disk),
850 &dev_attr_cache_type_rw);
852 err = device_create_file(disk_to_dev(vblk->disk),
853 &dev_attr_cache_type_ro);
/* Error unwinding (labels elided): undo in reverse creation order. */
859 del_gendisk(vblk->disk);
860 blk_cleanup_queue(vblk->disk->queue);
862 put_disk(vblk->disk);
864 mempool_destroy(vblk->pool);
866 vdev->config->del_vqs(vdev);
870 ida_simple_remove(&vd_index_ida, index);
/*
 * Device teardown: disable config-change handling, remove the disk and
 * queue, reset the device, flush pending config work, then free
 * resources.  The index is released to the IDA only if no other
 * reference to the disk's device object remains.
 */
875 static void virtblk_remove(struct virtio_device *vdev)
877 struct virtio_blk *vblk = vdev->priv;
878 int index = vblk->index;
881 /* Prevent config work handler from accessing the device. */
882 mutex_lock(&vblk->config_lock);
883 vblk->config_enable = false;
884 mutex_unlock(&vblk->config_lock);
886 del_gendisk(vblk->disk);
887 blk_cleanup_queue(vblk->disk->queue);
889 /* Stop all the virtqueues. */
890 vdev->config->reset(vdev);
892 flush_work(&vblk->config_work);
/* Sample the refcount before put_disk drops our reference. */
894 refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
895 put_disk(vblk->disk);
896 mempool_destroy(vblk->pool);
897 vdev->config->del_vqs(vdev);
900 /* Only free device id if we don't have any users */
902 ida_simple_remove(&vd_index_ida, index);
/*
 * Suspend path: quiesce the device (reset), stop config work, stop and
 * drain the request queue, then tear down the virtqueues so restore can
 * recreate them.
 */
906 static int virtblk_freeze(struct virtio_device *vdev)
908 struct virtio_blk *vblk = vdev->priv;
910 /* Ensure we don't receive any more interrupts */
911 vdev->config->reset(vdev);
913 /* Prevent config work handler from accessing the device. */
914 mutex_lock(&vblk->config_lock);
915 vblk->config_enable = false;
916 mutex_unlock(&vblk->config_lock);
918 flush_work(&vblk->config_work);
920 spin_lock_irq(vblk->disk->queue->queue_lock);
921 blk_stop_queue(vblk->disk->queue);
922 spin_unlock_irq(vblk->disk->queue->queue_lock);
/* Wait for any in-flight queue processing to finish. */
923 blk_sync_queue(vblk->disk->queue);
925 vdev->config->del_vqs(vdev);
/* Resume path: re-enable config updates, recreate the vq, restart the queue. */
929 static int virtblk_restore(struct virtio_device *vdev)
931 struct virtio_blk *vblk = vdev->priv;
934 vblk->config_enable = true;
935 ret = init_vq(vdev->priv);
937 spin_lock_irq(vblk->disk->queue->queue_lock);
938 blk_start_queue(vblk->disk->queue);
939 spin_unlock_irq(vblk->disk->queue->queue_lock);
/* Match any virtio block device. */
945 static const struct virtio_device_id id_table[] = {
946 { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
/* Feature bits this driver understands and may negotiate. */
950 static unsigned int features[] = {
951 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
952 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
953 VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE
/* Driver registration glue tying the callbacks above together. */
956 static struct virtio_driver virtio_blk = {
957 .feature_table = features,
958 .feature_table_size = ARRAY_SIZE(features),
959 .driver.name = KBUILD_MODNAME,
960 .driver.owner = THIS_MODULE,
961 .id_table = id_table,
962 .probe = virtblk_probe,
963 .remove = virtblk_remove,
964 .config_changed = virtblk_config_changed,
966 .freeze = virtblk_freeze,
967 .restore = virtblk_restore,
/*
 * Module init: create the workqueue, grab a dynamic block major, then
 * register the virtio driver.  Failures unwind in reverse order via the
 * labels below.
 */
971 static int __init init(void)
975 virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
/* 0 asks the block layer to assign a free major number. */
979 major = register_blkdev(0, "virtblk");
982 goto out_destroy_workqueue;
985 error = register_virtio_driver(&virtio_blk);
987 goto out_unregister_blkdev;
990 out_unregister_blkdev:
991 unregister_blkdev(major, "virtblk");
992 out_destroy_workqueue:
993 destroy_workqueue(virtblk_wq);
/* Module exit: undo init() in reverse order. */
997 static void __exit fini(void)
999 unregister_blkdev(major, "virtblk");
1000 unregister_virtio_driver(&virtio_blk);
1001 destroy_workqueue(virtblk_wq);
/* Module metadata and hotplug alias table. */
1006 MODULE_DEVICE_TABLE(virtio, id_table);
1007 MODULE_DESCRIPTION("Virtio block driver");
1008 MODULE_LICENSE("GPL");