#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>

#define PART_BITS 4

static int major;
static DEFINE_IDA(vd_index_ida);

struct workqueue_struct *virtblk_wq;

struct virtio_blk
{
        spinlock_t lock;

        struct virtio_device *vdev;
        struct virtqueue *vq;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        mempool_t *pool;

        /* Process context for config space updates */
        struct work_struct config_work;

        /* Lock for config space updates */
        struct mutex config_lock;

        /* enable config space updates */
        bool config_enable;

        /* What host tells us, plus 2 for header & trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* Scatterlist: can be too big for stack. */
        struct scatterlist sg[/*sg_elems*/];
};

struct virtblk_req
{
        struct request *req;
        struct virtio_blk_outhdr out_hdr;
        struct virtio_scsi_inhdr in_hdr;
        u8 status;
};
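
/*
 * On the ring, every request is the out_hdr above (driver to host),
 * followed by the data and any SCSI headers, followed by the one-byte
 * status (host to driver); see do_req() for how this is laid out.
 */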

static void blk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        struct virtblk_req *vbr;
        unsigned int len;
        unsigned long flags;

        spin_lock_irqsave(&vblk->lock, flags);
        while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
                int error;

                switch (vbr->status) {
                case VIRTIO_BLK_S_OK:
                        error = 0;
                        break;
                case VIRTIO_BLK_S_UNSUPP:
                        error = -ENOTTY;
                        break;
                default:
                        error = -EIO;
                        break;
                }

                switch (vbr->req->cmd_type) {
                case REQ_TYPE_BLOCK_PC:
                        vbr->req->resid_len = vbr->in_hdr.residual;
                        vbr->req->sense_len = vbr->in_hdr.sense_len;
                        vbr->req->errors = vbr->in_hdr.errors;
                        break;
                case REQ_TYPE_SPECIAL:
                        vbr->req->errors = (error != 0);
                        break;
                default:
                        break;
                }

                __blk_end_request_all(vbr->req, error);
                mempool_free(vbr, vblk->pool);
        }
        /* In case queue is stopped waiting for more buffers. */
        blk_start_queue(vblk->disk->queue);
        spin_unlock_irqrestore(&vblk->lock, flags);
}

static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
                   struct request *req)
{
        unsigned long num, out = 0, in = 0;
        struct virtblk_req *vbr;

        vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
        if (!vbr)
                /* When another request finishes we'll try again. */
                return false;

        vbr->req = req;

        if (req->cmd_flags & REQ_FLUSH) {
                vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
                vbr->out_hdr.sector = 0;
                vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
        } else {
                switch (req->cmd_type) {
                case REQ_TYPE_FS:
                        vbr->out_hdr.type = 0;
                        vbr->out_hdr.sector = blk_rq_pos(vbr->req);
                        vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
                        break;
                case REQ_TYPE_BLOCK_PC:
                        vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
                        vbr->out_hdr.sector = 0;
                        vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
                        break;
                case REQ_TYPE_SPECIAL:
                        vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
                        vbr->out_hdr.sector = 0;
                        vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
                        break;
                default:
                        /* We don't put anything else in the queue. */
                        BUG();
                }
        }

        sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

        /*
         * If this is a packet command we need a couple of additional headers.
         * Behind the normal outhdr we put a segment with the scsi command
         * block, and before the status byte we put the sense data and an
         * inhdr carrying additional status information.
         */
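        /*
         * The scatterlist built below is therefore (bracketed segments
         * present only for REQ_TYPE_BLOCK_PC requests):
         *
         *   out: out_hdr, [cmd], data (for writes)
         *   in:  data (for reads), [sense, in_hdr], status
         */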
        if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
                sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

        num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

        if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
                sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense,
                           SCSI_SENSE_BUFFERSIZE);
                sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
                           sizeof(vbr->in_hdr));
        }

        sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
                   sizeof(vbr->status));

        if (rq_data_dir(vbr->req) == WRITE) {
                vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
                out += num;
        } else {
                vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
                in += num;
        }

        if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, GFP_ATOMIC) < 0) {
                mempool_free(vbr, vblk->pool);
                return false;
        }

        return true;
}

static void do_virtblk_request(struct request_queue *q)
{
        struct virtio_blk *vblk = q->queuedata;
        struct request *req;
        unsigned int issued = 0;

        while ((req = blk_peek_request(q)) != NULL) {
                BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

                /* If this request fails, stop queue and wait for something to
                   finish to restart it. */
                if (!do_req(q, vblk, req)) {
                        blk_stop_queue(q);
                        break;
                }

                blk_start_request(req);
                issued++;
        }

        /* Kick once per batch, not once per request. */
        if (issued)
                virtqueue_kick(vblk->vq);
}

/* return id (s/n) string for *disk to *id_str */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request *req;
        struct bio *bio;
        int err;

        bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
                           GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
        if (IS_ERR(req)) {
                bio_put(bio);
                return PTR_ERR(req);
        }

        req->cmd_type = REQ_TYPE_SPECIAL;
        err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        blk_put_request(req);

        return err;
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, unsigned long data)
{
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;

        /*
         * Only allow the generic SCSI ioctls if the host can support it.
         */
        if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
                return -ENOTTY;

        return scsi_cmd_blk_ioctl(bdev, mode, cmd,
                                  (void __user *)data);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        struct virtio_blk_geometry vgeo;
        int err;

        /* see if the host passed in geometry config */
        err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
                                offsetof(struct virtio_blk_config, geometry),
                                &vgeo);

        if (!err) {
                geo->heads = vgeo.heads;
                geo->sectors = vgeo.sectors;
                geo->cylinders = vgeo.cylinders;
        } else {
                /* some standard values, similar to sd: 64 heads * 32 sectors
                 * = 2048 sectors per cylinder, hence the shift by 11 */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}

static const struct block_device_operations virtblk_fops = {
        .ioctl  = virtblk_ioctl,
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}
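
/*
 * With PART_BITS = 4, each device claims a block of 1 << 4 = 16 minors
 * (the whole disk plus 15 partitions): index 0 maps to minors 0-15,
 * index 1 to minors 16-31, and so on.
 */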

static ssize_t virtblk_serial_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        u64 capacity, size;

        mutex_lock(&vblk->config_lock);
        if (!vblk->config_enable)
                goto done;

        /* Host must always specify the capacity. */
        vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
                          &capacity, sizeof(capacity));

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        size = capacity * queue_logical_block_size(q);
        string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "new size: %llu %d-byte logical blocks (%s/%s)\n",
                   (unsigned long long)capacity,
                   queue_logical_block_size(q),
                   cap_str_10, cap_str_2);

        set_capacity(vblk->disk, capacity);
        revalidate_disk(vblk->disk);
done:
        mutex_unlock(&vblk->config_lock);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
        int err = 0;

        /* We expect one virtqueue, for output. */
        vblk->vq = virtio_find_single_vq(vblk->vdev, blk_done, "requests");
        if (IS_ERR(vblk->vq))
                err = PTR_ERR(vblk->vq);

        return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
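/*
 * It is the same bijective base-26 numbering that sd uses: index 0
 * becomes "vda", 25 "vdz", 26 "vdaa", 701 "vdzz", 702 "vdaaa", etc.
 */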
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

static int __devinit virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;
        u64 cap;
        u32 v, blk_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
                                offsetof(struct virtio_blk_config, seg_max),
                                &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk) +
                                    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        spin_lock_init(&vblk->lock);
        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;
        sg_init_table(vblk->sg, vblk->sg_elems);
        mutex_init(&vblk->config_lock);
        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
        vblk->config_enable = true;

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        vblk->pool = mempool_create_kmalloc_pool(1, sizeof(struct virtblk_req));
        if (!vblk->pool) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_mempool;
        }

        q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
        if (!q) {
                err = -ENOMEM;
                goto out_put_disk;
        }

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->driverfs_dev = &vdev->dev;
        vblk->index = index;

        /* configure queue flush support */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
                blk_queue_flush(q, REQ_FLUSH);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* Host must always specify the capacity. */
        vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
                          &cap, sizeof(cap));

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)cap != cap) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)cap);
                cap = (sector_t)-1;
        }
        set_capacity(vblk->disk, cap);
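        /* (Per the virtio spec, cap is in 512-byte sectors, which is also
         * the unit set_capacity() expects.) */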

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems - 2);

        /* No need to bounce any requests */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                offsetof(struct virtio_blk_config, size_max),
                                &v);
        if (!err)
                blk_queue_max_segment_size(q, v);
        else
                blk_queue_max_segment_size(q, -1U);

        /* Host can optionally specify the block size of the device */
        err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                offsetof(struct virtio_blk_config, blk_size),
                                &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
                        offsetof(struct virtio_blk_config, physical_block_exp),
                        &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
                        offsetof(struct virtio_blk_config, alignment_offset),
                        &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
                        offsetof(struct virtio_blk_config, min_io_size),
                        &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
                        offsetof(struct virtio_blk_config, opt_io_size),
                        &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);
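
        /*
         * To summarize the topology fields: the physical block size is
         * blk_size << physical_block_exp, while alignment_offset,
         * min_io_size and opt_io_size are all multiples of blk_size.
         */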

        add_disk(vblk->disk);
        err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
        if (err)
                goto out_del_disk;

        return 0;

out_del_disk:
        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);
out_put_disk:
        put_disk(vblk->disk);
out_mempool:
        mempool_destroy(vblk->pool);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

static void __devexit virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        struct virtblk_req *vbr;
        unsigned long flags;

        /* Prevent config work handler from accessing the device. */
        mutex_lock(&vblk->config_lock);
        vblk->config_enable = false;
        mutex_unlock(&vblk->config_lock);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);

        /* Abort requests dispatched to driver. */
        spin_lock_irqsave(&vblk->lock, flags);
        while ((vbr = virtqueue_detach_unused_buf(vblk->vq))) {
                __blk_end_request_all(vbr->req, -EIO);
                mempool_free(vbr, vblk->pool);
        }
        spin_unlock_irqrestore(&vblk->lock, flags);

        blk_cleanup_queue(vblk->disk->queue);
        put_disk(vblk->disk);
        mempool_destroy(vblk->pool);
        vdev->config->del_vqs(vdev);
        kfree(vblk);
        ida_simple_remove(&vd_index_ida, index);
}

#ifdef CONFIG_PM
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Prevent config work handler from accessing the device. */
        mutex_lock(&vblk->config_lock);
        vblk->config_enable = false;
        mutex_unlock(&vblk->config_lock);

        flush_work(&vblk->config_work);

        spin_lock_irq(vblk->disk->queue->queue_lock);
        blk_stop_queue(vblk->disk->queue);
        spin_unlock_irq(vblk->disk->queue->queue_lock);
        blk_sync_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        vblk->config_enable = true;
        ret = init_vq(vdev->priv);
        if (!ret) {
                spin_lock_irq(vblk->disk->queue->queue_lock);
                blk_start_queue(vblk->disk->queue);
                spin_unlock_irq(vblk->disk->queue->queue_lock);
        }

        return ret;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
};

/*
 * virtio_blk causes spurious section mismatch warning by
 * simultaneously referring to a __devinit and a __devexit function.
 * Use __refdata to avoid this warning.
 */
static struct virtio_driver __refdata virtio_blk = {
        .feature_table          = features,
        .feature_table_size     = ARRAY_SIZE(features),
        .driver.name            = KBUILD_MODNAME,
        .driver.owner           = THIS_MODULE,
        .id_table               = id_table,
        .probe                  = virtblk_probe,
        .remove                 = __devexit_p(virtblk_remove),
        .config_changed         = virtblk_config_changed,
#ifdef CONFIG_PM
        .freeze                 = virtblk_freeze,
        .restore                = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_blkdev(major, "virtblk");
        unregister_virtio_driver(&virtio_blk);
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");