X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=drivers%2Fblock%2Fvirtio_blk.c;h=c0facaa55cf46ee96932d08ee453829735bdf939;hb=1d589bb16b825b3a7b4edd34d997f1f1f953033d;hp=85d79a02d48726c150e6f8f1b17889f84bc8cc8d;hpb=ef71b1b87521ff93ed77b3e8f3e149afb392761c;p=mv-sheeva.git

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 85d79a02d48..c0facaa55cf 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -6,7 +6,6 @@
 #include <linux/virtio_blk.h>
 #include <linux/scatterlist.h>
 
-#define VIRTIO_MAX_SG	(3+MAX_PHYS_SEGMENTS)
 #define PART_BITS 4
 
 static int major, index;
@@ -26,8 +25,11 @@ struct virtio_blk
 
 	mempool_t *pool;
 
+	/* What the host tells us, plus 2 for header & trailer. */
+	unsigned int sg_elems;
+
 	/* Scatterlist: can be too big for stack. */
-	struct scatterlist sg[VIRTIO_MAX_SG];
+	struct scatterlist sg[/*sg_elems*/];
 };
 
 struct virtblk_req
@@ -35,6 +37,7 @@ struct virtblk_req
 	struct list_head list;
 	struct request *req;
 	struct virtio_blk_outhdr out_hdr;
+	struct virtio_scsi_inhdr in_hdr;
 	u8 status;
 };
 
@@ -48,6 +51,7 @@ static void blk_done(struct virtqueue *vq)
 	spin_lock_irqsave(&vblk->lock, flags);
 	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
 		int error;
+
 		switch (vbr->status) {
 		case VIRTIO_BLK_S_OK:
 			error = 0;
@@ -60,7 +64,13 @@ static void blk_done(struct virtqueue *vq)
 			break;
 		}
 
-		__blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
+		if (blk_pc_request(vbr->req)) {
+			vbr->req->resid_len = vbr->in_hdr.residual;
+			vbr->req->sense_len = vbr->in_hdr.sense_len;
+			vbr->req->errors = vbr->in_hdr.errors;
+		}
+
+		__blk_end_request_all(vbr->req, error);
 		list_del(&vbr->list);
 		mempool_free(vbr, vblk->pool);
 	}
@@ -72,7 +82,7 @@ static void blk_done(struct virtqueue *vq)
 static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		   struct request *req)
 {
-	unsigned long num, out, in;
+	unsigned long num, out = 0, in = 0;
 	struct virtblk_req *vbr;
 
 	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
@@ -83,7 +93,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	vbr->req = req;
 	if (blk_fs_request(vbr->req)) {
 		vbr->out_hdr.type = 0;
-		vbr->out_hdr.sector = vbr->req->sector;
+		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
 	} else if (blk_pc_request(vbr->req)) {
 		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
@@ -97,20 +107,36 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	if (blk_barrier_rq(vbr->req))
 		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
 
-	/* This init could be done at vblk creation time */
-	sg_init_table(vblk->sg, VIRTIO_MAX_SG);
-	sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
-	num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
-	sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
+	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
 
-	if (rq_data_dir(vbr->req) == WRITE) {
-		vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-		out = 1 + num;
-		in = 1;
-	} else {
-		vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-		out = 1;
-		in = 1 + num;
+	/*
+	 * If this is a packet command we need a couple of additional headers.
+	 * Behind the normal outhdr we put a segment with the scsi command
+	 * block, and before the trailing status byte we put the sense data
+	 * and the inhdr with the additional status information.
+	 */
+	if (blk_pc_request(vbr->req))
+		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
+
+	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
+
+	if (blk_pc_request(vbr->req)) {
+		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
+		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
+			   sizeof(vbr->in_hdr));
+	}
+
+	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
+		   sizeof(vbr->status));
+
+	if (num) {
+		if (rq_data_dir(vbr->req) == WRITE) {
+			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+			out += num;
+		} else {
+			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+			in += num;
+		}
 	}
 
 	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
@@ -124,13 +150,12 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 
 static void do_virtblk_request(struct request_queue *q)
 {
-	struct virtio_blk *vblk = NULL;
+	struct virtio_blk *vblk = q->queuedata;
 	struct request *req;
 	unsigned int issued = 0;
 
-	while ((req = elv_next_request(q)) != NULL) {
-		vblk = req->rq_disk->private_data;
-		BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg));
+	while ((req = blk_peek_request(q)) != NULL) {
+		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
 		/* If this request fails, stop queue and wait for something to
 		   finish to restart it. */
@@ -138,7 +163,7 @@ static void do_virtblk_request(struct request_queue *q)
 			blk_stop_queue(q);
 			break;
 		}
-		blkdev_dequeue_request(req);
+		blk_start_request(req);
 		issued++;
 	}
 
@@ -146,12 +171,51 @@ static void do_virtblk_request(struct request_queue *q)
 		vblk->vq->vq_ops->kick(vblk->vq);
 }
 
+/* return ATA identify data
+ */
+static int virtblk_identify(struct gendisk *disk, void *argp)
+{
+	struct virtio_blk *vblk = disk->private_data;
+	void *opaque;
+	int err = -ENOMEM;
+
+	opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+	if (!opaque)
+		goto out;
+
+	err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
+		offsetof(struct virtio_blk_config, identify), opaque,
+		VIRTIO_BLK_ID_BYTES);
+
+	if (err)
+		goto out_kfree;
+
+	if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
+		err = -EFAULT;
+
+out_kfree:
+	kfree(opaque);
+out:
+	return err;
+}
+
 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 			 unsigned cmd, unsigned long data)
 {
-	return scsi_cmd_ioctl(bdev->bd_disk->queue,
-			      bdev->bd_disk, mode, cmd,
-			      (void __user *)data);
+	struct gendisk *disk = bdev->bd_disk;
+	struct virtio_blk *vblk = disk->private_data;
+	void __user *argp = (void __user *)data;
+
+	if (cmd == HDIO_GET_IDENTITY)
+		return virtblk_identify(disk, argp);
+
+	/*
+	 * Only allow the generic SCSI ioctls if the host can support it.
+	 */
+	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+		return -ENOIOCTLCMD;
+
+	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
 }
 
 /* We provide getgeo only to please some old bootloader/partitioning tools */
@@ -196,12 +260,22 @@ static int virtblk_probe(struct virtio_device *vdev)
 	int err;
 	u64 cap;
 	u32 v;
-	u32 blk_size;
+	u32 blk_size, sg_elems;
 
 	if (index_to_minor(index) >= 1 << MINORBITS)
 		return -ENOSPC;
 
-	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
+	/* We need to know how many segments before we allocate. */
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
+				offsetof(struct virtio_blk_config, seg_max),
+				&sg_elems);
+	if (err)
+		sg_elems = 1;
+
+	/* We need extra sg elements at head and tail. */
+	sg_elems += 2;
+	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
+				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
 	if (!vblk) {
 		err = -ENOMEM;
 		goto out;
@@ -210,6 +284,8 @@ static int virtblk_probe(struct virtio_device *vdev)
 	INIT_LIST_HEAD(&vblk->reqs);
 	spin_lock_init(&vblk->lock);
 	vblk->vdev = vdev;
+	vblk->sg_elems = sg_elems;
+	sg_init_table(vblk->sg, vblk->sg_elems);
 
 	/* We expect one virtqueue, for output. */
 	vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
@@ -237,6 +313,9 @@ static int virtblk_probe(struct virtio_device *vdev)
 		goto out_put_disk;
 	}
 
+	vblk->disk->queue->queuedata = vblk;
+	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
+
 	if (index < 26) {
 		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
 	} else if (index < (26 + 1) * 26) {
@@ -277,6 +356,13 @@ static int virtblk_probe(struct virtio_device *vdev)
 	}
 	set_capacity(vblk->disk, cap);
 
+	/* We can handle whatever the host told us to handle. */
+	blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
+	blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+
+	/* No real sector limit. */
+	blk_queue_max_sectors(vblk->disk->queue, -1U);
+
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
 	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
@@ -284,19 +370,15 @@ static int virtblk_probe(struct virtio_device *vdev)
 				&v);
 	if (!err)
 		blk_queue_max_segment_size(vblk->disk->queue, v);
-
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
-				offsetof(struct virtio_blk_config, seg_max),
-				&v);
-	if (!err)
-		blk_queue_max_hw_segments(vblk->disk->queue, v);
+	else
+		blk_queue_max_segment_size(vblk->disk->queue, -1U);
 
 	/* Host can optionally specify the block size of the device */
 	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
 				&blk_size);
 	if (!err)
-		blk_queue_hardsect_size(vblk->disk->queue, blk_size);
+		blk_queue_logical_block_size(vblk->disk->queue, blk_size);
 
 	add_disk(vblk->disk);
 	return 0;
@@ -339,6 +421,7 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
 	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
 	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
+	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
 };
 
 static struct virtio_driver virtio_blk = {
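
Note on the request layout built by the patched do_req(): each request is laid out as out header, optional SCSI CDB, the data segments, then (for packet commands) the sense buffer and virtio_scsi_inhdr, and finally the status byte; only the data segments switch between the out and in halves depending on the transfer direction. The stand-alone C program below is a rough model of that ordering, not driver code; show_layout() and its arguments are hypothetical stand-ins for blk_pc_request(), rq_data_dir() and the segment count returned by blk_rq_map_sg().

#include <stdio.h>

/*
 * Model of the buffer ordering do_req() sets up: out header, optional
 * CDB, data segments, then optional sense + inhdr, and finally the
 * status byte.  Only the out/in split of the data depends on direction.
 */
static void show_layout(int is_scsi, int is_write, unsigned int data_segs)
{
	unsigned int out = 0, in = 0;

	printf("%s %s, %u data segment(s):\n",
	       is_scsi ? "SCSI passthrough" : "fs request",
	       is_write ? "write" : "read", data_segs);

	/* Driver-to-device buffers always come first. */
	printf("  sg[%u]  out: virtio_blk_outhdr\n", out);
	out++;
	if (is_scsi) {
		printf("  sg[%u]  out: SCSI CDB (req->cmd)\n", out);
		out++;
	}

	/* Data segments sit between the headers and the trailers. */
	if (is_write)
		out += data_segs;	/* data flows to the device   */
	else
		in += data_segs;	/* data flows from the device */

	/* Device-to-driver buffers follow the data. */
	if (is_scsi) {
		printf("  ...    in:  sense buffer (96 bytes)\n");
		in++;
		printf("  ...    in:  virtio_scsi_inhdr\n");
		in++;
	}
	printf("  ...    in:  status byte\n");
	in++;

	printf("  => add_buf(vq, sg, out=%u, in=%u)\n\n", out, in);
}

int main(void)
{
	show_layout(0, 1, 4);	/* ordinary 4-segment write      */
	show_layout(1, 0, 2);	/* SG_IO command reading 2 segs  */
	return 0;
}

For an ordinary 4-segment write this prints out=5/in=1; for a 2-segment SG_IO read it prints out=2/in=5, matching the counts handed to add_buf().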
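
Similarly, the probe path now sizes struct virtio_blk from the host-reported seg_max plus two slots for the out header and the status byte, using a flexible array member in place of the old fixed VIRTIO_MAX_SG array. A minimal user-space sketch of that allocation pattern follows; vblk_stub, scatterlist_stub and the seg_max value of 126 are hypothetical stand-ins for the real kernel types and the value read via virtio_config_val().

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel types; not the real definitions. */
struct scatterlist_stub {
	void		*buf;
	unsigned int	len;
};

struct vblk_stub {
	unsigned int		sg_elems;
	struct scatterlist_stub	sg[];	/* flexible array, sized at alloc */
};

int main(void)
{
	unsigned int seg_max = 126;		/* hypothetical host value      */
	unsigned int sg_elems = seg_max + 2;	/* + request header and status  */
	struct vblk_stub *vblk;

	vblk = malloc(sizeof(*vblk) + sizeof(vblk->sg[0]) * sg_elems);
	if (!vblk)
		return 1;

	vblk->sg_elems = sg_elems;
	printf("%u scatterlist slots, %zu bytes total\n",
	       vblk->sg_elems,
	       sizeof(*vblk) + sizeof(vblk->sg[0]) * sg_elems);

	free(vblk);
	return 0;
}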