struct page *page_buf;
unsigned char *buf;
struct scsi_sense_hdr sense_hdr;
+ struct vmscsi_request *vm_srb;
DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");
return -ENOMEM;
}
+ vm_srb = &blkvsc_req->request.extension.vstor_packet.vm_srb;
init_waitqueue_head(&blkvsc_req->wevent);
blkvsc_req->dev = blkdev;
blkvsc_req->req = NULL;
wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
/* check error */
- if (blkvsc_req->request.status) {
+ if (vm_srb->scsi_status) {
scsi_normalize_sense(blkvsc_req->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr);
struct page *page_buf;
unsigned char *buf;
struct scsi_sense_hdr sense_hdr;
+ struct vmscsi_request *vm_srb;
DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");
return -ENOMEM;
memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
+ vm_srb = &blkvsc_req->request.extension.vstor_packet.vm_srb;
page_buf = alloc_page(GFP_KERNEL);
if (!page_buf) {
kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
/* check error */
- if (blkvsc_req->request.status) {
+ if (vm_srb->scsi_status) {
scsi_normalize_sense(blkvsc_req->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr);
if (sense_hdr.asc == 0x3A) {
struct block_device_context *blkdev =
(struct block_device_context *)blkvsc_req->dev;
struct scsi_sense_hdr sense_hdr;
+ struct vmscsi_request *vm_srb;
DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n",
blkvsc_req);
+ vm_srb = &blkvsc_req->request.extension.vstor_packet.vm_srb;
blkdev->num_outstanding_reqs--;
- if (blkvsc_req->request.status)
+ if (vm_srb->scsi_status)
if (scsi_normalize_sense(blkvsc_req->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr))
scsi_print_sense_hdr("blkvsc", &sense_hdr);
(struct block_device_context *)blkvsc_req->dev;
unsigned long flags;
struct blkvsc_request *comp_req, *tmp;
+ struct vmscsi_request *vm_srb;
/* ASSERT(blkvsc_req->group); */
list_del(&comp_req->req_entry);
+ vm_srb =
+ &comp_req->request.extension.vstor_packet.vm_srb;
if (!__blk_end_request(comp_req->req,
- (!comp_req->request.status ? 0 : -EIO),
+ (!vm_srb->scsi_status ? 0 : -EIO),
comp_req->sector_count * blkdev->sector_size)) {
/*
* All the sectors have been xferred ie the
{
struct blkvsc_request *pend_req, *tmp;
struct blkvsc_request *comp_req, *tmp2;
+ struct vmscsi_request *vm_srb;
int ret = 0;
list_del(&comp_req->req_entry);
if (comp_req->req) {
+ vm_srb =
+ &comp_req->request.extension.vstor_packet.
+ vm_srb;
ret = __blk_end_request(comp_req->req,
- (!comp_req->request.status ? 0 : -EIO),
+ (!vm_srb->scsi_status ? 0 : -EIO),
comp_req->sector_count *
blkdev->sector_size);
/* Copy over the status...etc */
- request->status = vstor_packet->vm_srb.scsi_status;
- if (request->status != 0 || vstor_packet->vm_srb.srb_status != 1) {
+ if (vstor_packet->vm_srb.scsi_status != 0 ||
+ vstor_packet->vm_srb.srb_status != 1) {
DPRINT_WARN(STORVSC,
"cmd 0x%x scsi status 0x%x srb status 0x%x\n",
vstor_packet->vm_srb.cdb[0],
vstor_packet->vm_srb.scsi_status,
vstor_packet->vm_srb.srb_status);
}
- if ((request->status & 0xFF) == 0x02) {
+ if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
/* CHECK_CONDITION */
if (vstor_packet->vm_srb.srb_status & 0x80) {
/* autosense data available */
}
}
- /* TODO: */
- request->bytes_xfer = vstor_packet->vm_srb.data_transfer_length;
request->extension.on_io_completion(request);
(struct host_device_context *)scmnd->device->host->hostdata;
void (*scsi_done_fn)(struct scsi_cmnd *);
struct scsi_sense_hdr sense_hdr;
+ struct vmscsi_request *vm_srb;
/* ASSERT(request == &cmd_request->request); */
/* ASSERT(scmnd); */
cmd_request->bounce_sgl_count);
}
- scmnd->result = request->status;
+ vm_srb = &request->extension.vstor_packet.vm_srb;
+ scmnd->result = vm_srb->scsi_status;
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
/* ASSERT(request->BytesXfer <= request->data_buffer.Length); */
scsi_set_resid(scmnd,
- request->data_buffer.len - request->bytes_xfer);
+ request->data_buffer.len - vm_srb->data_transfer_length);
scsi_done_fn = scmnd->scsi_done;