int status = BLKIF_RSP_OKAY;
struct block_device *bdev = blkif->vbd.bdev;
- if (blkif->blk_backend_type == BLKIF_BACKEND_PHY) {
- unsigned long secure = (blkif->vbd.discard_secure &&
- (req->u.discard.flag & BLKIF_OP_DISCARD_FLAG_SECURE)) ?
- BLKDEV_DISCARD_SECURE : 0;
+ if (blkif->blk_backend_type == BLKIF_BACKEND_PHY)
/* just forward the discard request */
err = blkdev_issue_discard(bdev,
req->u.discard.sector_number,
req->u.discard.nr_sectors,
- GFP_KERNEL, secure);
- } else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
+ GFP_KERNEL, 0);
+ else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
/* punch a hole in the backing file */
struct loop_device *lo = bdev->bd_disk->private_data;
struct file *file = lo->lo_backing_file;
make_response(blkif, req->id, req->operation, status);
}
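For reference (not part of the patch): the BLKIF_BACKEND_FILE branch, truncated above, resolves the discard by punching a hole in the loop device's backing file. A minimal sketch, assuming the f_op->fallocate interface of this kernel generation, with the range converted from 512-byte sectors to bytes:

	/* Deallocate the discarded range but keep the file size unchanged,
	 * mirroring what a device-level discard does. */
	if (file && file->f_op && file->f_op->fallocate)
		err = file->f_op->fallocate(file,
				FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
				req->u.discard.sector_number << 9,
				req->u.discard.nr_sectors << 9);
	else
		err = -EOPNOTSUPP;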
-static void xen_blk_drain_io(struct xen_blkif *blkif)
-{
- atomic_set(&blkif->drain, 1);
- do {
- wait_for_completion_interruptible_timeout(
- &blkif->drain_complete, HZ);
-
- if (!atomic_read(&blkif->drain))
- break;
- /* The initial value is one, and one refcnt taken at the
- * start of the xen_blkif_schedule thread. */
- if (atomic_read(&blkif->refcnt) <= 2)
- break;
- } while (!kthread_should_stop());
- atomic_set(&blkif->drain, 0);
-}
-
/*
* Completion callback on the bio's. Called as bh->b_end_io()
*/
pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
- } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
- (error == -EOPNOTSUPP)) {
- pr_debug(DRV_PFX "write barrier op failed, not supported\n");
- xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
- pending_req->status = BLKIF_RSP_EOPNOTSUPP;
} else if (error) {
pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
" error=%d\n", error);
make_response(pending_req->blkif, pending_req->id,
pending_req->operation, pending_req->status);
xen_blkif_put(pending_req->blkif);
free_req(pending_req);
- if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
- if (atomic_read(&pending_req->blkif->drain))
- complete(&pending_req->blkif->drain_complete);
- }
}
}
return more_to_do;
}
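Both completion paths above end in make_response(). A condensed sketch of its assumed shape in this driver generation: the response is placed on the shared ring under blk_ring_lock, then the frontend is kicked through the event channel. Only the native-ABI ring is shown here; the real function switches on blk_protocol to also handle the x86_32/x86_64 compat rings:

	static void make_response(struct xen_blkif *blkif, u64 id,
				  unsigned short op, int st)
	{
		struct blkif_response resp;
		union blkif_back_rings *blk_rings = &blkif->blk_rings;
		unsigned long flags;
		int notify;

		resp.id        = id;
		resp.operation = op;
		resp.status    = st;

		spin_lock_irqsave(&blkif->blk_ring_lock, flags);
		/* Copy the response into the next free ring slot and
		 * advance the private producer index. */
		memcpy(RING_GET_RESPONSE(&blk_rings->native,
					 blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		blk_rings->common.rsp_prod_pvt++;
		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
		spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
		if (notify)
			notify_remote_via_irq(blkif->irq);
	}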
+
/*
* Transmutation of the 'struct blkif_request' to a proper 'struct bio'
* and call the 'submit_bio' to pass it to the underlying storage.
int i, nbio = 0;
int operation;
struct blk_plug plug;
- bool drain = false;
switch (req->operation) {
case BLKIF_OP_READ:
blkif->st_rd_req++;
operation = READ;
break;
case BLKIF_OP_WRITE:
blkif->st_wr_req++;
operation = WRITE_ODIRECT;
break;
- case BLKIF_OP_WRITE_BARRIER:
- drain = true;
case BLKIF_OP_FLUSH_DISKCACHE:
blkif->st_f_req++;
operation = WRITE_FLUSH;
break;
case BLKIF_OP_DISCARD:
blkif->st_ds_req++;
operation = REQ_DISCARD;
break;
+ case BLKIF_OP_WRITE_BARRIER:
default:
operation = 0; /* make gcc happy */
goto fail_response;
}
}
- /* Wait on all outstanding I/O's and once that has been completed
- * issue the WRITE_FLUSH.
- */
- if (drain)
- xen_blk_drain_io(pending_req->blkif);
-
/*
* If we have failed at this point, we need to undo the M2P override,
* set gnttab_set_unmap_op on all of the grant references and perform
* the hypercall to unmap the grants - that is all done in
* xen_blkbk_unmap.
*/
if (operation != REQ_DISCARD &&
xen_blkbk_map(req, pending_req, seg))
goto fail_flush;
if (operation == READ)
blkif->st_rd_sect += preq.nr_sects;
- else if (operation == WRITE_ODIRECT || operation == WRITE_FLUSH)
+ else if (operation & WRITE)
blkif->st_wr_sect += preq.nr_sects;
return 0;
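The write-accounting test above masks with WRITE rather than comparing for equality because both write variants are built from the WRITE bit plus extra request flags; roughly, per include/linux/fs.h of this era (exact flag sets vary between releases):

	#define WRITE_ODIRECT	(WRITE | REQ_SYNC)
	#define WRITE_FLUSH	(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)

So `operation & WRITE` is true for both variants, while `operation == WRITE` would match neither, since `operation` is only ever set to READ, WRITE_ODIRECT, WRITE_FLUSH, or REQ_DISCARD.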
struct blkif_x86_32_request_discard {
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
uint64_t nr_sectors;
- uint32_t flag;
};
struct blkif_x86_32_request {
struct blkif_x86_64_request_discard {
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
uint64_t nr_sectors;
- uint32_t flag;
};
struct blkif_x86_64_request {
/* Cached size parameter. */
sector_t size;
bool flush_support;
- bool discard_secure;
};
struct backend_info;
atomic_t refcnt;
wait_queue_head_t wq;
- /* for barrier (drain) requests */
- struct completion drain_complete;
- atomic_t drain;
/* One thread per one blkif. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
struct backend_info *be, int state);
-int xen_blkbk_barrier(struct xenbus_transaction xbt,
- struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
case BLKIF_OP_DISCARD:
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
- dst->u.discard.flag = src->u.discard.flag;
break;
default:
break;
case BLKIF_OP_DISCARD:
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
- dst->u.discard.flag = src->u.discard.flag;
break;
default:
break;
spin_lock_init(&blkif->blk_ring_lock);
atomic_set(&blkif->refcnt, 1);
init_waitqueue_head(&blkif->wq);
- init_completion(&blkif->drain_complete);
- atomic_set(&blkif->drain, 0);
blkif->st_print = jiffies;
init_waitqueue_head(&blkif->waiting_to_free);
if (q && q->flush_flags)
vbd->flush_support = true;
- if (q && blk_queue_secdiscard(q))
- vbd->discard_secure = true;
-
DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
state = 1;
blkif->blk_backend_type = BLKIF_BACKEND_PHY;
}
- /* Optional. */
- err = xenbus_printf(xbt, dev->nodename,
- "discard-secure", "%d",
- blkif->vbd.discard_secure);
- if (err) {
- xenbus_dev_fatal(dev, err,
- "writting discard-secure");
- goto kfree;
- }
}
} else {
err = PTR_ERR(type);
out:
return err;
}
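For context (not part of the patch): after this change xen_blkbk_discard() still advertises the base feature and its two remaining tuning knobs. A hedged sketch of the xenbus writes involved, abridged; 'q' stands for the backing request queue and 'state' is 1 when discard is usable, both assumed names for this sketch:

	err = xenbus_printf(xbt, dev->nodename, "discard-granularity",
			    "%u", q->limits.discard_granularity);
	if (!err)
		err = xenbus_printf(xbt, dev->nodename, "discard-alignment",
				    "%u", q->limits.discard_alignment);
	if (!err)
		err = xenbus_printf(xbt, dev->nodename, "feature-discard",
				    "%d", state);
	if (err)
		xenbus_dev_fatal(dev, err, "writing discard feature nodes");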
-int xen_blkbk_barrier(struct xenbus_transaction xbt,
- struct backend_info *be, int state)
-{
- struct xenbus_device *dev = be->dev;
- int err;
-
- err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
- "%d", state);
- if (err)
- xenbus_dev_fatal(dev, err, "writing feature-barrier");
-
- return err;
-}
/*
* Entry point to this code when a new device is created. Allocate the basic
err = xen_blkbk_discard(xbt, be);
- /* If we can't advertise it is OK. */
- err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
-
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
if (err) {
unsigned long shadow_free;
unsigned int feature_flush;
unsigned int flush_op;
- unsigned int feature_discard:1;
- unsigned int feature_secdiscard:1;
+ unsigned int feature_discard;
unsigned int discard_granularity;
unsigned int discard_alignment;
int is_ready;
ring_req->operation = info->flush_op;
}
- if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
+ if (unlikely(req->cmd_flags & REQ_DISCARD)) {
/* id, sector_number and handle are set above. */
ring_req->operation = BLKIF_OP_DISCARD;
ring_req->nr_segments = 0;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
- if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
- ring_req->u.discard.flag = BLKIF_OP_DISCARD_FLAG_SECURE;
} else {
ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
rq->limits.discard_granularity = info->discard_granularity;
rq->limits.discard_alignment = info->discard_alignment;
- if (info->feature_secdiscard)
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
info->gd->disk_name);
error = -EOPNOTSUPP;
info->feature_discard = 0;
- info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
- queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
}
__blk_end_request_all(req, error);
break;
char *type;
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned int discard_secure;
type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
if (IS_ERR(type))
return;
- info->feature_secdiscard = 0;
if (strncmp(type, "phy", 3) == 0) {
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"discard-granularity", "%u", &discard_granularity,
info->discard_granularity = discard_granularity;
info->discard_alignment = discard_alignment;
}
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-secure", "%d", &discard_secure,
- NULL);
- if (!err)
- info->feature_secdiscard = discard_secure;
-
} else if (strncmp(type, "file", 4) == 0)
info->feature_discard = 1;
static int __init xlblk_init(void)
{
- int ret;
-
if (!xen_domain())
return -ENODEV;
return -ENODEV;
}
- ret = xenbus_register_frontend(&blkfront);
- if (ret) {
- unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
- return ret;
- }
-
- return 0;
+ return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);
* e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
* http://www.seagate.com/staticfiles/support/disc/manuals/
* Interface%20manuals/100293068c.pdf
- * We also provide three extra XenBus options to the discard operation:
- * 'discard-granularity' - Max amount of sectors that can be discarded.
- * 'discard-alignment' - 4K, 128K, etc aligment on sectors to erased.
- * 'discard-secure' - whether the discard can also securely erase data.
+ * We also provide two extra XenBus options to the discard operation:
+ * 'discard-granularity' - Max amount of sectors that can be discarded.
+ * 'discard-alignment' - 4K, 128K, etc. alignment on sectors to be erased.
*/
#define BLKIF_OP_DISCARD 5
struct blkif_request_discard {
blkif_sector_t sector_number;
uint64_t nr_sectors;
-#define BLKIF_OP_DISCARD_FLAG_SECURE (1<<1) /* ignored if discard-secure=0 */
- uint32_t flag;
};
struct blkif_request {