#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
-static int do_block_io_op(struct xen_blkif *blkif);
-static int dispatch_rw_block_io(struct xen_blkif *blkif,
+static int do_block_io_op(struct xen_blkif_ring *ring);
+static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct blkif_request *req,
struct pending_req *pending_req);
-static void make_response(struct xen_blkif *blkif, u64 id,
+static void make_response(struct xen_blkif_ring *ring, u64 id,
unsigned short op, int st);
#define foreach_grant_safe(pos, n, rbtree, node) \
/*
- * We don't need locking around the persistent grant helpers
- * because blkback uses a single-thread for each backed, so we
- * can be sure that this functions will never be called recursively.
- *
- * The only exception to that is put_persistent_grant, that can be called
- * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
- * bit operations to modify the flags of a persistent grant and to count
- * the number of used grants.
+ * pers_gnts_lock must be held around all the persistent grant helpers,
+ * because blkback may use multiple threads/queues per backend.
*/
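A minimal sketch (editorial, not part of the patch) of the calling convention described above, using the lock, helper, and type names introduced in this series; the wrapper name example_put_gnt is purely illustrative:

        static void example_put_gnt(struct xen_blkif *blkif,
                                    struct persistent_gnt *gnt)
        {
                unsigned long flags;

                spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
                put_persistent_gnt(blkif, gnt); /* helpers check the lock is held */
                spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
        }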
static int add_persistent_gnt(struct xen_blkif *blkif,
struct persistent_gnt *persistent_gnt)
struct rb_node **new = NULL, *parent = NULL;
struct persistent_gnt *this;
+ lockdep_assert_held(&blkif->pers_gnts_lock);
if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
if (!blkif->vbd.overflow_max_grants)
blkif->vbd.overflow_max_grants = 1;
struct persistent_gnt *data;
struct rb_node *node = NULL;
+ lockdep_assert_held(&blkif->pers_gnts_lock);
node = blkif->persistent_gnts.rb_node;
while (node) {
data = container_of(node, struct persistent_gnt, node);
static void put_persistent_gnt(struct xen_blkif *blkif,
struct persistent_gnt *persistent_gnt)
{
+ lockdep_assert_held(&blkif->pers_gnts_lock);
if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
pr_alert_ratelimited("freeing a grant already unused\n");
set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL;
+ lockdep_assert_held(&blkif->pers_gnts_lock);
foreach_grant_safe(persistent_gnt, n, root, node) {
BUG_ON(persistent_gnt->handle ==
BLKBACK_INVALID_HANDLE);
int segs_to_unmap = 0;
struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
struct gntab_unmap_queue_data unmap_data;
+ unsigned long flags;
unmap_data.pages = pages;
unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL;
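+ /* The purge list is shared with the ring threads; drain it under pers_gnts_lock. */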
+ spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
while(!list_empty(&blkif->persistent_purge_list)) {
persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
struct persistent_gnt,
}
kfree(persistent_gnt);
}
+ spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
if (segs_to_unmap > 0) {
unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
unsigned int num_clean, total;
bool scan_used = false, clean_used = false;
struct rb_root *root;
+ unsigned long flags;
+ spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
(blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
!blkif->vbd.overflow_max_grants)) {
- return;
+ goto out;
}
if (work_busy(&blkif->persistent_purge_work)) {
pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
- return;
+ goto out;
}
num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
num_clean = min(blkif->persistent_gnt_c, num_clean);
if ((num_clean == 0) ||
(num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
- return;
+ goto out;
/*
* At this point, we can assure that there will be no calls
}
blkif->persistent_gnt_c -= (total - num_clean);
+ spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
blkif->vbd.overflow_max_grants = 0;
/* We can defer this work */
schedule_work(&blkif->persistent_purge_work);
pr_debug("Purged %u/%u\n", (total - num_clean), total);
return;
+
+out:
+ spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
+
+ return;
}
/*
* Retrieve from the 'pending_reqs' a free pending_req structure to be used.
*/
-static struct pending_req *alloc_req(struct xen_blkif *blkif)
+static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
struct pending_req *req = NULL;
unsigned long flags;
- spin_lock_irqsave(&blkif->pending_free_lock, flags);
- if (!list_empty(&blkif->pending_free)) {
- req = list_entry(blkif->pending_free.next, struct pending_req,
+ spin_lock_irqsave(&ring->pending_free_lock, flags);
+ if (!list_empty(&ring->pending_free)) {
+ req = list_entry(ring->pending_free.next, struct pending_req,
free_list);
list_del(&req->free_list);
}
- spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
+ spin_unlock_irqrestore(&ring->pending_free_lock, flags);
return req;
}
* Return the 'pending_req' structure back to the freepool. We also
* wake up the thread if it was waiting for a free page.
*/
-static void free_req(struct xen_blkif *blkif, struct pending_req *req)
+static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
unsigned long flags;
int was_empty;
- spin_lock_irqsave(&blkif->pending_free_lock, flags);
- was_empty = list_empty(&blkif->pending_free);
- list_add(&req->free_list, &blkif->pending_free);
- spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
+ spin_lock_irqsave(&ring->pending_free_lock, flags);
+ was_empty = list_empty(&ring->pending_free);
+ list_add(&req->free_list, &ring->pending_free);
+ spin_unlock_irqrestore(&ring->pending_free_lock, flags);
if (was_empty)
- wake_up(&blkif->pending_free_wq);
+ wake_up(&ring->pending_free_wq);
}
/*
/*
* Notification from the guest OS.
*/
-static void blkif_notify_work(struct xen_blkif *blkif)
+static void blkif_notify_work(struct xen_blkif_ring *ring)
{
- blkif->waiting_reqs = 1;
- wake_up(&blkif->wq);
+ ring->waiting_reqs = 1;
+ wake_up(&ring->wq);
}
irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
int xen_blkif_schedule(void *arg)
{
- struct xen_blkif *blkif = arg;
+ struct xen_blkif_ring *ring = arg;
+ struct xen_blkif *blkif = ring->blkif;
struct xen_vbd *vbd = &blkif->vbd;
unsigned long timeout;
int ret;
timeout = msecs_to_jiffies(LRU_INTERVAL);
timeout = wait_event_interruptible_timeout(
- blkif->wq,
- blkif->waiting_reqs || kthread_should_stop(),
+ ring->wq,
+ ring->waiting_reqs || kthread_should_stop(),
timeout);
if (timeout == 0)
goto purge_gnt_list;
timeout = wait_event_interruptible_timeout(
- blkif->pending_free_wq,
- !list_empty(&blkif->pending_free) ||
+ ring->pending_free_wq,
+ !list_empty(&ring->pending_free) ||
kthread_should_stop(),
timeout);
if (timeout == 0)
goto purge_gnt_list;
- blkif->waiting_reqs = 0;
+ ring->waiting_reqs = 0;
smp_mb(); /* clear flag *before* checking for work */
- ret = do_block_io_op(blkif);
+ ret = do_block_io_op(ring);
if (ret > 0)
- blkif->waiting_reqs = 1;
+ ring->waiting_reqs = 1;
if (ret == -EACCES)
- wait_event_interruptible(blkif->shutdown_wq,
+ wait_event_interruptible(ring->shutdown_wq,
kthread_should_stop());
purge_gnt_list:
if (log_stats)
print_stats(blkif);
- blkif->xenblkd = NULL;
+ ring->xenblkd = NULL;
xen_blkif_put(blkif);
return 0;
/*
* Remove persistent grants and empty the pool of free pages
*/
-void xen_blkbk_free_caches(struct xen_blkif *blkif)
+void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
+ struct xen_blkif *blkif = ring->blkif;
+ unsigned long flags;
+
/* Free all persistent grant pages */
+ spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
free_persistent_gnts(blkif, &blkif->persistent_gnts,
blkif->persistent_gnt_c);
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
blkif->persistent_gnt_c = 0;
+ spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
/* Since we are shutting down remove all pages from the buffer */
shrink_free_pagepool(blkif, 0 /* All */);
}
static unsigned int xen_blkbk_unmap_prepare(
- struct xen_blkif *blkif,
+ struct xen_blkif_ring *ring,
struct grant_page **pages,
unsigned int num,
struct gnttab_unmap_grant_ref *unmap_ops,
struct page **unmap_pages)
{
unsigned int i, invcount = 0;
+ unsigned long flags;
for (i = 0; i < num; i++) {
if (pages[i]->persistent_gnt != NULL) {
- put_persistent_gnt(blkif, pages[i]->persistent_gnt);
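+ /* Reached from interrupt context via the unmap path, hence irqsave. */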
+ spin_lock_irqsave(&ring->blkif->pers_gnts_lock, flags);
+ put_persistent_gnt(ring->blkif, pages[i]->persistent_gnt);
+ spin_unlock_irqrestore(&ring->blkif->pers_gnts_lock, flags);
continue;
}
if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
- struct pending_req* pending_req = (struct pending_req*) (data->data);
- struct xen_blkif *blkif = pending_req->blkif;
+ struct pending_req *pending_req = (struct pending_req *)(data->data);
+ struct xen_blkif_ring *ring = pending_req->ring;
+ struct xen_blkif *blkif = ring->blkif;
/* BUG_ON used to reproduce existing behaviour,
but is this the best way to deal with this? */
BUG_ON(result);
put_free_pages(blkif, data->pages, data->count);
- make_response(blkif, pending_req->id,
+ make_response(ring, pending_req->id,
pending_req->operation, pending_req->status);
- free_req(blkif, pending_req);
+ free_req(ring, pending_req);
/*
* Make sure the request is freed before releasing blkif,
* or there could be a race between free_req and the
* pending_free_wq if there's a drain going on, but it has
* to be taken into account if the current model is changed.
*/
- if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+ if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
complete(&blkif->drain_complete);
}
xen_blkif_put(blkif);
static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
- struct xen_blkif *blkif = req->blkif;
+ struct xen_blkif_ring *ring = req->ring;
struct grant_page **pages = req->segments;
unsigned int invcount;
- invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_segs,
+ invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
req->unmap, req->unmap_pages);
work->data = req;
* of hypercalls, but since this is only used in error paths there's
* no real need.
*/
-static void xen_blkbk_unmap(struct xen_blkif *blkif,
+static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
struct grant_page *pages[],
int num)
{
while (num) {
unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-
- invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
+
+ invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
unmap, unmap_pages);
if (invcount) {
ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret);
- put_free_pages(blkif, unmap_pages, invcount);
+ put_free_pages(ring->blkif, unmap_pages, invcount);
}
pages += batch;
num -= batch;
}
}
-static int xen_blkbk_map(struct xen_blkif *blkif,
+static int xen_blkbk_map(struct xen_blkif_ring *ring,
struct grant_page *pages[],
int num, bool ro)
{
int ret = 0;
int last_map = 0, map_until = 0;
int use_persistent_gnts;
+ struct xen_blkif *blkif = ring->blkif;
+ unsigned long irq_flags;
use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
for (i = map_until; i < num; i++) {
uint32_t flags;
- if (use_persistent_gnts)
+ if (use_persistent_gnts) {
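+ /* Look up the grant in the persistent-grant tree under pers_gnts_lock. */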
+ spin_lock_irqsave(&blkif->pers_gnts_lock, irq_flags);
persistent_gnt = get_persistent_gnt(
blkif,
pages[i]->gref);
+ spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
+ }
if (persistent_gnt) {
/*
persistent_gnt->gnt = map[new_map_idx].ref;
persistent_gnt->handle = map[new_map_idx].handle;
persistent_gnt->page = pages[seg_idx]->page;
+ spin_lock_irqsave(&blkif->pers_gnts_lock, irq_flags);
if (add_persistent_gnt(blkif,
persistent_gnt)) {
+ spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
kfree(persistent_gnt);
persistent_gnt = NULL;
goto next;
pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
persistent_gnt->gnt, blkif->persistent_gnt_c,
xen_blkif_max_pgrants);
+ spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
goto next;
}
if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
{
int rc;
- rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
+ rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
pending_req->nr_segs,
(pending_req->operation != BLKIF_OP_READ));
struct phys_req *preq)
{
struct grant_page **pages = pending_req->indirect_pages;
- struct xen_blkif *blkif = pending_req->blkif;
+ struct xen_blkif_ring *ring = pending_req->ring;
int indirect_grefs, rc, n, nseg, i;
struct blkif_request_segment *segments = NULL;
for (i = 0; i < indirect_grefs; i++)
pages[i]->gref = req->u.indirect.indirect_grefs[i];
- rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
+ rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
if (rc)
goto unmap;
unmap:
if (segments)
kunmap_atomic(segments);
- xen_blkbk_unmap(blkif, pages, indirect_grefs);
+ xen_blkbk_unmap(ring, pages, indirect_grefs);
return rc;
}
-static int dispatch_discard_io(struct xen_blkif *blkif,
+static int dispatch_discard_io(struct xen_blkif_ring *ring,
struct blkif_request *req)
{
int err = 0;
int status = BLKIF_RSP_OKAY;
+ struct xen_blkif *blkif = ring->blkif;
struct block_device *bdev = blkif->vbd.bdev;
unsigned long secure;
struct phys_req preq;
} else if (err)
status = BLKIF_RSP_ERROR;
- make_response(blkif, req->u.discard.id, req->operation, status);
+ make_response(ring, req->u.discard.id, req->operation, status);
xen_blkif_put(blkif);
return err;
}
-static int dispatch_other_io(struct xen_blkif *blkif,
+static int dispatch_other_io(struct xen_blkif_ring *ring,
struct blkif_request *req,
struct pending_req *pending_req)
{
- free_req(blkif, pending_req);
- make_response(blkif, req->u.other.id, req->operation,
+ free_req(ring, pending_req);
+ make_response(ring, req->u.other.id, req->operation,
BLKIF_RSP_EOPNOTSUPP);
return -EIO;
}
-static void xen_blk_drain_io(struct xen_blkif *blkif)
+static void xen_blk_drain_io(struct xen_blkif_ring *ring)
{
+ struct xen_blkif *blkif = ring->blkif;
+
atomic_set(&blkif->drain, 1);
do {
- if (atomic_read(&blkif->inflight) == 0)
+ if (atomic_read(&ring->inflight) == 0)
break;
wait_for_completion_interruptible_timeout(
&blkif->drain_complete, HZ);
if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
(error == -EOPNOTSUPP)) {
pr_debug("flush diskcache op failed, not supported\n");
- xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
+ xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
(error == -EOPNOTSUPP)) {
pr_debug("write barrier op failed, not supported\n");
- xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
+ xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
} else if (error) {
pr_debug("Buffer not up-to-date at end of operation,"
* and transmute it to the block API to hand it over to the proper block disk.
*/
static int
-__do_block_io_op(struct xen_blkif *blkif)
+__do_block_io_op(struct xen_blkif_ring *ring)
{
- union blkif_back_rings *blk_rings = &blkif->blk_rings;
+ union blkif_back_rings *blk_rings = &ring->blk_rings;
struct blkif_request req;
struct pending_req *pending_req;
RING_IDX rc, rp;
if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
rc = blk_rings->common.rsp_prod_pvt;
pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
- rp, rc, rp - rc, blkif->vbd.pdevice);
+ rp, rc, rp - rc, ring->blkif->vbd.pdevice);
return -EACCES;
}
while (rc != rp) {
break;
}
- pending_req = alloc_req(blkif);
+ pending_req = alloc_req(ring);
if (NULL == pending_req) {
- blkif->st_oo_req++;
+ ring->blkif->st_oo_req++;
more_to_do = 1;
break;
}
- switch (blkif->blk_protocol) {
+ switch (ring->blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
break;
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_INDIRECT:
- if (dispatch_rw_block_io(blkif, &req, pending_req))
+ if (dispatch_rw_block_io(ring, &req, pending_req))
goto done;
break;
case BLKIF_OP_DISCARD:
- free_req(blkif, pending_req);
- if (dispatch_discard_io(blkif, &req))
+ free_req(ring, pending_req);
+ if (dispatch_discard_io(ring, &req))
goto done;
break;
default:
- if (dispatch_other_io(blkif, &req, pending_req))
+ if (dispatch_other_io(ring, &req, pending_req))
goto done;
break;
}
}
static int
-do_block_io_op(struct xen_blkif *blkif)
+do_block_io_op(struct xen_blkif_ring *ring)
{
- union blkif_back_rings *blk_rings = &blkif->blk_rings;
+ union blkif_back_rings *blk_rings = &ring->blk_rings;
int more_to_do;
do {
- more_to_do = __do_block_io_op(blkif);
+ more_to_do = __do_block_io_op(ring);
if (more_to_do)
break;
* Transmutation of the 'struct blkif_request' to a proper 'struct bio'
* and call the 'submit_bio' to pass it to the underlying storage.
*/
-static int dispatch_rw_block_io(struct xen_blkif *blkif,
+static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct blkif_request *req,
struct pending_req *pending_req)
{
switch (req_operation) {
case BLKIF_OP_READ:
- blkif->st_rd_req++;
+ ring->blkif->st_rd_req++;
operation = READ;
break;
case BLKIF_OP_WRITE:
- blkif->st_wr_req++;
+ ring->blkif->st_wr_req++;
operation = WRITE_ODIRECT;
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
case BLKIF_OP_FLUSH_DISKCACHE:
- blkif->st_f_req++;
+ ring->blkif->st_f_req++;
operation = WRITE_FLUSH;
break;
default:
preq.nr_sects = 0;
- pending_req->blkif = blkif;
+ pending_req->ring = ring;
pending_req->id = req->u.rw.id;
pending_req->operation = req_operation;
pending_req->status = BLKIF_RSP_OKAY;
goto fail_response;
}
- if (xen_vbd_translate(&preq, blkif, operation) != 0) {
+ if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
operation == READ ? "read" : "write",
preq.sector_number,
preq.sector_number + preq.nr_sects,
- blkif->vbd.pdevice);
+ ring->blkif->vbd.pdevice);
goto fail_response;
}
if (((int)preq.sector_number|(int)seg[i].nsec) &
((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
pr_debug("Misaligned I/O request from domain %d\n",
- blkif->domid);
+ ring->blkif->domid);
goto fail_response;
}
}
* issue the WRITE_FLUSH.
*/
if (drain)
- xen_blk_drain_io(pending_req->blkif);
+ xen_blk_drain_io(pending_req->ring);
/*
* If we have failed at this point, we need to undo the M2P override,
* This corresponding xen_blkif_put is done in __end_block_io_op, or
* below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
*/
- xen_blkif_get(blkif);
- atomic_inc(&blkif->inflight);
+ xen_blkif_get(ring->blkif);
+ atomic_inc(&ring->inflight);
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
blk_finish_plug(&plug);
if (operation == READ)
- blkif->st_rd_sect += preq.nr_sects;
+ ring->blkif->st_rd_sect += preq.nr_sects;
else if (operation & WRITE)
- blkif->st_wr_sect += preq.nr_sects;
+ ring->blkif->st_wr_sect += preq.nr_sects;
return 0;
fail_flush:
- xen_blkbk_unmap(blkif, pending_req->segments,
+ xen_blkbk_unmap(ring, pending_req->segments,
pending_req->nr_segs);
fail_response:
/* Haven't submitted any bio's yet. */
- make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
- free_req(blkif, pending_req);
+ make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
+ free_req(ring, pending_req);
msleep(1); /* back off a bit */
return -EIO;
/*
* Put a response on the ring on how the operation fared.
*/
-static void make_response(struct xen_blkif *blkif, u64 id,
+static void make_response(struct xen_blkif_ring *ring, u64 id,
unsigned short op, int st)
{
struct blkif_response resp;
unsigned long flags;
- union blkif_back_rings *blk_rings = &blkif->blk_rings;
+ union blkif_back_rings *blk_rings;
int notify;
resp.id = id;
resp.operation = op;
resp.status = st;
- spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+ spin_lock_irqsave(&ring->blk_ring_lock, flags);
+ blk_rings = &ring->blk_rings;
/* Place on the response ring for the relevant domain. */
- switch (blkif->blk_protocol) {
+ switch (ring->blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
&resp, sizeof(resp));
}
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
- spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+ spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
if (notify)
- notify_remote_via_irq(blkif->irq);
+ notify_remote_via_irq(ring->irq);
}
static int __init xen_blkif_init(void)
char name[BLKBACK_NAME_LEN];
/* Not ready to connect? */
- if (!blkif->irq || !blkif->vbd.bdev)
+ if (!blkif->ring.irq || !blkif->vbd.bdev)
return;
/* Already connected? */
}
invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
- blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, "%s", name);
- if (IS_ERR(blkif->xenblkd)) {
- err = PTR_ERR(blkif->xenblkd);
- blkif->xenblkd = NULL;
+ blkif->ring.xenblkd = kthread_run(xen_blkif_schedule, &blkif->ring,
+ "%s", name);
+ if (IS_ERR(blkif->ring.xenblkd)) {
+ err = PTR_ERR(blkif->ring.xenblkd);
+ blkif->ring.xenblkd = NULL;
xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
return;
}
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
struct xen_blkif *blkif;
+ struct xen_blkif_ring *ring;
BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
return ERR_PTR(-ENOMEM);
blkif->domid = domid;
- spin_lock_init(&blkif->blk_ring_lock);
atomic_set(&blkif->refcnt, 1);
- init_waitqueue_head(&blkif->wq);
init_completion(&blkif->drain_complete);
- atomic_set(&blkif->drain, 0);
- blkif->st_print = jiffies;
- blkif->persistent_gnts.rb_node = NULL;
+ INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
spin_lock_init(&blkif->free_pages_lock);
INIT_LIST_HEAD(&blkif->free_pages);
INIT_LIST_HEAD(&blkif->persistent_purge_list);
- blkif->free_pages_num = 0;
- atomic_set(&blkif->persistent_gnt_in_use, 0);
- atomic_set(&blkif->inflight, 0);
+ blkif->st_print = jiffies;
INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
- INIT_LIST_HEAD(&blkif->pending_free);
- INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
- spin_lock_init(&blkif->pending_free_lock);
- init_waitqueue_head(&blkif->pending_free_wq);
- init_waitqueue_head(&blkif->shutdown_wq);
+ ring = &blkif->ring;
+ ring->blkif = blkif;
+ spin_lock_init(&ring->blk_ring_lock);
+ init_waitqueue_head(&ring->wq);
+
+ INIT_LIST_HEAD(&ring->pending_free);
+ spin_lock_init(&ring->pending_free_lock);
+ init_waitqueue_head(&ring->pending_free_wq);
+ init_waitqueue_head(&ring->shutdown_wq);
return blkif;
}
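For orientation, a rough sketch of the xen_blkif / xen_blkif_ring split that the initializers above assume. Field names are taken from the usages in this patch; the authoritative definitions live in the driver's common.h and may differ in layout and in fields not exercised here:

        struct xen_blkif_ring {
                spinlock_t              blk_ring_lock;
                union blkif_back_rings  blk_rings;
                void                    *blk_ring;
                unsigned int            irq;

                wait_queue_head_t       wq;
                int                     waiting_reqs;
                atomic_t                inflight;
                struct task_struct      *xenblkd;

                struct list_head        pending_free;
                spinlock_t              pending_free_lock;
                wait_queue_head_t       pending_free_wq;
                wait_queue_head_t       shutdown_wq;

                struct xen_blkif        *blkif;        /* back pointer to the backend */
        };

        struct xen_blkif {
                /* ... per-backend state: domid, vbd, persistent grants, stats ... */
                spinlock_t              pers_gnts_lock;
                struct xen_blkif_ring   ring;          /* a single ring per backend, for now */
        };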
-static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
+static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
unsigned int nr_grefs, unsigned int evtchn)
{
int err;
+ struct xen_blkif *blkif = ring->blkif;
/* Already connected through? */
- if (blkif->irq)
+ if (ring->irq)
return 0;
err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
- &blkif->blk_ring);
+ &ring->blk_ring);
if (err < 0)
return err;
case BLKIF_PROTOCOL_NATIVE:
{
struct blkif_sring *sring;
- sring = (struct blkif_sring *)blkif->blk_ring;
- BACK_RING_INIT(&blkif->blk_rings.native, sring,
+ sring = (struct blkif_sring *)ring->blk_ring;
+ BACK_RING_INIT(&ring->blk_rings.native, sring,
XEN_PAGE_SIZE * nr_grefs);
break;
}
case BLKIF_PROTOCOL_X86_32:
{
struct blkif_x86_32_sring *sring_x86_32;
- sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring;
- BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32,
+ sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring;
+ BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32,
XEN_PAGE_SIZE * nr_grefs);
break;
}
case BLKIF_PROTOCOL_X86_64:
{
struct blkif_x86_64_sring *sring_x86_64;
- sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring;
- BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64,
+ sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring;
+ BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64,
XEN_PAGE_SIZE * nr_grefs);
break;
}
err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
xen_blkif_be_int, 0,
- "blkif-backend", blkif);
+ "blkif-backend", ring);
if (err < 0) {
- xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
- blkif->blk_rings.common.sring = NULL;
+ xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
+ ring->blk_rings.common.sring = NULL;
return err;
}
- blkif->irq = err;
+ ring->irq = err;
return 0;
}
{
struct pending_req *req, *n;
int i = 0, j;
+ struct xen_blkif_ring *ring = &blkif->ring;
- if (blkif->xenblkd) {
- kthread_stop(blkif->xenblkd);
- wake_up(&blkif->shutdown_wq);
- blkif->xenblkd = NULL;
+ if (ring->xenblkd) {
+ kthread_stop(ring->xenblkd);
+ wake_up(&ring->shutdown_wq);
+ ring->xenblkd = NULL;
}
/* The above kthread_stop() guarantees that at this point we
* don't have any discard_io or other_io requests. So, checking
* for inflight IO is enough.
*/
- if (atomic_read(&blkif->inflight) > 0)
+ if (atomic_read(&ring->inflight) > 0)
return -EBUSY;
- if (blkif->irq) {
- unbind_from_irqhandler(blkif->irq, blkif);
- blkif->irq = 0;
+ if (ring->irq) {
+ unbind_from_irqhandler(ring->irq, ring);
+ ring->irq = 0;
}
- if (blkif->blk_rings.common.sring) {
- xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
- blkif->blk_rings.common.sring = NULL;
+ if (ring->blk_rings.common.sring) {
+ xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
+ ring->blk_rings.common.sring = NULL;
}
/* Remove all persistent grants and the cache of ballooned pages. */
- xen_blkbk_free_caches(blkif);
+ xen_blkbk_free_caches(ring);
/* Check that there is no request in use */
- list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+ list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
list_del(&req->free_list);
for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
char protocol[64] = "";
struct pending_req *req, *n;
int err, i, j;
+ struct xen_blkif_ring *ring = &be->blkif->ring;
pr_debug("%s %s\n", __func__, dev->otherend);
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
goto fail;
- list_add_tail(&req->free_list, &be->blkif->pending_free);
+ list_add_tail(&req->free_list, &ring->pending_free);
for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
if (!req->segments[j])
}
/* Map the shared frame, irq etc. */
- err = xen_blkif_map(be->blkif, ring_ref, nr_grefs, evtchn);
+ err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
if (err) {
xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
return err;
return 0;
fail:
- list_for_each_entry_safe(req, n, &be->blkif->pending_free, free_list) {
+ list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
list_del(&req->free_list);
for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
if (!req->segments[j])