xen/blkback: Separate the bio allocation and the bio submission.
/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

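/*
 * A guest WRITE_BARRIER is implemented as an ordinary write with the
 * REQ_FLUSH and REQ_FUA flags set, which provides the ordering and
 * durability the barrier requires.
 */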
#define WRITE_BARRIER   (REQ_WRITE | REQ_FLUSH | REQ_FUA)

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
static unsigned int debug_lvl;
module_param(log_stats, uint, 0644);
module_param(debug_lvl, uint, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
        struct blkif_st       *blkif;
        u64            id;
        int            nr_pages;
        atomic_t       pendcnt;
        unsigned short operation;
        int            status;
        struct list_head free_list;
};

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
        struct pending_req      *pending_reqs;
        /* List of all 'pending_req' available */
        struct list_head        pending_free;
        /* And its spinlock. */
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;
        /* The list of all pages that are available. */
        struct page             **pending_pages;
        /* And the grant handles that are available. */
        grant_handle_t          *pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index in the pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
        return (req - blkbk->pending_reqs) *
                BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

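/*
 * Convenience accessors. Note the asymmetry: 'pending_page' expands to a
 * plain member lookup and so must be invoked as blkbk->pending_page(req, seg),
 * while 'pending_handle' references 'blkbk' internally.
 */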
#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
        unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
        return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
        (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])


static int do_block_io_op(struct blkif_st *blkif);
static void dispatch_rw_block_io(struct blkif_st *blkif,
                                 struct blkif_request *req,
                                 struct pending_req *pending_req);
static void make_response(struct blkif_st *blkif, u64 id,
                          unsigned short op, int st);

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        if (!list_empty(&blkbk->pending_free)) {
                req = list_entry(blkbk->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkbk->pending_free_lock, flags);
        was_empty = list_empty(&blkbk->pending_free);
        list_add(&req->free_list, &blkbk->pending_free);
        spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkbk->pending_free_wq);
}

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void fast_flush_area(struct pending_req *req)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int ret;

        for (i = 0; i < req->nr_pages; i++) {
                handle = pending_handle(req, i);
                if (handle == BLKBACK_INVALID_HANDLE)
                        continue;
                gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
                                    GNTMAP_host_map, handle);
                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

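        /* Unmap every collected grant in one batched hypercall. */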
        ret = HYPERVISOR_grant_table_op(
                GNTTABOP_unmap_grant_ref, unmap, invcount);
        BUG_ON(ret);
        /* Note, we use invcount, not req->nr_pages, so we can't index
         * using vaddr(req, i).
         */
        for (i = 0; i < invcount; i++) {
                ret = m2p_remove_override(
                        virt_to_page(unmap[i].host_addr), false);
                if (ret) {
                        printk(KERN_ALERT "Failed to remove M2P override for " \
                                "%lx\n", (unsigned long)unmap[i].host_addr);
                        continue;
                }
        }
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct blkif_st *blkif)
{
        printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d  |  br %4d\n",
               current->comm, blkif->st_oo_req,
               blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
}

int blkif_schedule(void *arg)
{
        struct blkif_st *blkif = arg;
        struct vbd *vbd = &blkif->vbd;

        blkif_get(blkif);

        if (debug_lvl)
                printk(KERN_DEBUG "%s: started\n", current->comm);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_size(vbd)))
                        vbd_resize(blkif);
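
                /*
                 * Block until the frontend signals pending work, then until
                 * a free pending_req is available; both waits also end when
                 * this thread is asked to stop.
                 */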
                wait_event_interruptible(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        blkbk->pending_free_wq,
                        !list_empty(&blkbk->pending_free) ||
                        kthread_should_stop());

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        if (log_stats)
                print_stats(blkif);
        if (debug_lvl)
                printk(KERN_DEBUG "%s: exiting\n", current->comm);

        blkif->xenblkd = NULL;
        blkif_put(blkif);

        return 0;
}

/*
 * Completion handler for a bio: called via end_block_io_op(), which is
 * installed as bio->bi_end_io().
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
            (error == -EOPNOTSUPP)) {
                DPRINTK("blkback: write barrier op failed, not supported\n");
                blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                DPRINTK("Buffer not up-to-date at end of operation, "
                        "error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /* If all of the bio's have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                fast_flush_area(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                blkif_put(pending_req->blkif);
                free_req(pending_req);
        }
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}

/*
 * Notification from the guest OS.
 */

static void blkif_notify_work(struct blkif_st *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

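/*
 * Event-channel interrupt handler: flag that requests are waiting and wake
 * the per-device kernel thread.
 */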
irqreturn_t blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * Copy a 'struct blkif_request' from the ring buffer (which carries the
 * sectors we want, the number of them, grant references, etc.) and
 * translate it into a block API request for the appropriate block device.
 */
static int do_block_io_op(struct blkif_st *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req();
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

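                /*
                 * Copy the request into a private structure; its layout on
                 * the shared ring depends on the guest ABI (native, 32-bit
                 * x86 or 64-bit x86).
                 */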
                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                        blkif->st_rd_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                case BLKIF_OP_WRITE_BARRIER:
                        blkif->st_br_req++;
                        /* fall through */
                case BLKIF_OP_WRITE:
                        blkif->st_wr_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                default:
                        /* A good sign something is wrong: sleep for a while to
                         * avoid excessive CPU consumption by a bad guest. */
                        msleep(1);
                        DPRINTK("error: unknown block io operation [%d]\n",
                                req.operation);
                        make_response(blkif, req.id, req.operation,
                                      BLKIF_RSP_ERROR);
                        free_req(pending_req);
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }

        return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio' and
 * call 'submit_bio' to pass it to the underlying storage.
 */
static void dispatch_rw_block_io(struct blkif_st *blkif,
                                 struct blkif_request *req,
                                 struct pending_req *pending_req)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct phys_req preq;
        struct {
                unsigned long buf; unsigned int nsec;
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int ret, i, nbio = 0;
        int operation;
        struct blk_plug plug;
        struct request_queue *q;

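        /* Translate the blkif operation into the matching block-layer one. */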
        switch (req->operation) {
        case BLKIF_OP_READ:
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                operation = WRITE;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                operation = WRITE_BARRIER;
                break;
        default:
                operation = 0; /* make gcc happy */
                BUG();
        }

        /* Check that the number of segments is sane. */
        nseg = req->nr_segments;
        if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                DPRINTK("Bad number of segments in request (%d)\n", nseg);
                goto fail_response;
        }

        preq.dev           = req->handle;
        preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
        pending_req->operation = req->operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        /* Fill out preq.nr_sects with the proper number of sectors, and set
         * up map[..] with the PFN of the page in our domain and the
         * corresponding grant reference for each page.
         */
        for (i = 0; i < nseg; i++) {
                uint32_t flags;

                seg[i].nsec = req->u.rw.seg[i].last_sect -
                        req->u.rw.seg[i].first_sect + 1;
                if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;

                flags = GNTMAP_host_map;
                if (operation != READ)
                        flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->u.rw.seg[i].gref, blkif->domid);
        }

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
        BUG_ON(ret);

        /* Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (i = 0; i < nseg; i++) {
                if (unlikely(map[i].status != 0)) {
                        DPRINTK("invalid buffer -- could not remap it\n");
                        map[i].handle = BLKBACK_INVALID_HANDLE;
                        ret |= 1;
                }

                pending_handle(pending_req, i) = map[i].handle;

                if (ret)
                        continue;

                ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
                        blkbk->pending_page(pending_req, i), false);
                if (ret) {
                        printk(KERN_ALERT "Failed to install M2P override for"\
                                " %lx (ret: %d)\n", (unsigned long)
                                map[i].dev_bus_addr, ret);
                        /* We could switch over to GNTTABOP_copy */
                        continue;
                }

                seg[i].buf  = map[i].dev_bus_addr |
                        (req->u.rw.seg[i].first_sect << 9);
        }

        /* If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * fast_flush_area.
         */
        if (ret)
                goto fail_flush;

        if (vbd_translate(&preq, blkif, operation) != 0) {
                DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
                        operation == READ ? "read" : "write",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, preq.dev);
                goto fail_flush;
        }

        /* This corresponding blkif_put is done in __end_block_io_op */
        blkif_get(blkif);

        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        DPRINTK("Misaligned I/O request from domain %d",
                                blkif->domid);
                        goto fail_put_bio;
                }

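                /*
                 * Start a new bio whenever there is none yet, or when the
                 * current one cannot accept this segment (bio_add_page()
                 * returns 0); each new bio is sized for the segments that
                 * remain.
                 */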
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     blkbk->pending_page(pending_req, i),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {

                        bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a barrier. */
        if (!bio) {
                BUG_ON(operation != WRITE_BARRIER);
                bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
                bio->bi_sector  = -1;
        }

        /*
         * Set the pending count in one go so the submission loop below does
         * not have to call atomic_inc for each bio.
         */
        atomic_set(&pending_req->pendcnt, nbio);

        /* Get a reference count for the disk queue and start sending I/O */
        q = bdev_get_queue(preq.bdev);
        blk_get_queue(q);
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        blk_finish_plug(&plug);
        /* Let the I/Os go.. */
        blk_put_queue(q);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation == WRITE || operation == WRITE_BARRIER)
                blkif->st_wr_sect += preq.nr_sects;

        return;

 fail_flush:
        fast_flush_area(pending_req);
 fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
        msleep(1); /* back off a bit */
        return;

 fail_put_bio:
        /* biolist[] may end with a NULL slot if bio_alloc just failed. */
        for (i = 0; i < nbio; i++)
                if (biolist[i])
                        bio_put(biolist[i]);
        /* __end_block_io_op expects exactly one outstanding completion. */
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct blkif_st *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response  resp;
        unsigned long     flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do = 0;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

        } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
                more_to_do = 1;
        }

        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

        if (more_to_do)
                blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

static int __init blkif_init(void)
{
        int i, mmap_pages;
        int rc = 0;

        if (!xen_pv_domain())
                return -ENODEV;

        blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
        if (!blkbk) {
                printk(KERN_ALERT "%s: out of memory!\n", __func__);
                return -ENOMEM;
        }

        mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

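        /*
         * Pre-allocate the request descriptors, plus one grant handle and
         * one page per segment of every possible in-flight request.
         */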
        blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
                                        blkif_reqs, GFP_KERNEL);
        blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
                                        mmap_pages, GFP_KERNEL);

        if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
            !blkbk->pending_pages) {
                rc = -ENOMEM;
                goto out_of_memory;
        }

        for (i = 0; i < mmap_pages; i++) {
                blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
                blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
                if (blkbk->pending_pages[i] == NULL) {
                        rc = -ENOMEM;
                        goto out_of_memory;
                }
        }
        rc = blkif_interface_init();
        if (rc)
                goto failed_init;

        memset(blkbk->pending_reqs, 0,
               blkif_reqs * sizeof(blkbk->pending_reqs[0]));

        INIT_LIST_HEAD(&blkbk->pending_free);
        spin_lock_init(&blkbk->pending_free_lock);
        init_waitqueue_head(&blkbk->pending_free_wq);

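        /* Seed the free list with every pre-allocated request descriptor. */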
        for (i = 0; i < blkif_reqs; i++)
                list_add_tail(&blkbk->pending_reqs[i].free_list,
                              &blkbk->pending_free);

        rc = blkif_xenbus_init();
        if (rc)
                goto failed_init;

        return 0;

 out_of_memory:
        printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
        kfree(blkbk->pending_reqs);
        kfree(blkbk->pending_grant_handles);
        if (blkbk->pending_pages) {
                for (i = 0; i < mmap_pages; i++) {
                        if (blkbk->pending_pages[i])
                                __free_page(blkbk->pending_pages[i]);
                }
                kfree(blkbk->pending_pages);
        }
        kfree(blkbk);
        blkbk = NULL;
        return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");