1 /*
2  *  linux/fs/nfs/blocklayout/blocklayout.c
3  *
4  *  Module for the NFSv4.1 pNFS block layout driver.
5  *
6  *  Copyright (c) 2006 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Andy Adamson <andros@citi.umich.edu>
10  *  Fred Isaman <iisaman@umich.edu>
11  *
12  * permission is granted to use, copy, create derivative works and
13  * redistribute this software and such derivative works for any purpose,
14  * so long as the name of the university of michigan is not used in
15  * any advertising or publicity pertaining to the use or distribution
16  * of this software without specific, written prior authorization.  if
17  * the above copyright notice or any other identification of the
18  * university of michigan is included in any copy of any portion of
19  * this software, then the disclaimer below must also be included.
20  *
21  * this software is provided as is, without representation from the
22  * university of michigan as to its fitness for any purpose, and without
23  * warranty by the university of michigan of any kind, either express
24  * or implied, including without limitation the implied warranties of
25  * merchantability and fitness for a particular purpose.  the regents
26  * of the university of michigan shall not be liable for any damages,
27  * including special, indirect, incidental, or consequential damages,
28  * with respect to any claim arising out or in connection with the use
29  * of the software, even if it has been or is hereafter advised of the
30  * possibility of such damages.
31  */
32
33 #include <linux/module.h>
34 #include <linux/init.h>
35 #include <linux/mount.h>
36 #include <linux/namei.h>
37 #include <linux/bio.h>          /* struct bio */
38 #include <linux/buffer_head.h>  /* various write calls */
39 #include <linux/prefetch.h>
40
41 #include "blocklayout.h"
42
43 #define NFSDBG_FACILITY NFSDBG_PNFS_LD
44
45 MODULE_LICENSE("GPL");
46 MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
47 MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
48
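/*
 * Wait queue shared with the deviceid upcall code (blocklayoutdev.c):
 * threads decoding a device sleep here until the userspace mapping daemon
 * replies through the rpc_pipefs pipe registered below; bl_pipe_downcall()
 * wakes the queue when the reply arrives.
 */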
49 wait_queue_head_t bl_wq;
50
51 static void print_page(struct page *page)
52 {
53         dprintk("PRINTPAGE page %p\n", page);
54         dprintk("       PagePrivate %d\n", PagePrivate(page));
55         dprintk("       PageUptodate %d\n", PageUptodate(page));
56         dprintk("       PageError %d\n", PageError(page));
57         dprintk("       PageDirty %d\n", PageDirty(page));
58         dprintk("       PageReferenced %d\n", PageReferenced(page));
59         dprintk("       PageLocked %d\n", PageLocked(page));
60         dprintk("       PageWriteback %d\n", PageWriteback(page));
61         dprintk("       PageMappedToDisk %d\n", PageMappedToDisk(page));
62         dprintk("\n");
63 }
64
65 /* Given the block extent 'be' associated with isect, determine whether the
66  * page data needs to be initialized.
67  */
68 static int is_hole(struct pnfs_block_extent *be, sector_t isect)
69 {
70         if (be->be_state == PNFS_BLOCK_NONE_DATA)
71                 return 1;
72         else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
73                 return 0;
74         else
75                 return !bl_is_sector_init(be->be_inval, isect);
76 }
77
78 /* Given the block extent 'be' associated with isect, determine whether the
79  * page data can be written to disk.
80  */
81 static int is_writable(struct pnfs_block_extent *be, sector_t isect)
82 {
83         return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
84                 be->be_state == PNFS_BLOCK_INVALID_DATA);
85 }
86
87 /* The data we are handed might be spread across several bios.  We need
88  * to track when the last one is finished.
89  */
90 struct parallel_io {
91         struct kref refcnt;
92         void (*pnfs_callback) (void *data, int num_se);
93         void *data;
94         int bse_count;
95 };
96
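/*
 * Typical lifecycle (as used by bl_read_pagelist/bl_write_pagelist below):
 *
 *	par = alloc_parallel(data);		refcount == 1
 *	par->pnfs_callback = ...;
 *	...
 *	bio = bl_submit_bio(rw, bio);		takes a ref per submitted bio
 *	...
 *	put_parallel(par);			drop the initial reference
 *
 * Every bio end_io handler does a put_parallel(); when the final reference
 * is dropped, destroy_parallel() runs pnfs_callback with bse_count.
 */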
97 static inline struct parallel_io *alloc_parallel(void *data)
98 {
99         struct parallel_io *rv;
100
101         rv  = kmalloc(sizeof(*rv), GFP_NOFS);
102         if (rv) {
103                 rv->data = data;
104                 kref_init(&rv->refcnt);
105                 rv->bse_count = 0;
106         }
107         return rv;
108 }
109
110 static inline void get_parallel(struct parallel_io *p)
111 {
112         kref_get(&p->refcnt);
113 }
114
115 static void destroy_parallel(struct kref *kref)
116 {
117         struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);
118
119         dprintk("%s enter\n", __func__);
120         p->pnfs_callback(p->data, p->bse_count);
121         kfree(p);
122 }
123
124 static inline void put_parallel(struct parallel_io *p)
125 {
126         kref_put(&p->refcnt, destroy_parallel);
127 }
128
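/*
 * Submit a pending bio (if any) and always return NULL, so the caller can
 * submit and reset its pointer in one statement: bio = bl_submit_bio(rw, bio);
 * A parallel_io reference is taken for each bio that is actually submitted
 * and is dropped again in the bio's end_io handler.
 */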
129 static struct bio *
130 bl_submit_bio(int rw, struct bio *bio)
131 {
132         if (bio) {
133                 get_parallel(bio->bi_private);
134                 dprintk("%s submitting %s bio %u@%llu\n", __func__,
135                         rw == READ ? "read" : "write",
136                         bio->bi_size, (unsigned long long)bio->bi_sector);
137                 submit_bio(rw, bio);
138         }
139         return NULL;
140 }
141
142 static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
143                                      struct pnfs_block_extent *be,
144                                      void (*end_io)(struct bio *, int err),
145                                      struct parallel_io *par)
146 {
147         struct bio *bio;
148
149         npg = min(npg, BIO_MAX_PAGES);
150         bio = bio_alloc(GFP_NOIO, npg);
151         if (!bio && (current->flags & PF_MEMALLOC)) {
152                 while (!bio && (npg /= 2))
153                         bio = bio_alloc(GFP_NOIO, npg);
154         }
155
156         if (bio) {
157                 bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
158                 bio->bi_bdev = be->be_mdev;
159                 bio->bi_end_io = end_io;
160                 bio->bi_private = par;
161         }
162         return bio;
163 }
164
165 static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
166                                       sector_t isect, struct page *page,
167                                       struct pnfs_block_extent *be,
168                                       void (*end_io)(struct bio *, int err),
169                                       struct parallel_io *par)
170 {
171 retry:
172         if (!bio) {
173                 bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
174                 if (!bio)
175                         return ERR_PTR(-ENOMEM);
176         }
177         if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
178                 bio = bl_submit_bio(rw, bio);
179                 goto retry;
180         }
181         return bio;
182 }
183
184 /* This is basically copied from mpage_end_io_read */
185 static void bl_end_io_read(struct bio *bio, int err)
186 {
187         struct parallel_io *par = bio->bi_private;
188         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
189         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
190         struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;
191
192         do {
193                 struct page *page = bvec->bv_page;
194
195                 if (--bvec >= bio->bi_io_vec)
196                         prefetchw(&bvec->bv_page->flags);
197                 if (uptodate)
198                         SetPageUptodate(page);
199         } while (bvec >= bio->bi_io_vec);
200         if (!uptodate) {
201                 if (!rdata->pnfs_error)
202                         rdata->pnfs_error = -EIO;
203                 pnfs_set_lo_fail(rdata->lseg);
204         }
205         bio_put(bio);
206         put_parallel(par);
207 }
208
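/*
 * bl_end_par_io_read() may run from bio end_io context (the last
 * put_parallel() usually happens there), so the final pnfs_ld_read_done()
 * call is deferred to a workqueue by reusing the rpc_task's tk_work.
 */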
209 static void bl_read_cleanup(struct work_struct *work)
210 {
211         struct rpc_task *task;
212         struct nfs_read_data *rdata;
213         dprintk("%s enter\n", __func__);
214         task = container_of(work, struct rpc_task, u.tk_work);
215         rdata = container_of(task, struct nfs_read_data, task);
216         pnfs_ld_read_done(rdata);
217 }
218
219 static void
220 bl_end_par_io_read(void *data, int unused)
221 {
222         struct nfs_read_data *rdata = data;
223
224         rdata->task.tk_status = rdata->pnfs_error;
225         INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
226         schedule_work(&rdata->task.u.tk_work);
227 }
228
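/*
 * Read path: walk the requested pages, look up the extent covering each
 * one, zero pages that fall into holes without touching the device, and
 * queue the rest into bios.  Falling back to the MDS (use_mds) is only
 * possible before the parallel_io callback has been armed.
 */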
229 static enum pnfs_try_status
230 bl_read_pagelist(struct nfs_read_data *rdata)
231 {
232         int i, hole;
233         struct bio *bio = NULL;
234         struct pnfs_block_extent *be = NULL, *cow_read = NULL;
235         sector_t isect, extent_length = 0;
236         struct parallel_io *par;
237         loff_t f_offset = rdata->args.offset;
238         size_t count = rdata->args.count;
239         struct page **pages = rdata->args.pages;
240         int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
241
242         dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__,
243                rdata->npages, f_offset, count);
244
245         par = alloc_parallel(rdata);
246         if (!par)
247                 goto use_mds;
248         par->pnfs_callback = bl_end_par_io_read;
249         /* At this point, we can no longer jump to use_mds */
250
251         isect = (sector_t) (f_offset >> SECTOR_SHIFT);
252         /* Code assumes extents are page-aligned */
253         for (i = pg_index; i < rdata->npages; i++) {
254                 if (!extent_length) {
255                         /* We've used up the previous extent */
256                         bl_put_extent(be);
257                         bl_put_extent(cow_read);
258                         bio = bl_submit_bio(READ, bio);
259                         /* Get the next one */
260                         be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
261                                              isect, &cow_read);
262                         if (!be) {
263                                 rdata->pnfs_error = -EIO;
264                                 goto out;
265                         }
266                         extent_length = be->be_length -
267                                 (isect - be->be_f_offset);
268                         if (cow_read) {
269                                 sector_t cow_length = cow_read->be_length -
270                                         (isect - cow_read->be_f_offset);
271                                 extent_length = min(extent_length, cow_length);
272                         }
273                 }
274                 hole = is_hole(be, isect);
275                 if (hole && !cow_read) {
276                         bio = bl_submit_bio(READ, bio);
277                         /* Fill hole w/ zeroes w/o accessing device */
278                         dprintk("%s Zeroing page for hole\n", __func__);
279                         zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
280                         print_page(pages[i]);
281                         SetPageUptodate(pages[i]);
282                 } else {
283                         struct pnfs_block_extent *be_read;
284
285                         be_read = (hole && cow_read) ? cow_read : be;
286                         bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
287                                                  isect, pages[i], be_read,
288                                                  bl_end_io_read, par);
289                         if (IS_ERR(bio)) {
290                                 rdata->pnfs_error = PTR_ERR(bio);
291                                 bio = NULL;
292                                 goto out;
293                         }
294                 }
295                 isect += PAGE_CACHE_SECTORS;
296                 extent_length -= PAGE_CACHE_SECTORS;
297         }
298         if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
299                 rdata->res.eof = 1;
300                 rdata->res.count = rdata->inode->i_size - f_offset;
301         } else {
302                 rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
303         }
304 out:
305         bl_put_extent(be);
306         bl_put_extent(cow_read);
307         bl_submit_bio(READ, bio);
308         put_parallel(par);
309         return PNFS_ATTEMPTED;
310
311  use_mds:
312         dprintk("Giving up and using normal NFS\n");
313         return PNFS_NOT_ATTEMPTED;
314 }
315
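/*
 * Walk the extents covering [offset, offset + count) and, for every
 * INVALID extent hit, queue a short extent on the commit list so the
 * range is included in the next LAYOUTCOMMIT.
 */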
316 static void mark_extents_written(struct pnfs_block_layout *bl,
317                                  __u64 offset, __u32 count)
318 {
319         sector_t isect, end;
320         struct pnfs_block_extent *be;
321         struct pnfs_block_short_extent *se;
322
323         dprintk("%s(%llu, %u)\n", __func__, offset, count);
324         if (count == 0)
325                 return;
326         isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
327         end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
328         end >>= SECTOR_SHIFT;
329         while (isect < end) {
330                 sector_t len;
331                 be = bl_find_get_extent(bl, isect, NULL);
332                 BUG_ON(!be); /* FIXME */
333                 len = min(end, be->be_f_offset + be->be_length) - isect;
334                 if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
335                         se = bl_pop_one_short_extent(be->be_inval);
336                         BUG_ON(!se);
337                         bl_mark_for_commit(be, isect, len, se);
338                 }
339                 isect += len;
340                 bl_put_extent(be);
341         }
342 }
343
344 static void bl_end_io_write_zero(struct bio *bio, int err)
345 {
346         struct parallel_io *par = bio->bi_private;
347         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
348         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
349         struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
350
351         do {
352                 struct page *page = bvec->bv_page;
353
354                 if (--bvec >= bio->bi_io_vec)
355                         prefetchw(&bvec->bv_page->flags);
356                 /* This is the zeroing page we added */
357                 end_page_writeback(page);
358                 page_cache_release(page);
359         } while (bvec >= bio->bi_io_vec);
360
361         if (unlikely(!uptodate)) {
362                 if (!wdata->pnfs_error)
363                         wdata->pnfs_error = -EIO;
364                 pnfs_set_lo_fail(wdata->lseg);
365         }
366         bio_put(bio);
367         put_parallel(par);
368 }
369
370 static void bl_end_io_write(struct bio *bio, int err)
371 {
372         struct parallel_io *par = bio->bi_private;
373         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
374         struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
375
376         if (!uptodate) {
377                 if (!wdata->pnfs_error)
378                         wdata->pnfs_error = -EIO;
379                 pnfs_set_lo_fail(wdata->lseg);
380         }
381         bio_put(bio);
382         put_parallel(par);
383 }
384
385 /* Function scheduled by bl_end_par_io_write; it marks sectors as written
386  * and extends the commit list.
387  */
388 static void bl_write_cleanup(struct work_struct *work)
389 {
390         struct rpc_task *task;
391         struct nfs_write_data *wdata;
392         dprintk("%s enter\n", __func__);
393         task = container_of(work, struct rpc_task, u.tk_work);
394         wdata = container_of(task, struct nfs_write_data, task);
395         if (likely(!wdata->pnfs_error)) {
396                 /* Marks for LAYOUTCOMMIT */
397                 mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
398                                      wdata->args.offset, wdata->args.count);
399         }
400         pnfs_ld_write_done(wdata);
401 }
402
403 /* Called when the last of the bios associated with a bl_write_pagelist call finishes */
404 static void bl_end_par_io_write(void *data, int num_se)
405 {
406         struct nfs_write_data *wdata = data;
407
408         if (unlikely(wdata->pnfs_error)) {
409                 bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
410                                         num_se);
411         }
412
413         wdata->task.tk_status = wdata->pnfs_error;
414         wdata->verf.committed = NFS_FILE_SYNC;
415         INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
416         schedule_work(&wdata->task.u.tk_work);
417 }
418
419 /* FIXME STUB - mark the intersection of layout and page as bad, so it is
420  * not used again.
421  */
422 static void mark_bad_read(void)
423 {
424         return;
425 }
426
427 /*
428  * map_block:  map a requested I/O block (isect) into an offset in the LVM
429  * block_device
430  */
431 static void
432 map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
433 {
434         dprintk("%s enter be=%p\n", __func__, be);
435
436         set_buffer_mapped(bh);
437         bh->b_bdev = be->be_mdev;
438         bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
439             (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);
440
441         dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
442                 __func__, (unsigned long long)isect, (long)bh->b_blocknr,
443                 bh->b_size);
444         return;
445 }
446
447 /* Given an unmapped page, zero it (or read it in for COW); the page is
448  * locked by the caller.
449  */
450 static int
451 init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
452 {
453         struct buffer_head *bh = NULL;
454         int ret = 0;
455         sector_t isect;
456
457         dprintk("%s enter, %p\n", __func__, page);
458         BUG_ON(PageUptodate(page));
459         if (!cow_read) {
460                 zero_user_segment(page, 0, PAGE_SIZE);
461                 SetPageUptodate(page);
462                 goto cleanup;
463         }
464
465         bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
466         if (!bh) {
467                 ret = -ENOMEM;
468                 goto cleanup;
469         }
470
471         isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
472         map_block(bh, isect, cow_read);
473         if (!bh_uptodate_or_lock(bh))
474                 ret = bh_submit_read(bh);
475         if (ret)
476                 goto cleanup;
477         SetPageUptodate(page);
478
479 cleanup:
480         bl_put_extent(cow_read);
481         if (bh)
482                 free_buffer_head(bh);
483         if (ret) {
484                 /* Need to mark layout with bad read...should now
485                  * just use nfs4 for reads and writes.
486                  */
487                 mark_bad_read();
488         }
489         return ret;
490 }
491
492 /* Find or create a zeroing page and mark it as under writeback.
493  * Returns ERR_PTR on error, NULL to indicate that this page should be
494  * skipped, and the page itself to indicate that it should be written out.
495  */
496 static struct page *
497 bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
498                         struct pnfs_block_extent *cow_read)
499 {
500         struct page *page;
501         int locked = 0;
502         page = find_get_page(inode->i_mapping, index);
503         if (page)
504                 goto check_page;
505
506         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
507         if (unlikely(!page)) {
508                 dprintk("%s oom\n", __func__);
509                 return ERR_PTR(-ENOMEM);
510         }
511         locked = 1;
512
513 check_page:
514         /* PageDirty: someone else will write this out
515          * PageWriteback: someone else is writing this out
516          * PageUptodate: it was read before
517          */
518         if (PageDirty(page) || PageWriteback(page)) {
519                 print_page(page);
520                 if (locked)
521                         unlock_page(page);
522                 page_cache_release(page);
523                 return NULL;
524         }
525
526         if (!locked) {
527                 lock_page(page);
528                 locked = 1;
529                 goto check_page;
530         }
531         if (!PageUptodate(page)) {
532                 /* New page: read it in or zero it */
533                 init_page_for_write(page, cow_read);
534         }
535         set_page_writeback(page);
536         unlock_page(page);
537
538         return page;
539 }
540
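/*
 * Write path, in three phases: zero any uninitialized pages at the front
 * of the surrounding INVALID extent block (fill_invalid_ext), write the
 * requested pages themselves, then jump back to zero the trailing pages
 * of the final block ('last' set).  Any error sets pnfs_error so the
 * write is redone through the MDS.
 */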
541 static enum pnfs_try_status
542 bl_write_pagelist(struct nfs_write_data *wdata, int sync)
543 {
544         int i, ret, npg_zero, pg_index, last = 0;
545         struct bio *bio = NULL;
546         struct pnfs_block_extent *be = NULL, *cow_read = NULL;
547         sector_t isect, last_isect = 0, extent_length = 0;
548         struct parallel_io *par;
549         loff_t offset = wdata->args.offset;
550         size_t count = wdata->args.count;
551         struct page **pages = wdata->args.pages;
552         struct page *page;
553         pgoff_t index;
554         u64 temp;
555         int npg_per_block =
556             NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
557
558         dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
559         /* At this point, wdata->pages is a (sequential) list of nfs_pages.
560          * We want to write each one, and if there is an error we set pnfs_error
561          * so that the write is redone using plain NFS.
562          */
563         par = alloc_parallel(wdata);
564         if (!par)
565                 goto out_mds;
566         par->pnfs_callback = bl_end_par_io_write;
567         /* From this point on, we have to be more careful with error handling */
568
569         isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
570         be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
571         if (!be || !is_writable(be, isect)) {
572                 dprintk("%s no matching extents!\n", __func__);
573                 goto out_mds;
574         }
575
576         /* First page inside INVALID extent */
577         if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
578                 if (likely(!bl_push_one_short_extent(be->be_inval)))
579                         par->bse_count++;
580                 else
581                         goto out_mds;
582                 temp = offset >> PAGE_CACHE_SHIFT;
583                 npg_zero = do_div(temp, npg_per_block);
584                 isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
585                                      (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
586                 extent_length = be->be_length - (isect - be->be_f_offset);
587
588 fill_invalid_ext:
589                 dprintk("%s need to zero %d pages\n", __func__, npg_zero);
590                 for (;npg_zero > 0; npg_zero--) {
591                         if (bl_is_sector_init(be->be_inval, isect)) {
592                                 dprintk("isect %llu already init\n",
593                                         (unsigned long long)isect);
594                                 goto next_page;
595                         }
596                         /* page ref released in bl_end_io_write_zero */
597                         index = isect >> PAGE_CACHE_SECTOR_SHIFT;
598                         dprintk("%s zero %dth page: index %lu isect %llu\n",
599                                 __func__, npg_zero, index,
600                                 (unsigned long long)isect);
601                         page = bl_find_get_zeroing_page(wdata->inode, index,
602                                                         cow_read);
603                         if (unlikely(IS_ERR(page))) {
604                                 wdata->pnfs_error = PTR_ERR(page);
605                                 goto out;
606                         } else if (page == NULL)
607                                 goto next_page;
608
609                         ret = bl_mark_sectors_init(be->be_inval, isect,
610                                                        PAGE_CACHE_SECTORS);
611                         if (unlikely(ret)) {
612                                 dprintk("%s bl_mark_sectors_init fail %d\n",
613                                         __func__, ret);
614                                 end_page_writeback(page);
615                                 page_cache_release(page);
616                                 wdata->pnfs_error = ret;
617                                 goto out;
618                         }
619                         if (likely(!bl_push_one_short_extent(be->be_inval)))
620                                 par->bse_count++;
621                         else {
622                                 end_page_writeback(page);
623                                 page_cache_release(page);
624                                 wdata->pnfs_error = -ENOMEM;
625                                 goto out;
626                         }
627                         /* FIXME: This should be done in bi_end_io */
628                         mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
629                                              page->index << PAGE_CACHE_SHIFT,
630                                              PAGE_CACHE_SIZE);
631
632                         bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
633                                                  isect, page, be,
634                                                  bl_end_io_write_zero, par);
635                         if (IS_ERR(bio)) {
636                                 wdata->pnfs_error = PTR_ERR(bio);
637                                 bio = NULL;
638                                 goto out;
639                         }
640 next_page:
641                         isect += PAGE_CACHE_SECTORS;
642                         extent_length -= PAGE_CACHE_SECTORS;
643                 }
644                 if (last)
645                         goto write_done;
646         }
647         bio = bl_submit_bio(WRITE, bio);
648
649         /* Middle pages */
650         pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
651         for (i = pg_index; i < wdata->npages; i++) {
652                 if (!extent_length) {
653                         /* We've used up the previous extent */
654                         bl_put_extent(be);
655                         bio = bl_submit_bio(WRITE, bio);
656                         /* Get the next one */
657                         be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
658                                              isect, NULL);
659                         if (!be || !is_writable(be, isect)) {
660                                 wdata->pnfs_error = -EINVAL;
661                                 goto out;
662                         }
663                         if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
664                                 if (likely(!bl_push_one_short_extent(
665                                                                 be->be_inval)))
666                                         par->bse_count++;
667                                 else {
668                                         wdata->pnfs_error = -ENOMEM;
669                                         goto out;
670                                 }
671                         }
672                         extent_length = be->be_length -
673                             (isect - be->be_f_offset);
674                 }
675                 if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
676                         ret = bl_mark_sectors_init(be->be_inval, isect,
677                                                        PAGE_CACHE_SECTORS);
678                         if (unlikely(ret)) {
679                                 dprintk("%s bl_mark_sectors_init fail %d\n",
680                                         __func__, ret);
681                                 wdata->pnfs_error = ret;
682                                 goto out;
683                         }
684                 }
685                 bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
686                                          isect, pages[i], be,
687                                          bl_end_io_write, par);
688                 if (IS_ERR(bio)) {
689                         wdata->pnfs_error = PTR_ERR(bio);
690                         bio = NULL;
691                         goto out;
692                 }
693                 isect += PAGE_CACHE_SECTORS;
694                 last_isect = isect;
695                 extent_length -= PAGE_CACHE_SECTORS;
696         }
697
698         /* Last page inside INVALID extent */
699         if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
700                 bio = bl_submit_bio(WRITE, bio);
701                 temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
702                 npg_zero = npg_per_block - do_div(temp, npg_per_block);
703                 if (npg_zero < npg_per_block) {
704                         last = 1;
705                         goto fill_invalid_ext;
706                 }
707         }
708
709 write_done:
710         wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
711         if (count < wdata->res.count) {
712                 wdata->res.count = count;
713         }
714 out:
715         bl_put_extent(be);
716         bl_submit_bio(WRITE, bio);
717         put_parallel(par);
718         return PNFS_ATTEMPTED;
719 out_mds:
720         bl_put_extent(be);
721         kfree(par);
722         return PNFS_NOT_ATTEMPTED;
723 }
724
725 /* FIXME - range ignored */
726 static void
727 release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
728 {
729         int i;
730         struct pnfs_block_extent *be;
731
732         spin_lock(&bl->bl_ext_lock);
733         for (i = 0; i < EXTENT_LISTS; i++) {
734                 while (!list_empty(&bl->bl_extents[i])) {
735                         be = list_first_entry(&bl->bl_extents[i],
736                                               struct pnfs_block_extent,
737                                               be_node);
738                         list_del(&be->be_node);
739                         bl_put_extent(be);
740                 }
741         }
742         spin_unlock(&bl->bl_ext_lock);
743 }
744
745 static void
746 release_inval_marks(struct pnfs_inval_markings *marks)
747 {
748         struct pnfs_inval_tracking *pos, *temp;
749         struct pnfs_block_short_extent *se, *stemp;
750
751         list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
752                 list_del(&pos->it_link);
753                 kfree(pos);
754         }
755
756         list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
757                 list_del(&se->bse_node);
758                 kfree(se);
759         }
760         return;
761 }
762
763 static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
764 {
765         struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
766
767         dprintk("%s enter\n", __func__);
768         release_extents(bl, NULL);
769         release_inval_marks(&bl->bl_inval);
770         kfree(bl);
771 }
772
773 static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
774                                                    gfp_t gfp_flags)
775 {
776         struct pnfs_block_layout *bl;
777
778         dprintk("%s enter\n", __func__);
779         bl = kzalloc(sizeof(*bl), gfp_flags);
780         if (!bl)
781                 return NULL;
782         spin_lock_init(&bl->bl_ext_lock);
783         INIT_LIST_HEAD(&bl->bl_extents[0]);
784         INIT_LIST_HEAD(&bl->bl_extents[1]);
785         INIT_LIST_HEAD(&bl->bl_commit);
786         INIT_LIST_HEAD(&bl->bl_committing);
787         bl->bl_count = 0;
788         bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
789         BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
790         return &bl->bl_layout;
791 }
792
793 static void bl_free_lseg(struct pnfs_layout_segment *lseg)
794 {
795         dprintk("%s enter\n", __func__);
796         kfree(lseg);
797 }
798
799 /* We pretty much ignore lseg, and store all data layout-wide, so we
800  * can correctly merge.
801  */
802 static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
803                                                  struct nfs4_layoutget_res *lgr,
804                                                  gfp_t gfp_flags)
805 {
806         struct pnfs_layout_segment *lseg;
807         int status;
808
809         dprintk("%s enter\n", __func__);
810         lseg = kzalloc(sizeof(*lseg), gfp_flags);
811         if (!lseg)
812                 return ERR_PTR(-ENOMEM);
813         status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
814         if (status) {
815                 /* We don't want to call the full-blown bl_free_lseg,
816                  * since on error extents were not touched.
817                  */
818                 kfree(lseg);
819                 return ERR_PTR(status);
820         }
821         return lseg;
822 }
823
824 static void
825 bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
826                        const struct nfs4_layoutcommit_args *arg)
827 {
828         dprintk("%s enter\n", __func__);
829         encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
830 }
831
832 static void
833 bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
834 {
835         struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;
836
837         dprintk("%s enter\n", __func__);
838         clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
839 }
840
841 static void free_blk_mountid(struct block_mount_id *mid)
842 {
843         if (mid) {
844                 struct pnfs_block_dev *dev, *tmp;
845
846                 /* No need to take bm_lock as we are the last user freeing bm_devlist */
847                 list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
848                         list_del(&dev->bm_node);
849                         bl_free_block_dev(dev);
850                 }
851                 kfree(mid);
852         }
853 }
854
855 /* This is mostly copied from the filelayout's get_device_info function.
856  * It seems much of this should be at the generic pnfs level.
857  */
858 static struct pnfs_block_dev *
859 nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
860                         struct nfs4_deviceid *d_id)
861 {
862         struct pnfs_device *dev;
863         struct pnfs_block_dev *rv;
864         u32 max_resp_sz;
865         int max_pages;
866         struct page **pages = NULL;
867         int i, rc;
868
869         /*
870          * Use the session max response size as the basis for setting
871          * GETDEVICEINFO's maxcount
872          */
873         max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
874         max_pages = max_resp_sz >> PAGE_SHIFT;
875         dprintk("%s max_resp_sz %u max_pages %d\n",
876                 __func__, max_resp_sz, max_pages);
877
878         dev = kmalloc(sizeof(*dev), GFP_NOFS);
879         if (!dev) {
880                 dprintk("%s kmalloc failed\n", __func__);
881                 return ERR_PTR(-ENOMEM);
882         }
883
884         pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
885         if (pages == NULL) {
886                 kfree(dev);
887                 return ERR_PTR(-ENOMEM);
888         }
889         for (i = 0; i < max_pages; i++) {
890                 pages[i] = alloc_page(GFP_NOFS);
891                 if (!pages[i]) {
892                         rv = ERR_PTR(-ENOMEM);
893                         goto out_free;
894                 }
895         }
896
897         memcpy(&dev->dev_id, d_id, sizeof(*d_id));
898         dev->layout_type = LAYOUT_BLOCK_VOLUME;
899         dev->pages = pages;
900         dev->pgbase = 0;
901         dev->pglen = PAGE_SIZE * max_pages;
902         dev->mincount = 0;
903
904         dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
905         rc = nfs4_proc_getdeviceinfo(server, dev);
906         dprintk("%s getdevice info returns %d\n", __func__, rc);
907         if (rc) {
908                 rv = ERR_PTR(rc);
909                 goto out_free;
910         }
911
912         rv = nfs4_blk_decode_device(server, dev);
913  out_free:
914         for (i = 0; i < max_pages; i++)
915                 __free_page(pages[i]);
916         kfree(pages);
917         kfree(dev);
918         return rv;
919 }
920
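/*
 * Mount-time setup: allocate the per-server block_mount_id, fetch the
 * server's device list with GETDEVICELIST, and decode each device via
 * GETDEVICEINFO, adding the results to bm_devlist.
 */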
921 static int
922 bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
923 {
924         struct block_mount_id *b_mt_id = NULL;
925         struct pnfs_devicelist *dlist = NULL;
926         struct pnfs_block_dev *bdev;
927         LIST_HEAD(block_disklist);
928         int status, i;
929
930         dprintk("%s enter\n", __func__);
931
932         if (server->pnfs_blksize == 0) {
933                 dprintk("%s Server did not return blksize\n", __func__);
934                 return -EINVAL;
935         }
936         b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
937         if (!b_mt_id) {
938                 status = -ENOMEM;
939                 goto out_error;
940         }
941         /* Initialize nfs4 block layout mount id */
942         spin_lock_init(&b_mt_id->bm_lock);
943         INIT_LIST_HEAD(&b_mt_id->bm_devlist);
944
945         dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
946         if (!dlist) {
947                 status = -ENOMEM;
948                 goto out_error;
949         }
950         dlist->eof = 0;
951         while (!dlist->eof) {
952                 status = nfs4_proc_getdevicelist(server, fh, dlist);
953                 if (status)
954                         goto out_error;
955                 dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
956                         __func__, dlist->num_devs, dlist->eof);
957                 for (i = 0; i < dlist->num_devs; i++) {
958                         bdev = nfs4_blk_get_deviceinfo(server, fh,
959                                                        &dlist->dev_id[i]);
960                         if (IS_ERR(bdev)) {
961                                 status = PTR_ERR(bdev);
962                                 goto out_error;
963                         }
964                         spin_lock(&b_mt_id->bm_lock);
965                         list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
966                         spin_unlock(&b_mt_id->bm_lock);
967                 }
968         }
969         dprintk("%s SUCCESS\n", __func__);
970         server->pnfs_ld_data = b_mt_id;
971
972  out_return:
973         kfree(dlist);
974         return status;
975
976  out_error:
977         free_blk_mountid(b_mt_id);
978         goto out_return;
979 }
980
981 static int
982 bl_clear_layoutdriver(struct nfs_server *server)
983 {
984         struct block_mount_id *b_mt_id = server->pnfs_ld_data;
985
986         dprintk("%s enter\n", __func__);
987         free_blk_mountid(b_mt_id);
988         dprintk("%s RETURNS\n", __func__);
989         return 0;
990 }
991
992 static const struct nfs_pageio_ops bl_pg_read_ops = {
993         .pg_init = pnfs_generic_pg_init_read,
994         .pg_test = pnfs_generic_pg_test,
995         .pg_doio = pnfs_generic_pg_readpages,
996 };
997
998 static const struct nfs_pageio_ops bl_pg_write_ops = {
999         .pg_init = pnfs_generic_pg_init_write,
1000         .pg_test = pnfs_generic_pg_test,
1001         .pg_doio = pnfs_generic_pg_writepages,
1002 };
1003
1004 static struct pnfs_layoutdriver_type blocklayout_type = {
1005         .id                             = LAYOUT_BLOCK_VOLUME,
1006         .name                           = "LAYOUT_BLOCK_VOLUME",
1007         .read_pagelist                  = bl_read_pagelist,
1008         .write_pagelist                 = bl_write_pagelist,
1009         .alloc_layout_hdr               = bl_alloc_layout_hdr,
1010         .free_layout_hdr                = bl_free_layout_hdr,
1011         .alloc_lseg                     = bl_alloc_lseg,
1012         .free_lseg                      = bl_free_lseg,
1013         .encode_layoutcommit            = bl_encode_layoutcommit,
1014         .cleanup_layoutcommit           = bl_cleanup_layoutcommit,
1015         .set_layoutdriver               = bl_set_layoutdriver,
1016         .clear_layoutdriver             = bl_clear_layoutdriver,
1017         .pg_read_ops                    = &bl_pg_read_ops,
1018         .pg_write_ops                   = &bl_pg_write_ops,
1019 };
1020
1021 static const struct rpc_pipe_ops bl_upcall_ops = {
1022         .upcall         = rpc_pipe_generic_upcall,
1023         .downcall       = bl_pipe_downcall,
1024         .destroy_msg    = bl_pipe_destroy_msg,
1025 };
1026
1027 static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
1028                                             struct rpc_pipe *pipe)
1029 {
1030         struct dentry *dir, *dentry;
1031
1032         dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
1033         if (dir == NULL)
1034                 return ERR_PTR(-ENOENT);
1035         dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
1036         dput(dir);
1037         return dentry;
1038 }
1039
1040 static void nfs4blocklayout_unregister_sb(struct super_block *sb,
1041                                           struct rpc_pipe *pipe)
1042 {
1043         if (pipe->dentry)
1044                 rpc_unlink(pipe->dentry);
1045 }
1046
1047 static struct dentry *nfs4blocklayout_register_net(struct net *net,
1048                                                    struct rpc_pipe *pipe)
1049 {
1050         struct super_block *pipefs_sb;
1051         struct dentry *dentry;
1052
1053         pipefs_sb = rpc_get_sb_net(net);
1054         if (!pipefs_sb)
1055                 return ERR_PTR(-ENOENT);
1056         dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
1057         rpc_put_sb_net(net);
1058         return dentry;
1059 }
1060
1061 static void nfs4blocklayout_unregister_net(struct net *net,
1062                                            struct rpc_pipe *pipe)
1063 {
1064         struct super_block *pipefs_sb;
1065
1066         pipefs_sb = rpc_get_sb_net(net);
1067         if (pipefs_sb) {
1068                 nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
1069                 rpc_put_sb_net(net);
1070         }
1071 }
1072
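/*
 * Per-network-namespace init/exit: each namespace gets its own
 * "blocklayout" rpc_pipefs pipe, stored in its nfs_net data, so upcalls
 * to the userspace mapping daemon stay within that namespace.
 */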
1073 static int nfs4blocklayout_net_init(struct net *net)
1074 {
1075         struct nfs_net *nn = net_generic(net, nfs_net_id);
1076         struct dentry *dentry;
1077
1078         nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
1079         if (IS_ERR(nn->bl_device_pipe))
1080                 return PTR_ERR(nn->bl_device_pipe);
1081         dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
1082         if (IS_ERR(dentry)) {
1083                 rpc_destroy_pipe_data(nn->bl_device_pipe);
1084                 return PTR_ERR(dentry);
1085         }
1086         nn->bl_device_pipe->dentry = dentry;
1087         return 0;
1088 }
1089
1090 static void nfs4blocklayout_net_exit(struct net *net)
1091 {
1092         struct nfs_net *nn = net_generic(net, nfs_net_id);
1093
1094         nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
1095         rpc_destroy_pipe_data(nn->bl_device_pipe);
1096         nn->bl_device_pipe = NULL;
1097 }
1098
1099 static struct pernet_operations nfs4blocklayout_net_ops = {
1100         .init = nfs4blocklayout_net_init,
1101         .exit = nfs4blocklayout_net_exit,
1102 };
1103
1104 static int __init nfs4blocklayout_init(void)
1105 {
1106         struct vfsmount *mnt;
1107         int ret;
1108
1109         dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);
1110
1111         ret = pnfs_register_layoutdriver(&blocklayout_type);
1112         if (ret)
1113                 goto out;
1114
1115         init_waitqueue_head(&bl_wq);
1116
1117         mnt = rpc_get_mount();
1118         if (IS_ERR(mnt)) {
1119                 ret = PTR_ERR(mnt);
1120                 goto out_remove;
1121         }
1122         ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
1123         if (ret)
1124                 goto out_remove;
1125 out:
1126         return ret;
1127
1128 out_remove:
1129         pnfs_unregister_layoutdriver(&blocklayout_type);
1130         return ret;
1131 }
1132
1133 static void __exit nfs4blocklayout_exit(void)
1134 {
1135         dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
1136                __func__);
1137
1138         unregister_pernet_subsys(&nfs4blocklayout_net_ops);
1139         pnfs_unregister_layoutdriver(&blocklayout_type);
1140 }
1141
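/* "nfs-layouttype4-3": LAYOUT_BLOCK_VOLUME is layout type 3, so this alias
 * lets the module be auto-loaded when a server hands out block layouts.
 */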
1142 MODULE_ALIAS("nfs-layouttype4-3");
1143
1144 module_init(nfs4blocklayout_init);
1145 module_exit(nfs4blocklayout_exit);