/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;

static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}
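
/*
 * Worked example (illustrative, not part of the original file): with
 * io_start = 8192 and good_bytes = 16384, an error reported at pos = 12288
 * trims good_bytes to 12288 - 8192 = 4096.  An error at
 * pos >= io_start + good_bytes (here 24576) leaves the header unchanged.
 */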

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}

static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}

static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&c->flags);
	} while (atomic_read(&c->io_count) != 0);
	finish_wait(wq, &q.wait);
	return ret;
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
	if (atomic_read(&c->io_count) == 0)
		return 0;
	return __nfs_iocounter_wait(c);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	nfs_iocounter_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	req->wb_index   = page_file_index(page);
	page_cache_get(page);
	req->wb_offset  = offset;
	req->wb_pgbase  = offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
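
/*
 * Sketch of typical use (illustrative only; the caller must hold the page
 * lock, and the single reference taken by kref_init() is eventually dropped
 * via nfs_release_request()):
 *
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	nfs_pageio_add_request(&pgio, req);	// queue for coalescing
 */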

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: pointer to request
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_iocounter_dec(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return false;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
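
/*
 * Example with illustrative numbers: given pg_bsize = 32768 and
 * pg_count = 28672 already coalesced, a 4096-byte request still fits
 * (28672 + 4096 == 32768), but any further request would exceed pg_bsize
 * and is refused.  With pg_bsize below PAGE_SIZE the function never
 * coalesces (see the FIXME above).
 */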

static inline struct nfs_rw_header *NFS_RW_HEADER(struct nfs_pgio_header *hdr)
{
	return container_of(hdr, struct nfs_rw_header, header);
}

/**
 * nfs_rw_header_alloc - Allocate a header for a read or write
 * @ops: Read or write function vector
 */
struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_rw_header *header = ops->rw_alloc_header();

	if (header) {
		struct nfs_pgio_header *hdr = &header->header;

		INIT_LIST_HEAD(&hdr->pages);
		INIT_LIST_HEAD(&hdr->rpc_list);
		spin_lock_init(&hdr->lock);
		atomic_set(&hdr->refcnt, 0);
		hdr->rw_ops = ops;
	}
	return header;
}
EXPORT_SYMBOL_GPL(nfs_rw_header_alloc);

/**
 * nfs_rw_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_rw_header_free(struct nfs_pgio_header *hdr)
{
	hdr->rw_ops->rw_free_header(NFS_RW_HEADER(hdr));
}
EXPORT_SYMBOL_GPL(nfs_rw_header_free);

/**
 * nfs_pgio_data_alloc - Allocate pageio data
 * @hdr: The header making a request
 * @pagecount: Number of pages to create
 */
struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr,
					  unsigned int pagecount)
{
	struct nfs_pgio_data *data, *prealloc;

	prealloc = &NFS_RW_HEADER(hdr)->rpc_data;
	if (prealloc->header == NULL)
		data = prealloc;
	else
		data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	if (nfs_pgarray_set(&data->pages, pagecount)) {
		data->header = hdr;
		atomic_inc(&hdr->refcnt);
	} else {
		if (data != prealloc)
			kfree(data);
		data = NULL;
	}
out:
	return data;
}

/**
 * nfs_pgio_data_release - Properly free pageio data
 * @data: The data to release
 */
void nfs_pgio_data_release(struct nfs_pgio_data *data)
{
	struct nfs_pgio_header *hdr = data->header;
	struct nfs_rw_header *pageio_header = NFS_RW_HEADER(hdr);

	put_nfs_open_context(data->args.context);
	if (data->pages.pagevec != data->pages.page_array)
		kfree(data->pages.pagevec);
	if (data == &pageio_header->rpc_data)
		data->header = NULL;
	else
		kfree(data);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	/* Note: we only free the rpc_task after callbacks are done.
	 * See the comment in rpc_free_task() for why
	 */
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_release);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @data: The pageio data
 * @count: Number of bytes to read or write
 * @offset: Initial offset
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
void nfs_pgio_rpcsetup(struct nfs_pgio_data *data,
		       unsigned int count, unsigned int offset,
		       int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = data->header->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->args.fh     = NFS_FH(data->header->inode);
	data->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	data->mds_offset = data->args.offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pages.pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.lock_context = req->wb_lock_context;
	data->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
	default:
		data->args.stable = NFS_FILE_SYNC;
	}

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
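
/*
 * Illustration (not in the original source): how the switch above maps the
 * 'how' flags onto args.stable for a write:
 *
 *	how == 0                                -> NFS_UNSTABLE
 *	FLUSH_COND_STABLE, commits outstanding  -> NFS_UNSTABLE (a COMMIT follows)
 *	FLUSH_COND_STABLE, nothing to commit    -> NFS_FILE_SYNC
 *	FLUSH_STABLE                            -> NFS_FILE_SYNC
 */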

/**
 * nfs_pgio_prepare - Prepare pageio data to go over the wire
 * @task: The current task
 * @calldata: pageio data to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	int err;

	err = NFS_PROTO(data->header->inode)->pgio_rpc_prepare(task, data);
	if (err)
		rpc_exit(task, err);
}

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @desc: IO descriptor
 * @hdr: pageio header
 */
int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
		   struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_data *data;

	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	while (!list_empty(&hdr->rpc_list)) {
		data = list_first_entry(&hdr->rpc_list, struct nfs_pgio_data, list);
		list_del(&data->list);
		nfs_pgio_data_release(data);
	}
	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
	return -ENOMEM;
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio data to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_data *data = calldata;

	if (data->header->rw_ops->rw_release)
		data->header->rw_ops->rw_release(data);
	nfs_pgio_data_release(data);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read and write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);
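
/*
 * Typical caller pattern (illustrative sketch only; 'pg_ops', 'compl_ops',
 * 'rw_ops' and next_request() stand in for whatever the read or write path
 * actually supplies):
 *
 *	struct nfs_pageio_descriptor pgio;
 *	struct nfs_page *req;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, rw_ops,
 *			NFS_SERVER(inode)->rsize, 0);
 *	while ((req = next_request()) != NULL)
 *		nfs_pageio_add_request(&pgio, req);	// coalesce
 *	nfs_pageio_complete(&pgio);			// send what is queued
 */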

/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio data to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	struct inode *inode = data->header->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (data->header->rw_ops->rw_done(task, data, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(data->header, task->tk_status, data->args.offset);
	else
		data->header->rw_ops->rw_result(task, data);
}

static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
				   const struct nfs_open_context *ctx2)
{
	return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
				   const struct nfs_lock_context *l2)
{
	return l1->lockowner.l_owner == l2->lockowner.l_owner
		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (!nfs_match_open_context(req->wb_context, prev->wb_context))
		return false;
	if (req->wb_context->dentry->d_inode->i_flock != NULL &&
	    !nfs_match_lock_context(req->wb_lock_context, prev->wb_lock_context))
		return false;
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
		return false;
	return pgio->pg_ops->pg_test(pgio, prev, req);
}
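
/*
 * Worked example (illustrative, 4096-byte pages): if prev covers all of
 * page index 3 (req_offset 12288, wb_pgbase 0, wb_bytes 4096), a req with
 * wb_pgbase 0 at req_offset 16384 (page index 4) passes the checks above.
 * A req starting mid-page, or one following a partial prev, is rejected
 * and forces the descriptor to flush before the new request is queued.
 */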

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
		if (desc->pg_recoalesce)
			return 0;
	}
	return 1;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_pageio_add_request);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}
EXPORT_SYMBOL_GPL(nfs_pageio_complete);

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}
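
/*
 * Example (illustrative): if 'desc' currently holds requests for page
 * indices 5..8, waiting on the locked page at index 9 is safe, but a
 * caller about to wait on index 12 triggers nfs_pageio_complete() first,
 * so the descriptor never pins a non-contiguous range across the wait.
 */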

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};