/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);

	/* kmem_cache_zalloc() may fail under memory pressure */
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	req->wb_lock_context = nfs_get_lock_context(ctx);
	if (req->wb_lock_context == NULL) {
		nfs_page_free(req);
		return ERR_PTR(-ENOMEM);
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
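
/*
 * A minimal usage sketch, not part of the original file: creating a
 * request for a locked page and handling allocation failure. The helper
 * name and the zero offset/full-page count are hypothetical.
 */
static inline int example_create_request(struct nfs_open_context *ctx,
					 struct inode *inode,
					 struct page *page)
{
	struct nfs_page *req;

	/* The caller must already hold the page lock (see above). */
	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
	if (IS_ERR(req))
		return PTR_ERR(req);	/* -ENOMEM on allocation failure */

	/* ... queue req for coalescing, then drop our reference ... */
	nfs_release_request(req);
	return 0;
}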
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible (TASK_UNINTERRUPTIBLE); it does not
 * respond to signals.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}
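
/*
 * A minimal sketch, not part of the original file: waiting on an
 * in-flight request. Per the comment above, the caller must hold its
 * own reference so the request cannot be freed while we sleep; the
 * helper name is hypothetical.
 */
static inline int example_wait_for_request(struct nfs_page *req)
{
	int ret;

	kref_get(&req->wb_kref);	/* hold our own count */
	ret = nfs_wait_on_request(req);	/* sleeps until PG_BUSY clears */
	nfs_release_request(req);	/* drop our count */
	return ret;
}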
bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return false;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
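
/*
 * A minimal sketch, not part of the original file: a pg_test
 * implementation chaining an extra rule onto nfs_generic_pg_test().
 * The 1MB RPC-size cap is a made-up constraint for illustration.
 */
static inline bool example_pg_test(struct nfs_pageio_descriptor *desc,
				   struct nfs_page *prev,
				   struct nfs_page *req)
{
	/* Apply the generic size check first... */
	if (!nfs_generic_pg_test(desc, prev, req))
		return false;
	/* ...then also refuse to let a single RPC grow past 1MB. */
	return desc->pg_count + req->wb_bytes <= 1024 * 1024;
}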
/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
}
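
/*
 * A usage sketch, not part of the original file: how a caller drives a
 * pageio descriptor. 'ops' and 'requests' stand in for a real
 * nfs_pageio_ops table and request list; the helper is hypothetical.
 */
static inline void example_flush_requests(struct nfs_pageio_descriptor *desc,
					  struct inode *inode,
					  const struct nfs_pageio_ops *ops,
					  struct list_head *requests)
{
	struct nfs_page *req;

	nfs_pageio_init(desc, inode, ops, NFS_SERVER(inode)->wsize, 0);
	while (!list_empty(requests)) {
		req = nfs_list_entry(requests->next);
		/* Coalesces when possible, fires an RPC when it cannot. */
		if (!nfs_pageio_add_request(desc, req))
			break;	/* desc->pg_error holds the error */
	}
	/* Flush whatever is still queued in the descriptor. */
	nfs_pageio_complete(desc);
}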
/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return false;
	if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
		return false;
	if (req->wb_context->state != prev->wb_context->state)
		return false;
	if (req->wb_index != (prev->wb_index + 1))
		return false;
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request to add
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request to add
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
		if (desc->pg_recoalesce)
			return 0;
	}
	return 1;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}
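
/*
 * A sketch, not in the original file: how a writeback path might use
 * nfs_pageio_cond_complete() before sleeping on a busy request, so it
 * never waits while holding a non-contiguous page list (see the
 * deadlock note above). The surrounding locking is elided and the
 * helper name is hypothetical.
 */
static inline int example_wait_contiguous(struct nfs_pageio_descriptor *desc,
					  struct nfs_page *req)
{
	/* Flush queued pages if 'req' would not extend them. */
	nfs_pageio_cond_complete(desc, req->wb_index);
	/* Now it is safe to sleep until PG_BUSY is cleared. */
	return nfs_wait_on_request(req);
}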
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}