/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)
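/*
 * Allocate a read request structure from the mempool.  Small requests use
 * the embedded page_array; requests covering more than
 * ARRAY_SIZE(p->page_array) pages get a separately allocated page vector.
 */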
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
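/*
 * Free a read request structure, releasing any separately allocated
 * page vector before returning the structure to the mempool.
 */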
static void nfs_readdata_free(struct nfs_read_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}
void nfs_readdata_release(void *data)
{
	struct nfs_read_data *rdata = data;

	put_nfs_open_context(rdata->args.context);
	nfs_readdata_free(rdata);
}
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
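/*
 * If the server returned a short read that hit end-of-file, the tail of the
 * last page(s) was never filled by the reply.  Zero that uninitialised
 * region so stale data is not exposed.
 */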
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 * this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			zero_user(*pages, base, remainder);
			break;
		}
		zero_user(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}
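/*
 * Read a single page asynchronously: build one nfs_page request for the
 * valid part of the page, zero the rest, and hand it to nfs_pagein_multi()
 * or nfs_pagein_one() depending on whether rsize covers a whole page.
 */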
static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page	*new;
	unsigned int len;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_list_add_request(new, &one_request);
	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		nfs_pagein_multi(inode, &one_request, 1, len, 0);
	else
		nfs_pagein_one(inode, &one_request, 1, len, 0);
	return 0;
}
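/*
 * Unlock the page and drop the request once the read has completed,
 * whether it succeeded or not.
 */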
static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}
/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags,
	};

	data->req	  = req;
	data->inode	  = inode;
	data->cred	  = msg.rpc_cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task))
		rpc_put_task(task);
}
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}
/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes,rsize);

		data = nfs_readdata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while(nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < rsize)
			rsize = nbytes;
		nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
				  rsize, offset);
		offset += rsize;
		nbytes -= rsize;
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}
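/*
 * Gather the whole list of requests into a single READ call.  For example,
 * with rsize >= 16k, four contiguous 4k pages can be fetched with one RPC
 * rather than four.  (The sizes here are illustrative only.)
 */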
static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_read_data	*data;

	data = nfs_readdata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
	return 0;
out_bad:
	nfs_async_read_error(head);
	return -ENOMEM;
}
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __FUNCTION__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
		nfs_mark_for_revalidate(data->inode);
	}
	return 0;
}
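/*
 * Deal with a short read: if the server returned fewer bytes than requested
 * and we are not at end-of-file, advance the request past the data already
 * received and restart the RPC for the remainder.
 */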
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		return;

	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		return;

	/* Yes, so retry the read at the end of the data */
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call(task);
}
/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;

	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_retry(task, data);
}
static void nfs_readpage_release_partial(void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0)
		SetPageError(page);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_partial_ops = {
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readpage_release_partial,
};
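/*
 * Mark every page that was completely filled by the reply as up to date.
 * A page that was only partially filled is left alone unless the reply
 * reached end-of-file or satisfied the full request.
 */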
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args. In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first!
	 */
	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_set_pages_uptodate(data);
	nfs_readpage_retry(task, data);
}
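/*
 * Release every request attached to this read once the RPC has completed.
 */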
static void nfs_readpage_release_full(void *calldata)
{
	struct nfs_read_data *data = calldata;

	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_full_ops = {
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readpage_release_full,
};
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	error = nfs_readpage_async(ctx, inode, page);

	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}
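/*
 * Context passed to readpage_async_filler() via read_cache_pages(): the
 * pageio descriptor that batches requests and the open context used to
 * create them.
 */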
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	SetPageError(page);
out_unlock:
	unlock_page(page);
	return error;
}
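/*
 * Readahead entry point: add each page handed to us by the VM to a pageio
 * descriptor, then fire off the batched READ requests with
 * nfs_pageio_complete().
 */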
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t rsize = server->rsize;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));
	if (rsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
	else
		nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}
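/*
 * Create the slab cache and mempool backing nfs_read_data allocations.
 * MIN_POOL_READ entries are reserved so reads can keep making progress
 * under memory pressure.
 */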
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL)
		return -ENOMEM;

	return 0;
}
void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	kmem_cache_destroy(nfs_rdata_cachep);
}