/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 * 04 May 2005  support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY         NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
        struct kref             kref;           /* release manager */

        /* I/O parameters */
        struct nfs_open_context *ctx;           /* file open context info */
        struct nfs_lock_context *l_ctx;         /* Lock context info */
        struct kiocb *          iocb;           /* controlling i/o request */
        struct inode *          inode;          /* target file of i/o */

        /* completion state */
        atomic_t                io_count;       /* i/os we're waiting for */
        spinlock_t              lock;           /* protect completion state */
        ssize_t                 count,          /* bytes actually processed */
                                error;          /* any reported error */
        struct completion       completion;     /* wait for i/o completion */

        /* commit state */
        struct nfs_mds_commit_info mds_cinfo;   /* Storage for cinfo */
        struct pnfs_ds_commit_info ds_cinfo;    /* Storage for cinfo */
        struct work_struct      work;
        int                     flags;
#define NFS_ODIRECT_DO_COMMIT           (1)     /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES      (2)     /* write verification failed */
        struct nfs_writeverf    verf;           /* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

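/*
 * I/O reference counting: the scheduling path takes one reference up
 * front, and each pgio header submitted on the dreq's behalf takes
 * another (via the init_hdr callback).  put_dreq() returns true only
 * when the last reference drops, which is the signal that completion
 * processing may run.
 */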
static inline void get_dreq(struct nfs_direct_req *dreq)
{
        atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
        return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
        dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
                        iocb->ki_filp->f_path.dentry->d_name.name,
                        (long long) pos, nr_segs);

        return -EINVAL;
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
        unsigned int i;
        for (i = 0; i < npages; i++)
                page_cache_release(pages[i]);
}

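/*
 * Point a commit info structure at the commit state embedded in the
 * dreq, so that commit handling for direct writes stays off the
 * inode's normal commit lists.
 */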
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq)
{
        cinfo->lock = &dreq->lock;
        cinfo->mds = &dreq->mds_cinfo;
        cinfo->ds = &dreq->ds_cinfo;
        cinfo->dreq = dreq;
        cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;

        kref_init(&dreq->kref);
        kref_get(&dreq->kref);
        init_completion(&dreq->completion);
        INIT_LIST_HEAD(&dreq->mds_cinfo.list);
        INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
        spin_lock_init(&dreq->lock);

        return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        if (dreq->l_ctx != NULL)
                nfs_put_lock_context(dreq->l_ctx);
        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
        kref_put(&dreq->kref, nfs_direct_req_free);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_for_completion_killable(&dreq->completion);

        if (!result)
                result = dreq->error;
        if (!result)
                result = dreq->count;

out:
        return (ssize_t) result;
}

203
204 /*
205  * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
206  * the iocb is still valid here if this is a synchronous request.
207  */
208 static void nfs_direct_complete(struct nfs_direct_req *dreq)
209 {
210         if (dreq->iocb) {
211                 long res = (long) dreq->error;
212                 if (!res)
213                         res = (long) dreq->count;
214                 aio_complete(dreq->iocb, res, 0);
215         }
216         complete_all(&dreq->completion);
217
218         nfs_direct_req_release(dreq);
219 }
220
221 static void nfs_direct_readpage_release(struct nfs_page *req)
222 {
223         dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
224                 req->wb_context->dentry->d_inode->i_sb->s_id,
225                 (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
226                 req->wb_bytes,
227                 (long long)req_offset(req));
228         nfs_release_request(req);
229 }
230
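/*
 * Per-header completion for direct READs: account the bytes that
 * arrived, zero out any shortfall at EOF, and release the page
 * requests.
 */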
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;
        struct nfs_direct_req *dreq = hdr->dreq;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out_put;

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
                dreq->error = hdr->error;
        else
                dreq->count += hdr->good_bytes;
        spin_unlock(&dreq->lock);

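        /*
         * Walk the completed requests: on a short read at EOF, zero
         * the portion of each page beyond the good byte count so no
         * stale data is exposed, and mark pages that received data
         * dirty.
         */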
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;

                if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
                        if (bytes > hdr->good_bytes)
                                zero_user(page, 0, PAGE_SIZE);
                        else if (hdr->good_bytes - bytes < PAGE_SIZE)
                                zero_user_segment(page,
                                        hdr->good_bytes & ~PAGE_MASK,
                                        PAGE_SIZE);
                }
                if (!PageCompound(page)) {
                        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                                if (bytes < hdr->good_bytes)
                                        set_page_dirty(page);
                        } else
                                set_page_dirty(page);
                }
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
                nfs_direct_readpage_release(req);
        }
out_put:
        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
        get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
        .error_cleanup = nfs_read_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If get_user_pages() or nfs_create_request() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_completion().  Otherwise,
 * if no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
                                                const struct iovec *iov,
                                                loff_t pos)
{
        struct nfs_direct_req *dreq = desc->pg_dreq;
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        unsigned long user_addr = (unsigned long)iov->iov_base;
        size_t count = iov->iov_len;
        size_t rsize = NFS_SERVER(inode)->rsize;
        unsigned int pgbase;
        int result;
        ssize_t started = 0;
        struct page **pagevec = NULL;
        unsigned int npages;

        do {
                size_t bytes;
                int i;

                pgbase = user_addr & ~PAGE_MASK;
                bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);

                result = -ENOMEM;
                npages = nfs_page_array_len(pgbase, bytes);
                if (!pagevec)
                        pagevec = kmalloc(npages * sizeof(struct page *),
                                          GFP_KERNEL);
                if (!pagevec)
                        break;
                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        npages, 1, 0, pagevec, NULL);
                up_read(&current->mm->mmap_sem);
                if (result < 0)
                        break;
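                /*
                 * get_user_pages() may pin fewer pages than asked for;
                 * trim the byte count to what was actually pinned, and
                 * bail out if the pinned range does not even reach past
                 * the offset within the first page.
                 */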
                if ((unsigned)result < npages) {
                        bytes = result * PAGE_SIZE;
                        if (bytes <= pgbase) {
                                nfs_direct_release_pages(pagevec, result);
                                break;
                        }
                        bytes -= pgbase;
                        npages = result;
                }

                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
                        /* XXX do we need to do the eof zeroing found in async_filler? */
                        req = nfs_create_request(dreq->ctx, dreq->inode,
                                                 pagevec[i],
                                                 pgbase, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }
                        req->wb_index = pos >> PAGE_SHIFT;
                        req->wb_offset = pos & ~PAGE_MASK;
                        if (!nfs_pageio_add_request(desc, req)) {
                                result = desc->pg_error;
                                nfs_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        started += req_len;
                        user_addr += req_len;
                        pos += req_len;
                        count -= req_len;
                }
                /* The nfs_page structures now hold references to these pages */
                nfs_direct_release_pages(pagevec, npages);
        } while (count != 0 && result >= 0);

        kfree(pagevec);

        if (started)
                return started;
        return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                                              const struct iovec *iov,
                                              unsigned long nr_segs,
                                              loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        unsigned long seg;

        nfs_pageio_init_read(&desc, dreq->inode,
                             &nfs_direct_read_completion_ops);
        get_dreq(dreq);
        desc.pg_dreq = dreq;

        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
                result = nfs_direct_read_schedule_segment(&desc, vec, pos);
                if (result < 0)
                        break;
                requested_bytes += result;
                if ((size_t)result < vec->iov_len)
                        break;
                pos += vec->iov_len;
        }

        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        return 0;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
                               unsigned long nr_segs, loff_t pos)
{
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct nfs_direct_req *dreq;

        dreq = nfs_direct_req_alloc();
        if (dreq == NULL)
                goto out;

        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
        if (dreq->l_ctx == NULL)
                goto out_release;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
        NFS_I(inode)->read_io += result;
out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
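/*
 * Resend writes whose commit failed or whose verifier did not match.
 * The requests are recovered from the commit lists and driven through
 * the pageio layer again with FLUSH_STABLE, so the second pass does
 * not depend on a further COMMIT.
 */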
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct nfs_pageio_descriptor desc;
        struct nfs_page *req, *tmp;
        LIST_HEAD(reqs);
        struct nfs_commit_info cinfo;
        LIST_HEAD(failed);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
        spin_lock(cinfo.lock);
        nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
        spin_unlock(cinfo.lock);

        dreq->count = 0;
        get_dreq(dreq);

        nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;

        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                if (!nfs_pageio_add_request(&desc, req)) {
                        nfs_list_add_request(req, &failed);
                        spin_lock(cinfo.lock);
                        dreq->flags = 0;
                        dreq->error = -EIO;
                        spin_unlock(cinfo.lock);
                }
        }
        nfs_pageio_complete(&desc);

        while (!list_empty(&failed)) {
                req = nfs_list_entry(failed.next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, dreq->inode);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
        struct nfs_direct_req *dreq = data->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        int status = data->task.tk_status;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        if (status < 0) {
                dprintk("NFS: %5u commit failed with error %d.\n",
                        data->task.tk_pid, status);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        } else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
                dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        }

        dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
                        /* Note the rewrite will go through mds */
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, NULL, &cinfo);
                }
                nfs_unlock_and_release_request(req);
        }

        if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
                nfs_direct_write_complete(dreq, data->inode);
}

static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
        /* There is no lock to clear */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
        .completion = nfs_direct_commit_complete,
        .error_cleanup = nfs_direct_error_cleanup,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        int res;
        struct nfs_commit_info cinfo;
        LIST_HEAD(mds_list);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
        res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
        if (res < 0) /* res == -ENOMEM */
                nfs_direct_write_reschedule(dreq);
}

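/*
 * Deferred completion for direct writes, run from a workqueue so that
 * a COMMIT (or a full resend) can be issued from process context once
 * the last WRITE reply has been handled.
 */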
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
        struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
                case NFS_ODIRECT_DO_COMMIT:
                        nfs_direct_commit_schedule(dreq);
                        break;
                case NFS_ODIRECT_RESCHED_WRITES:
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
                        nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
                        nfs_direct_complete(dreq);
        }
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}

#else
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        nfs_zap_mapping(inode, inode->i_mapping);
        nfs_direct_complete(dreq);
}
#endif

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If get_user_pages() or nfs_create_request() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_completion().  Otherwise,
 * if no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
                                                 const struct iovec *iov,
                                                 loff_t pos)
{
        struct nfs_direct_req *dreq = desc->pg_dreq;
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        unsigned long user_addr = (unsigned long)iov->iov_base;
        size_t count = iov->iov_len;
        size_t wsize = NFS_SERVER(inode)->wsize;
        unsigned int pgbase;
        int result;
        ssize_t started = 0;
        struct page **pagevec = NULL;
        unsigned int npages;

        do {
                size_t bytes;
                int i;

                pgbase = user_addr & ~PAGE_MASK;
                bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);

                result = -ENOMEM;
                npages = nfs_page_array_len(pgbase, bytes);
                if (!pagevec)
                        pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
                if (!pagevec)
                        break;

                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        npages, 0, 0, pagevec, NULL);
                up_read(&current->mm->mmap_sem);
                if (result < 0)
                        break;

                if ((unsigned)result < npages) {
                        bytes = result * PAGE_SIZE;
                        if (bytes <= pgbase) {
                                nfs_direct_release_pages(pagevec, result);
                                break;
                        }
                        bytes -= pgbase;
                        npages = result;
                }

                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

                        req = nfs_create_request(dreq->ctx, dreq->inode,
                                                 pagevec[i],
                                                 pgbase, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }
                        nfs_lock_request(req);
                        req->wb_index = pos >> PAGE_SHIFT;
                        req->wb_offset = pos & ~PAGE_MASK;
                        if (!nfs_pageio_add_request(desc, req)) {
                                result = desc->pg_error;
                                nfs_unlock_and_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        started += req_len;
                        user_addr += req_len;
                        pos += req_len;
                        count -= req_len;
                }
                /* The nfs_page structures now hold references to these pages */
                nfs_direct_release_pages(pagevec, npages);
        } while (count != 0 && result >= 0);

        kfree(pagevec);

        if (started)
                return started;
        return result < 0 ? (ssize_t) result : -EFAULT;
}

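/*
 * Per-header completion for direct WRITEs: invoked once all of the
 * RPCs backing an nfs_pgio_header have finished.
 */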
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;
        struct nfs_commit_info cinfo;
        int bit = -1;
        struct nfs_page *req = nfs_list_entry(hdr->pages.next);

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out_put;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

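        /*
         * Under dreq->lock, decide what happens to this batch: record
         * any error, accumulate the good bytes, and use the write
         * verifier to choose between committing and rescheduling.
         */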
        spin_lock(&dreq->lock);

        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                dreq->flags = 0;
                dreq->error = hdr->error;
        }
        if (dreq->error != 0)
                bit = NFS_IOHDR_ERROR;
        else {
                dreq->count += hdr->good_bytes;
                if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                        bit = NFS_IOHDR_NEED_RESCHED;
                } else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
                        if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
                                bit = NFS_IOHDR_NEED_RESCHED;
                        else if (dreq->flags == 0) {
                                memcpy(&dreq->verf, &req->wb_verf,
                                       sizeof(dreq->verf));
                                bit = NFS_IOHDR_NEED_COMMIT;
                                dreq->flags = NFS_ODIRECT_DO_COMMIT;
                        } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
                                if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) {
                                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                                        bit = NFS_IOHDR_NEED_RESCHED;
                                } else
                                        bit = NFS_IOHDR_NEED_COMMIT;
                        }
                }
        }
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {
                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
                switch (bit) {
                case NFS_IOHDR_NEED_RESCHED:
                case NFS_IOHDR_NEED_COMMIT:
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo);
                }
                nfs_unlock_and_release_request(req);
        }

out_put:
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, hdr->inode);
        hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
        .error_cleanup = nfs_write_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_write_completion,
};

static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                               const struct iovec *iov,
                                               unsigned long nr_segs,
                                               loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        ssize_t result = 0;
        size_t requested_bytes = 0;
        unsigned long seg;

        nfs_pageio_init_write(&desc, dreq->inode, FLUSH_COND_STABLE,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;
        get_dreq(dreq);

        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
                result = nfs_direct_write_schedule_segment(&desc, vec, pos);
                if (result < 0)
                        break;
                requested_bytes += result;
                if ((size_t)result < vec->iov_len)
                        break;
                pos += vec->iov_len;
        }
        nfs_pageio_complete(&desc);
        NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, dreq->inode);
        return 0;
}

static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos,
                                size_t count)
{
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct nfs_direct_req *dreq;

        dreq = nfs_direct_req_alloc();
        if (!dreq)
                goto out;

        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
        if (dreq->l_ctx == NULL)
                goto out_release;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid its check that the
 * request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also avoids unnecessarily updating the file's atime
 * locally, as the NFS server sets the file's atime, and this client
 * must read the updated atime from the server back into its cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos)
{
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count;

        count = iov_length(iov, nr_segs);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

        dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
                file->f_path.dentry->d_parent->d_name.name,
                file->f_path.dentry->d_name.name,
                count, (long long) pos);

        retval = 0;
        if (!count)
                goto out;

        retval = nfs_sync_mapping(mapping);
        if (retval)
                goto out;

        task_io_account_read(count);

        retval = nfs_direct_read(iocb, iov, nr_segs, pos);
        if (retval > 0)
                iocb->ki_pos = pos + retval;

out:
        return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos)
{
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count;

        count = iov_length(iov, nr_segs);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

        dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
                file->f_path.dentry->d_parent->d_name.name,
                file->f_path.dentry->d_name.name,
                count, (long long) pos);

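        /* Let the VFS apply its standard write sanity checks (size
         * limits, pos/count adjustment) before we go direct. */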
        retval = generic_write_checks(file, &pos, &count, 0);
        if (retval)
                goto out;

        retval = -EINVAL;
        if ((ssize_t) count < 0)
                goto out;
        retval = 0;
        if (!count)
                goto out;

        retval = nfs_sync_mapping(mapping);
        if (retval)
                goto out;

        task_io_account_write(count);

        retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
        if (retval > 0) {
                struct inode *inode = mapping->host;

                iocb->ki_pos = pos + retval;
                spin_lock(&inode->i_lock);
                if (i_size_read(inode) < iocb->ki_pos)
                        i_size_write(inode, iocb->ki_pos);
                spin_unlock(&inode->i_lock);
        }
out:
        return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                                sizeof(struct nfs_direct_req),
                                                0, (SLAB_RECLAIM_ACCOUNT|
                                                        SLAB_MEM_SPREAD),
                                                NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
        kmem_cache_destroy(nfs_direct_cachep);
}