/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

#define MIN_POOL_WRITE          (32)
#define MIN_POOL_COMMIT         (4)

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_write_common_ops;
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

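/*
 * The allocators below draw from mempools sized by MIN_POOL_WRITE and
 * MIN_POOL_COMMIT, so writeback can make forward progress under memory
 * pressure: a blockable GFP_NOIO allocation waits for one of the
 * reserved objects rather than fail, and GFP_NOIO prevents recursing
 * into filesystem I/O from within writeback itself.
 */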
struct nfs_commit_data *nfs_commitdata_alloc(void)
{
        struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
        }
        return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
        mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

struct nfs_rw_header *nfs_writehdr_alloc(void)
{
        struct nfs_rw_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);

        if (p) {
                struct nfs_pgio_header *hdr = &p->header;

                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&hdr->pages);
                INIT_LIST_HEAD(&hdr->rpc_list);
                spin_lock_init(&hdr->lock);
                atomic_set(&hdr->refcnt, 0);
        }
        return p;
}
EXPORT_SYMBOL_GPL(nfs_writehdr_alloc);

void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
        struct nfs_rw_header *whdr = container_of(hdr, struct nfs_rw_header, header);
        mempool_free(whdr, nfs_wdata_mempool);
}
EXPORT_SYMBOL_GPL(nfs_writehdr_free);

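/*
 * Record a fatal write error on the open context.  The error value is
 * published before NFS_CONTEXT_ERROR_WRITE is set: the smp_wmb() pairs
 * with a read barrier on the side that tests the bit, so any reader
 * that observes the flag also observes ctx->error.
 */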
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
        ctx->error = error;
        smp_wmb();
        set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

static struct nfs_page *
nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
{
        struct nfs_page *req = NULL;

        if (PagePrivate(page))
                req = (struct nfs_page *)page_private(page);
        else if (unlikely(PageSwapCache(page))) {
                struct nfs_page *freq, *t;

                /* Linearly search the commit list for the correct req */
                list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
                        if (freq->wb_page == page) {
                                req = freq;
                                break;
                        }
                }
        }

        if (req)
                kref_get(&req->wb_kref);

        return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *req = NULL;

        spin_lock(&inode->i_lock);
        req = nfs_page_find_request_locked(NFS_I(inode), page);
        spin_unlock(&inode->i_lock);
        return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
        struct inode *inode = page_file_mapping(page)->host;
        loff_t end, i_size;
        pgoff_t end_index;

        spin_lock(&inode->i_lock);
        i_size = i_size_read(inode);
        end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
        if (i_size > 0 && page_file_index(page) < end_index)
                goto out;
        end = page_file_offset(page) + ((loff_t)offset+count);
        if (i_size >= end)
                goto out;
        i_size_write(inode, end);
        nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
        spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
        nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
        if (PageUptodate(page))
                return;
        if (base != 0)
                return;
        if (count != nfs_page_length(page))
                return;
        SetPageUptodate(page);
}

static int wb_priority(struct writeback_control *wbc)
{
        if (wbc->for_reclaim)
                return FLUSH_HIGHPRI | FLUSH_STABLE;
        if (wbc->for_kupdate || wbc->for_background)
                return FLUSH_LOWPRI | FLUSH_COND_STABLE;
        return FLUSH_COND_STABLE;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH       \
        (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

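/*
 * The per-server writeback counter is compared against the thresholds
 * above in units of pages: ON converts nfs_congestion_kb to pages, and
 * OFF is three quarters of ON.  The gap provides hysteresis so the bdi
 * does not flap between congested and uncongested states.
 */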
static void nfs_set_page_writeback(struct page *page)
{
        struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host);
        int ret = test_set_page_writeback(page);

        WARN_ON_ONCE(ret != 0);

        if (atomic_long_inc_return(&nfss->writeback) >
                        NFS_CONGESTION_ON_THRESH) {
                set_bdi_congested(&nfss->backing_dev_info,
                                        BLK_RW_ASYNC);
        }
}

static void nfs_end_page_writeback(struct page *page)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_server *nfss = NFS_SERVER(inode);

        end_page_writeback(page);
        if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}

static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *req;
        int ret;

        spin_lock(&inode->i_lock);
        for (;;) {
                req = nfs_page_find_request_locked(NFS_I(inode), page);
                if (req == NULL)
                        break;
                if (nfs_lock_request(req))
                        break;
                /* Note: If we hold the page lock, as is the case in nfs_writepage,
                 *       then the call to nfs_lock_request() will always
                 *       succeed provided that someone hasn't already marked the
                 *       request as dirty (in which case we don't care).
                 */
                spin_unlock(&inode->i_lock);
                if (!nonblock)
                        ret = nfs_wait_on_request(req);
                else
                        ret = -EAGAIN;
                nfs_release_request(req);
                if (ret != 0)
                        return ERR_PTR(ret);
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
        return req;
}

/*
 * Find an associated nfs write request, and prepare to flush it out.
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                                struct page *page, bool nonblock)
{
        struct nfs_page *req;
        int ret = 0;

        req = nfs_find_and_lock_request(page, nonblock);
        if (!req)
                goto out;
        ret = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        nfs_set_page_writeback(page);
        WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

        ret = 0;
        if (!nfs_pageio_add_request(pgio, req)) {
                nfs_redirty_request(req);
                ret = pgio->pg_error;
        }
out:
        return ret;
}

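/*
 * Called for each dirty page: wait for any conflicting I/O, then hand
 * the page's request to the pageio descriptor.  An -EAGAIN from a
 * nonblocking flush simply redirties the page so that it is retried
 * on the next writeback pass.
 */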
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
        struct inode *inode = page_file_mapping(page)->host;
        int ret;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
        nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

        nfs_pageio_cond_complete(pgio, page_file_index(page));
        ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
        if (ret == -EAGAIN) {
                redirty_page_for_writepage(wbc, page);
                ret = 0;
        }
        return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
        struct nfs_pageio_descriptor pgio;
        int err;

        nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
                                false, &nfs_async_write_completion_ops);
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
        if (err < 0)
                return err;
        if (pgio.pg_error < 0)
                return pgio.pg_error;
        return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        ret = nfs_writepage_locked(page, wbc);
        unlock_page(page);
        return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
        int ret;

        ret = nfs_do_writepage(page, wbc, data);
        unlock_page(page);
        return ret;
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        unsigned long *bitlock = &NFS_I(inode)->flags;
        struct nfs_pageio_descriptor pgio;
        int err;

        /* Stop dirtying of new pages while we sync */
        err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
                        nfs_wait_bit_killable, TASK_KILLABLE);
        if (err)
                goto out_err;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

        nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
                                &nfs_async_write_completion_ops);
        err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
        nfs_pageio_complete(&pgio);

        clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
        smp_mb__after_clear_bit();
        wake_up_bit(bitlock, NFS_INO_FLUSHING);

        if (err < 0)
                goto out_err;
        err = pgio.pg_error;
        if (err < 0)
                goto out_err;
        return 0;
out_err:
        return err;
}

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(inode);

        /* Lock the request! */
        nfs_lock_request(req);

        spin_lock(&inode->i_lock);
        if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
                inode->i_version++;
        /*
         * Swap-space should not get truncated. Hence no need to plug the race
         * with invalidate/truncate.
         */
        if (likely(!PageSwapCache(req->wb_page))) {
                set_bit(PG_MAPPED, &req->wb_flags);
                SetPagePrivate(req->wb_page);
                set_page_private(req->wb_page, (unsigned long)req);
        }
        nfsi->npages++;
        kref_get(&req->wb_kref);
        spin_unlock(&inode->i_lock);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        if (likely(!PageSwapCache(req->wb_page))) {
                set_page_private(req->wb_page, 0);
                ClearPagePrivate(req->wb_page);
                clear_bit(PG_MAPPED, &req->wb_flags);
        }
        nfsi->npages--;
        spin_unlock(&inode->i_lock);
        nfs_release_request(req);
}

static void
nfs_mark_request_dirty(struct nfs_page *req)
{
        __set_page_dirty_nobuffers(req->wb_page);
}

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, and updates the cinfo count of
 * outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
                            struct nfs_commit_info *cinfo)
{
        set_bit(PG_CLEAN, &(req)->wb_flags);
        spin_lock(cinfo->lock);
        nfs_list_add_request(req, dst);
        cinfo->mds->ncommit++;
        spin_unlock(cinfo->lock);
        if (!cinfo->dreq) {
                inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
                inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
                             BDI_RECLAIMABLE);
                __mark_inode_dirty(req->wb_context->dentry->d_inode,
                                   I_DIRTY_DATASYNC);
        }
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * outstanding requests requiring a commit.
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
                               struct nfs_commit_info *cinfo)
{
        if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
                return;
        nfs_list_remove_request(req);
        cinfo->mds->ncommit--;
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
                                      struct inode *inode)
{
        cinfo->lock = &inode->i_lock;
        cinfo->mds = &NFS_I(inode)->commit_info;
        cinfo->ds = pnfs_get_ds_info(inode);
        cinfo->dreq = NULL;
        cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
                    struct inode *inode,
                    struct nfs_direct_req *dreq)
{
        if (dreq)
                nfs_init_cinfo_from_dreq(cinfo, dreq);
        else
                nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
                        struct nfs_commit_info *cinfo)
{
        if (pnfs_mark_request_commit(req, lseg, cinfo))
                return;
        nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
}

static void
nfs_clear_page_commit(struct page *page)
{
        dec_zone_page_state(page, NR_UNSTABLE_NFS);
        dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
}

static void
nfs_clear_request_commit(struct nfs_page *req)
{
        if (test_bit(PG_CLEAN, &req->wb_flags)) {
                struct inode *inode = req->wb_context->dentry->d_inode;
                struct nfs_commit_info cinfo;

                nfs_init_cinfo_from_inode(&cinfo, inode);
                if (!pnfs_clear_request_commit(req, &cinfo)) {
                        spin_lock(cinfo.lock);
                        nfs_request_remove_commit_list(req, &cinfo);
                        spin_unlock(cinfo.lock);
                }
                nfs_clear_page_commit(req->wb_page);
        }
}

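/*
 * A request needs a COMMIT when the server has not yet guaranteed
 * stable storage: an NFS_UNSTABLE reply always needs one, and an
 * NFS_DATA_SYNC reply needs one on the MDS (lseg == NULL) path.
 */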
static inline
int nfs_write_need_commit(struct nfs_pgio_data *data)
{
        if (data->verf.committed == NFS_DATA_SYNC)
                return data->header->lseg == NULL;
        return data->verf.committed != NFS_FILE_SYNC;
}

#else
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
                                      struct inode *inode)
{
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
                    struct inode *inode,
                    struct nfs_direct_req *dreq)
{
}

void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
                        struct nfs_commit_info *cinfo)
{
}

static void
nfs_clear_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_pgio_data *data)
{
        return 0;
}

#endif

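/*
 * Completion callback for a write header: each request either had an
 * error (mark the context and drop it), must be resent (mark it dirty
 * again), or needs a COMMIT (save the verifier and queue it on the
 * commit list) before it can finally be removed from the inode.
 */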
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
        struct nfs_commit_info cinfo;
        unsigned long bytes = 0;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
        nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);

                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
                    (hdr->good_bytes < bytes)) {
                        nfs_set_pageerror(req->wb_page);
                        nfs_context_set_write_error(req->wb_context, hdr->error);
                        goto remove_req;
                }
                if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
                        nfs_mark_request_dirty(req);
                        goto next;
                }
                if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
                        memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo);
                        goto next;
                }
remove_req:
                nfs_inode_remove_request(req);
next:
                nfs_unlock_request(req);
                nfs_end_page_writeback(req->wb_page);
                nfs_release_request(req);
        }
out:
        hdr->release(hdr);
}

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
static unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
        return cinfo->mds->ncommit;
}

/* cinfo->lock held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
                     struct nfs_commit_info *cinfo, int max)
{
        struct nfs_page *req, *tmp;
        int ret = 0;

        list_for_each_entry_safe(req, tmp, src, wb_list) {
                if (!nfs_lock_request(req))
                        continue;
                kref_get(&req->wb_kref);
                if (cond_resched_lock(cinfo->lock))
                        list_safe_reset_next(req, tmp, wb_list);
                nfs_request_remove_commit_list(req, cinfo);
                nfs_list_add_request(req, dst);
                ret++;
                if ((ret == max) && !cinfo->dreq)
                        break;
        }
        return ret;
}

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
                struct nfs_commit_info *cinfo)
{
        int ret = 0;

        spin_lock(cinfo->lock);
        if (cinfo->mds->ncommit > 0) {
                const int max = INT_MAX;

                ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
                                           cinfo, max);
                ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
        }
        spin_unlock(cinfo->lock);
        return ret;
}

#else
static unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
        return 0;
}

int nfs_scan_commit(struct inode *inode, struct list_head *dst,
                    struct nfs_commit_info *cinfo)
{
        return 0;
}
#endif

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
                struct page *page,
                unsigned int offset,
                unsigned int bytes)
{
        struct nfs_page *req;
        unsigned int rqend;
        unsigned int end;
        int error;

        if (!PagePrivate(page))
                return NULL;

        end = offset + bytes;
        spin_lock(&inode->i_lock);

        for (;;) {
                req = nfs_page_find_request_locked(NFS_I(inode), page);
                if (req == NULL)
                        goto out_unlock;

                rqend = req->wb_offset + req->wb_bytes;
                /*
                 * Tell the caller to flush out the request if
                 * the offsets are non-contiguous.
                 * Note: nfs_flush_incompatible() will already
                 * have flushed out requests having wrong owners.
                 */
                if (offset > rqend
                    || end < req->wb_offset)
                        goto out_flushme;

                if (nfs_lock_request(req))
                        break;

                /* The request is locked, so wait and then retry */
                spin_unlock(&inode->i_lock);
                error = nfs_wait_on_request(req);
                nfs_release_request(req);
                if (error != 0)
                        goto out_err;
                spin_lock(&inode->i_lock);
        }

        /* Okay, the request matches. Update the region */
        if (offset < req->wb_offset) {
                req->wb_offset = offset;
                req->wb_pgbase = offset;
        }
        if (end > rqend)
                req->wb_bytes = end - req->wb_offset;
        else
                req->wb_bytes = rqend - req->wb_offset;
out_unlock:
        spin_unlock(&inode->i_lock);
        if (req)
                nfs_clear_request_commit(req);
        return req;
out_flushme:
        spin_unlock(&inode->i_lock);
        nfs_release_request(req);
        error = nfs_wb_page(inode, page);
out_err:
        return ERR_PTR(error);
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
                struct page *page, unsigned int offset, unsigned int bytes)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *req;

        req = nfs_try_to_update_request(inode, page, offset, bytes);
        if (req != NULL)
                goto out;
        req = nfs_create_request(ctx, inode, page, offset, bytes);
        if (IS_ERR(req))
                goto out;
        nfs_inode_add_request(inode, req);
out:
        return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_page *req;

        req = nfs_setup_write_request(ctx, page, offset, count);
        if (IS_ERR(req))
                return PTR_ERR(req);
        /* Update file length */
        nfs_grow_file(page, offset, count);
        nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
        nfs_mark_request_dirty(req);
        nfs_unlock_and_release_request(req);
        return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct nfs_lock_context *l_ctx;
        struct nfs_page *req;
        int do_flush, status;
        /*
         * Look for a request corresponding to this page. If there
         * is one, and it belongs to another file, we flush it out
         * before we try to copy anything into the page. Do this
         * due to the lack of an ACCESS-type call in NFSv2.
         * Also do the same if we find a request from an existing
         * dropped page.
         */
        do {
                req = nfs_page_find_request(page);
                if (req == NULL)
                        return 0;
                l_ctx = req->wb_lock_context;
                do_flush = req->wb_page != page || req->wb_context != ctx;
                if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
                        do_flush |= l_ctx->lockowner.l_owner != current->files
                                || l_ctx->lockowner.l_pid != current->tgid;
                }
                nfs_release_request(req);
                if (!do_flush)
                        return 0;
                status = nfs_wb_page(page_file_mapping(page)->host, page);
        } while (status == 0);
        return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and performs NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
        struct nfs_open_context *ctx = nfs_file_open_context(filp);
        struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

        return rpcauth_key_timeout_notify(auth, ctx->cred);
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx)
{
        return rpcauth_cred_key_to_expire(ctx->cred);
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);

        if (nfs_have_delegated_attributes(inode))
                goto out;
        if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
                return false;
        smp_rmb();
        if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
                return false;
out:
        return PageUptodate(page) != 0;
}

/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
        if (file->f_flags & O_DSYNC)
                return 0;
        if (!nfs_write_pageuptodate(page, inode))
                return 0;
        if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
                return 1;
        if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
                        inode->i_flock->fl_end == OFFSET_MAX &&
                        inode->i_flock->fl_type != F_RDLCK))
                return 1;
        return 0;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct inode    *inode = page_file_mapping(page)->host;
        int             status = 0;

        nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

        dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
                file, count, (long long)(page_file_offset(page) + offset));

        if (nfs_can_extend_write(file, page, inode)) {
                count = max(count + offset, nfs_page_length(page));
                offset = 0;
        }

        status = nfs_writepage_setup(ctx, page, offset, count);
        if (status < 0)
                nfs_set_pageerror(page);
        else
                __set_page_dirty_nobuffers(page);

        dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
                        status, (long long)i_size_read(inode));
        return status;
}

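/* Map FLUSH_* writeback flags onto the matching RPC task priority. */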
static int flush_task_priority(int how)
{
        switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
                case FLUSH_HIGHPRI:
                        return RPC_PRIORITY_HIGH;
                case FLUSH_LOWPRI:
                        return RPC_PRIORITY_LOW;
        }
        return RPC_PRIORITY_NORMAL;
}

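/*
 * Set up an asynchronous WRITE RPC and start it.  With FLUSH_SYNC the
 * caller also waits for the task to complete, and the task's tk_status
 * becomes the return value.
 */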
int nfs_initiate_write(struct rpc_clnt *clnt,
                       struct nfs_pgio_data *data,
                       const struct rpc_call_ops *call_ops,
                       int how, int flags)
{
        struct inode *inode = data->header->inode;
        int priority = flush_task_priority(how);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = data->header->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_client = clnt,
                .task = &data->task,
                .rpc_message = &msg,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC | flags,
                .priority = priority,
        };
        int ret = 0;

        /* Set up the initial task struct.  */
        NFS_PROTO(inode)->write_setup(data, &msg);

        dprintk("NFS: %5u initiated write call "
                "(req %s/%llu, %u bytes @ offset %llu)\n",
                data->task.tk_pid,
                inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(inode),
                data->args.count,
                (unsigned long long)data->args.offset);

        nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client,
                                 &task_setup_data.rpc_client, &msg, data);

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task)) {
                ret = PTR_ERR(task);
                goto out;
        }
        if (how & FLUSH_SYNC) {
                ret = rpc_wait_for_completion_task(task);
                if (ret == 0)
                        ret = task->tk_status;
        }
        rpc_put_task(task);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_write);

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_pgio_data *data,
                unsigned int count, unsigned int offset,
                int how, struct nfs_commit_info *cinfo)
{
        struct nfs_page *req = data->header->req;

        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */

        data->args.fh     = NFS_FH(data->header->inode);
        data->args.offset = req_offset(req) + offset;
        /* pnfs_set_layoutcommit needs this */
        data->mds_offset = data->args.offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages  = data->pages.pagevec;
        data->args.count  = count;
        data->args.context = get_nfs_open_context(req->wb_context);
        data->args.lock_context = req->wb_lock_context;
        data->args.stable  = NFS_UNSTABLE;
        switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
        case 0:
                break;
        case FLUSH_COND_STABLE:
                if (nfs_reqs_to_commit(cinfo))
                        break;
        default:
                data->args.stable = NFS_FILE_SYNC;
        }

        data->res.fattr   = &data->fattr;
        data->res.count   = count;
        data->res.verf    = &data->verf;
        nfs_fattr_init(&data->fattr);
}

static int nfs_do_write(struct nfs_pgio_data *data,
                const struct rpc_call_ops *call_ops,
                int how)
{
        struct inode *inode = data->header->inode;

        return nfs_initiate_write(NFS_CLIENT(inode), data, call_ops, how, 0);
}

static int nfs_do_multiple_writes(struct list_head *head,
                const struct rpc_call_ops *call_ops,
                int how)
{
        struct nfs_pgio_data *data;
        int ret = 0;

        while (!list_empty(head)) {
                int ret2;

                data = list_first_entry(head, struct nfs_pgio_data, list);
                list_del_init(&data->list);

                ret2 = nfs_do_write(data, call_ops, how);
                if (ret == 0)
                        ret = ret2;
        }
        return ret;
}

/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
        nfs_mark_request_dirty(req);
        nfs_unlock_request(req);
        nfs_end_page_writeback(req->wb_page);
        nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_redirty_request(req);
        }
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
        .error_cleanup = nfs_async_write_error,
        .completion = nfs_write_completion,
};

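/*
 * Allocation failed while building RPCs: mark the header for redo,
 * free whatever pgio data was already queued, and hand the unsent
 * requests to the error_cleanup callback to be redirtied.
 */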
static void nfs_flush_error(struct nfs_pageio_descriptor *desc,
                struct nfs_pgio_header *hdr)
{
        set_bit(NFS_IOHDR_REDO, &hdr->flags);
        while (!list_empty(&hdr->rpc_list)) {
                struct nfs_pgio_data *data = list_first_entry(&hdr->rpc_list,
                                struct nfs_pgio_data, list);
                list_del(&data->list);
                nfs_pgio_data_release(data);
        }
        desc->pg_completion_ops->error_cleanup(&desc->pg_list);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct nfs_pageio_descriptor *desc,
                           struct nfs_pgio_header *hdr)
{
        struct nfs_page *req = hdr->req;
        struct page *page = req->wb_page;
        struct nfs_pgio_data *data;
        size_t wsize = desc->pg_bsize, nbytes;
        unsigned int offset;
        int requests = 0;
        struct nfs_commit_info cinfo;

        nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);

        if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
            (desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
             desc->pg_count > wsize))
                desc->pg_ioflags &= ~FLUSH_COND_STABLE;

        offset = 0;
        nbytes = desc->pg_count;
        do {
                size_t len = min(nbytes, wsize);

                data = nfs_pgio_data_alloc(hdr, 1);
                if (!data) {
                        nfs_flush_error(desc, hdr);
                        return -ENOMEM;
                }
                data->pages.pagevec[0] = page;
                nfs_write_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
                list_add(&data->list, &hdr->rpc_list);
                requests++;
                nbytes -= len;
                offset += len;
        } while (nbytes != 0);
        nfs_list_remove_request(req);
        nfs_list_add_request(req, &hdr->pages);
        desc->pg_rpc_callops = &nfs_write_common_ops;
        return 0;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct nfs_pageio_descriptor *desc,
                         struct nfs_pgio_header *hdr)
{
        struct nfs_page         *req;
        struct page             **pages;
        struct nfs_pgio_data    *data;
        struct list_head *head = &desc->pg_list;
        struct nfs_commit_info cinfo;

        data = nfs_pgio_data_alloc(hdr, nfs_page_array_len(desc->pg_base,
                                                           desc->pg_count));
        if (!data) {
                nfs_flush_error(desc, hdr);
                return -ENOMEM;
        }

        nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
        pages = data->pages.pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &hdr->pages);
                *pages++ = req->wb_page;
        }

        if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
            (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
                desc->pg_ioflags &= ~FLUSH_COND_STABLE;

        /* Set up the argument struct */
        nfs_write_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
        list_add(&data->list, &hdr->rpc_list);
        desc->pg_rpc_callops = &nfs_write_common_ops;
        return 0;
}

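/*
 * Choose the flush strategy: if the server's write size is smaller
 * than a page, a single page must be split across several WRITEs;
 * otherwise one RPC can cover the whole request list.
 */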
int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
                      struct nfs_pgio_header *hdr)
{
        if (desc->pg_bsize < PAGE_CACHE_SIZE)
                return nfs_flush_multi(desc, hdr);
        return nfs_flush_one(desc, hdr);
}
EXPORT_SYMBOL_GPL(nfs_generic_flush);

static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
        struct nfs_rw_header *whdr;
        struct nfs_pgio_header *hdr;
        int ret;

        whdr = nfs_writehdr_alloc();
        if (!whdr) {
                desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                return -ENOMEM;
        }
        hdr = &whdr->header;
        nfs_pgheader_init(desc, hdr, nfs_writehdr_free);
        atomic_inc(&hdr->refcnt);
        ret = nfs_generic_flush(desc, hdr);
        if (ret == 0)
                ret = nfs_do_multiple_writes(&hdr->rpc_list,
                                             desc->pg_rpc_callops,
                                             desc->pg_ioflags);
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
        return ret;
}

static const struct nfs_pageio_ops nfs_pageio_write_ops = {
        .pg_test = nfs_generic_pg_test,
        .pg_doio = nfs_generic_pg_writepages,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
                               struct inode *inode, int ioflags, bool force_mds,
                               const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        const struct nfs_pageio_ops *pg_ops = &nfs_pageio_write_ops;

#ifdef CONFIG_NFS_V4_1
        if (server->pnfs_curr_ld && !force_mds)
                pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
        nfs_pageio_init(pgio, inode, pg_ops, compl_ops, server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
        pgio->pg_ops = &nfs_pageio_write_ops;
        pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);

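/*
 * rpc_call_prepare callback: let the NFS version-specific code prepare
 * the WRITE before it goes on the wire; a non-zero result aborts the
 * task with that error.
 */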
void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_pgio_data *data = calldata;
        int err;

        err = NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
        if (err)
                rpc_exit(task, err);
}

void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_commit_data *data = calldata;

        NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *        writebacks since the page->count is kept > 1 for as long
 *        as the page has a write request pending.
 */
static void nfs_writeback_done_common(struct rpc_task *task, void *calldata)
{
        struct nfs_pgio_data    *data = calldata;

        nfs_writeback_done(task, data);
}

static void nfs_writeback_release_common(void *calldata)
{
        struct nfs_pgio_data    *data = calldata;
        struct nfs_pgio_header *hdr = data->header;
        int status = data->task.tk_status;

        if ((status >= 0) && nfs_write_need_commit(data)) {
                spin_lock(&hdr->lock);
                if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
                        ; /* Do nothing */
                else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
                        memcpy(&hdr->verf, &data->verf, sizeof(hdr->verf));
                else if (memcmp(&hdr->verf, &data->verf, sizeof(hdr->verf)))
                        set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
                spin_unlock(&hdr->lock);
        }
        nfs_pgio_data_release(data);
}

static const struct rpc_call_ops nfs_write_common_ops = {
        .rpc_call_prepare = nfs_write_prepare,
        .rpc_call_done = nfs_writeback_done_common,
        .rpc_release = nfs_writeback_release_common,
};

/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
        umode_t mode = inode->i_mode;
        int kill = 0;

        /* suid always must be killed */
        if (unlikely(mode & S_ISUID))
                kill = ATTR_KILL_SUID;

        /*
         * sgid without any exec bits is just a mandatory locking mark; leave
         * it alone.  If some exec bits are set, it's a real sgid; kill it.
         */
        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
                kill |= ATTR_KILL_SGID;

        if (unlikely(kill && S_ISREG(mode)))
                return kill;

        return 0;
}

/*
 * This function is called when the WRITE call is complete.
 */
void nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data)
{
        struct nfs_pgio_args    *argp = &data->args;
        struct nfs_pgio_res     *resp = &data->res;
        struct inode            *inode = data->header->inode;
        int status;

        dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
                task->tk_pid, task->tk_status);

        /*
         * ->write_done will attempt to use post-op attributes to detect
         * conflicting writes by other clients.  A strict interpretation
         * of close-to-open would allow us to continue caching even if
         * another writer had changed the file, but some applications
         * depend on tighter cache coherency when writing.
         */
        status = NFS_PROTO(inode)->write_done(task, data);
        if (status != 0)
                return;
        nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
        if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
                /* We tried a write call, but the server did not
                 * commit data to stable storage even though we
                 * requested it.
                 * Note: There is a known bug in Tru64 < 5.0 in which
                 *       the server reports NFS_DATA_SYNC, but performs
                 *       NFS_FILE_SYNC. We therefore implement this checking
                 *       as a dprintk() in order to avoid filling syslog.
                 */
                static unsigned long    complain;

                /* Note this will print the MDS for a DS write */
                if (time_before(complain, jiffies)) {
                        dprintk("NFS:       faulty NFS server %s:"
                                " (committed = %d) != (stable = %d)\n",
                                NFS_SERVER(inode)->nfs_client->cl_hostname,
                                resp->verf->committed, argp->stable);
                        complain = jiffies + 300 * HZ;
                }
        }
#endif
        if (task->tk_status < 0) {
                nfs_set_pgio_error(data->header, task->tk_status, argp->offset);
                return;
        }

        /* Deal with the suid/sgid bit corner case */
        if (nfs_should_remove_suid(inode))
                nfs_mark_for_revalidate(inode);

        if (resp->count < argp->count) {
                static unsigned long    complain;

                /* This was a short write! */
                nfs_inc_stats(inode, NFSIOS_SHORTWRITE);

                /* Has the server at least made some progress? */
                if (resp->count == 0) {
                        if (time_before(complain, jiffies)) {
                                printk(KERN_WARNING
                                       "NFS: Server wrote zero bytes, expected %u.\n",
                                       argp->count);
                                complain = jiffies + 300 * HZ;
                        }
                        nfs_set_pgio_error(data->header, -EIO, argp->offset);
                        task->tk_status = -EIO;
                        return;
                }
                /* Was this an NFSv2 write or an NFSv3 stable write? */
                if (resp->verf->committed != NFS_UNSTABLE) {
                        /* Resend from where the server left off */
                        data->mds_offset += resp->count;
                        argp->offset += resp->count;
                        argp->pgbase += resp->count;
                        argp->count -= resp->count;
                } else {
                        /* Resend as a stable write in order to avoid
                         * headaches in the case of a server crash.
                         */
                        argp->stable = NFS_FILE_SYNC;
                }
                rpc_restart_call_prepare(task);
        }
}

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
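/*
 * Only one COMMIT may be in flight per inode: NFS_INO_COMMIT acts as
 * the lock bit.  Callers that may wait block killably until the
 * current committer clears it.
 */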
1432 static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
1433 {
1434         int ret;
1435
1436         if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
1437                 return 1;
1438         if (!may_wait)
1439                 return 0;
1440         ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
1441                                 NFS_INO_COMMIT,
1442                                 nfs_wait_bit_killable,
1443                                 TASK_KILLABLE);
1444         return (ret < 0) ? ret : 1;
1445 }
1446
1447 static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
1448 {
1449         clear_bit(NFS_INO_COMMIT, &nfsi->flags);
1450         smp_mb__after_clear_bit();
1451         wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
1452 }
1453
1454 void nfs_commitdata_release(struct nfs_commit_data *data)
1455 {
1456         put_nfs_open_context(data->context);
1457         nfs_commit_free(data);
1458 }
1459 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1460
1461 int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1462                         const struct rpc_call_ops *call_ops,
1463                         int how, int flags)
1464 {
1465         struct rpc_task *task;
1466         int priority = flush_task_priority(how);
1467         struct rpc_message msg = {
1468                 .rpc_argp = &data->args,
1469                 .rpc_resp = &data->res,
1470                 .rpc_cred = data->cred,
1471         };
1472         struct rpc_task_setup task_setup_data = {
1473                 .task = &data->task,
1474                 .rpc_client = clnt,
1475                 .rpc_message = &msg,
1476                 .callback_ops = call_ops,
1477                 .callback_data = data,
1478                 .workqueue = nfsiod_workqueue,
1479                 .flags = RPC_TASK_ASYNC | flags,
1480                 .priority = priority,
1481         };
1482         /* Set up the initial task struct.  */
1483         NFS_PROTO(data->inode)->commit_setup(data, &msg);
1484
1485         dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1486
1487         nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
1488                 NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);
1489
1490         task = rpc_run_task(&task_setup_data);
1491         if (IS_ERR(task))
1492                 return PTR_ERR(task);
1493         if (how & FLUSH_SYNC)
1494                 rpc_wait_for_completion_task(task);
1495         rpc_put_task(task);
1496         return 0;
1497 }
1498 EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1499
1500 /*
1501  * Set up the argument/result storage required for the RPC call.
1502  */
1503 void nfs_init_commit(struct nfs_commit_data *data,
1504                      struct list_head *head,
1505                      struct pnfs_layout_segment *lseg,
1506                      struct nfs_commit_info *cinfo)
1507 {
1508         struct nfs_page *first = nfs_list_entry(head->next);
1509         struct inode *inode = first->wb_context->dentry->d_inode;
1510
1511         /* Set up the RPC argument and reply structs
1512          * NB: take care not to mess about with data->commit et al. */
1513
1514         list_splice_init(head, &data->pages);
1515
1516         data->inode       = inode;
1517         data->cred        = first->wb_context->cred;
1518         data->lseg        = lseg; /* reference transferred */
1519         data->mds_ops     = &nfs_commit_ops;
1520         data->completion_ops = cinfo->completion_ops;
1521         data->dreq        = cinfo->dreq;
1522
1523         data->args.fh     = NFS_FH(data->inode);
1524         /* Note: we always request a commit of the entire inode */
1525         data->args.offset = 0;
1526         data->args.count  = 0;
1527         data->context     = get_nfs_open_context(first->wb_context);
1528         data->res.fattr   = &data->fattr;
1529         data->res.verf    = &data->verf;
1530         nfs_fattr_init(&data->fattr);
1531 }
1532 EXPORT_SYMBOL_GPL(nfs_init_commit);
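
/*
 * The offset/count pair of 0/0 set up above maps onto the NFSv3 COMMIT
 * arguments, where a zero count is defined (RFC 1813, COMMIT3args) as
 * "flush from offset to the end of the file"; together with offset 0
 * this asks the server to commit every unstable write for the file, so
 * one COMMIT can cover requests gathered from anywhere in the inode.
 */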
1533
1534 void nfs_retry_commit(struct list_head *page_list,
1535                       struct pnfs_layout_segment *lseg,
1536                       struct nfs_commit_info *cinfo)
1537 {
1538         struct nfs_page *req;
1539
1540         while (!list_empty(page_list)) {
1541                 req = nfs_list_entry(page_list->next);
1542                 nfs_list_remove_request(req);
1543                 nfs_mark_request_commit(req, lseg, cinfo);
1544                 if (!cinfo->dreq) {
1545                         dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1546                         dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
1547                                      BDI_RECLAIMABLE);
1548                 }
1549                 nfs_unlock_and_release_request(req);
1550         }
1551 }
1552 EXPORT_SYMBOL_GPL(nfs_retry_commit);
1553
1554 /*
1555  * Commit dirty pages
1556  */
1557 static int
1558 nfs_commit_list(struct inode *inode, struct list_head *head, int how,
1559                 struct nfs_commit_info *cinfo)
1560 {
1561         struct nfs_commit_data  *data;
1562
1563         data = nfs_commitdata_alloc();
1564
1565         if (!data)
1566                 goto out_bad;
1567
1568         /* Set up the argument struct */
1569         nfs_init_commit(data, head, NULL, cinfo);
1570         atomic_inc(&cinfo->mds->rpcs_out);
1571         return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
1572                                    how, 0);
1573  out_bad:
1574         nfs_retry_commit(head, NULL, cinfo);
1575         cinfo->completion_ops->error_cleanup(NFS_I(inode));
1576         return -ENOMEM;
1577 }
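
/*
 * On the out_bad path the requests are re-marked for a later commit by
 * nfs_retry_commit(), and ->error_cleanup() is invoked; for the plain
 * MDS case that is nfs_commit_clear_lock() (see
 * nfs_commit_completion_ops below), which drops NFS_INO_COMMIT so that
 * other would-be committers are not left waiting on a commit that will
 * never run.
 */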
1578
1579 /*
1580  * COMMIT call returned
1581  */
1582 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1583 {
1584         struct nfs_commit_data  *data = calldata;
1585
1586         dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1587                                 task->tk_pid, task->tk_status);
1588
1589         /* Call the NFS version-specific code */
1590         NFS_PROTO(data->inode)->commit_done(task, data);
1591 }
1592
1593 static void nfs_commit_release_pages(struct nfs_commit_data *data)
1594 {
1595         struct nfs_page *req;
1596         int status = data->task.tk_status;
1597         struct nfs_commit_info cinfo;
1598
1599         while (!list_empty(&data->pages)) {
1600                 req = nfs_list_entry(data->pages.next);
1601                 nfs_list_remove_request(req);
1602                 nfs_clear_page_commit(req->wb_page);
1603
1604                 dprintk("NFS:       commit (%s/%llu %d@%lld)",
1605                         req->wb_context->dentry->d_sb->s_id,
1606                         (unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1607                         req->wb_bytes,
1608                         (long long)req_offset(req));
1609                 if (status < 0) {
1610                         nfs_context_set_write_error(req->wb_context, status);
1611                         nfs_inode_remove_request(req);
1612                         dprintk(", error = %d\n", status);
1613                         goto next;
1614                 }
1615
1616                 /* Okay, COMMIT succeeded, apparently. Check the verifier
1617                  * returned by the server against all stored verfs. */
1618                 if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) {
1619                         /* We have a match */
1620                         nfs_inode_remove_request(req);
1621                         dprintk(" OK\n");
1622                         goto next;
1623                 }
1624                 /* We have a mismatch. Write the page again */
1625                 dprintk(" mismatch\n");
1626                 nfs_mark_request_dirty(req);
1627                 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
1628         next:
1629                 nfs_unlock_and_release_request(req);
1630         }
1631         nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1632         if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
1633                 nfs_commit_clear_lock(NFS_I(data->inode));
1634 }
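
/*
 * The verifier comparison above implements the unstable-write recovery
 * rule: the server returns an opaque 8-byte write verifier with each
 * UNSTABLE write and with COMMIT, and changes it when it reboots (and
 * may therefore have lost uncommitted data).  Worked example with
 * made-up values: a write stored wb_verf 0x00000152deadbeef, but the
 * COMMIT reply carries 0x00000153deadbeef; memcmp() is non-zero, so the
 * page is redirtied and the write is sent again instead of being
 * discarded.
 */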
1635
1636 static void nfs_commit_release(void *calldata)
1637 {
1638         struct nfs_commit_data *data = calldata;
1639
1640         data->completion_ops->completion(data);
1641         nfs_commitdata_release(calldata);
1642 }
1643
1644 static const struct rpc_call_ops nfs_commit_ops = {
1645         .rpc_call_prepare = nfs_commit_prepare,
1646         .rpc_call_done = nfs_commit_done,
1647         .rpc_release = nfs_commit_release,
1648 };
1649
1650 static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1651         .completion = nfs_commit_release_pages,
1652         .error_cleanup = nfs_commit_clear_lock,
1653 };
1654
1655 int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1656                             int how, struct nfs_commit_info *cinfo)
1657 {
1658         int status;
1659
1660         status = pnfs_commit_list(inode, head, how, cinfo);
1661         if (status == PNFS_NOT_ATTEMPTED)
1662                 status = nfs_commit_list(inode, head, how, cinfo);
1663         return status;
1664 }
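
/*
 * pnfs_commit_list() returns PNFS_NOT_ATTEMPTED when the pages are not
 * covered by a pNFS layout (or pNFS is not in use), in which case the
 * commit falls back to a single COMMIT against the MDS via
 * nfs_commit_list().
 */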
1665
1666 int nfs_commit_inode(struct inode *inode, int how)
1667 {
1668         LIST_HEAD(head);
1669         struct nfs_commit_info cinfo;
1670         int may_wait = how & FLUSH_SYNC;
1671         int res;
1672
1673         res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1674         if (res <= 0)
1675                 goto out_mark_dirty;
1676         nfs_init_cinfo_from_inode(&cinfo, inode);
1677         res = nfs_scan_commit(inode, &head, &cinfo);
1678         if (res) {
1679                 int error;
1680
1681                 error = nfs_generic_commit_list(inode, &head, how, &cinfo);
1682                 if (error < 0)
1683                         return error;
1684                 if (!may_wait)
1685                         goto out_mark_dirty;
1686                 error = wait_on_bit(&NFS_I(inode)->flags,
1687                                 NFS_INO_COMMIT,
1688                                 nfs_wait_bit_killable,
1689                                 TASK_KILLABLE);
1690                 if (error < 0)
1691                         return error;
1692         } else
1693                 nfs_commit_clear_lock(NFS_I(inode));
1694         return res;
1695         /* Note: If we exit without ensuring that the commit is complete,
1696          * we must mark the inode as dirty. Otherwise, future calls to
1697          * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1698          * that the data is on the disk.
1699          */
1700 out_mark_dirty:
1701         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1702         return res;
1703 }
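
/*
 * The return value is the number of requests handed to the commit
 * machinery (possibly 0), or a negative errno.  A hypothetical caller
 * that must get data onto stable storage would pass FLUSH_SYNC and
 * treat only negative results as failure:
 *
 *	ret = nfs_commit_inode(inode, FLUSH_SYNC);
 *	if (ret < 0)
 *		return ret;
 */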
1704
1705 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1706 {
1707         struct nfs_inode *nfsi = NFS_I(inode);
1708         int flags = FLUSH_SYNC;
1709         int ret = 0;
1710
1711         /* no requests awaiting a COMMIT means nothing needs to be done */
1712         if (!nfsi->commit_info.ncommit)
1713                 return ret;
1714
1715         if (wbc->sync_mode == WB_SYNC_NONE) {
1716                 /* Don't commit yet if this is a non-blocking flush and there
1717                  * are a lot of outstanding writes for this mapping.
1718                  */
1719                 if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
1720                         goto out_mark_dirty;
1721
1722                 /* don't wait for the COMMIT response */
1723                 flags = 0;
1724         }
1725
1726         ret = nfs_commit_inode(inode, flags);
1727         if (ret >= 0) {
1728                 if (wbc->sync_mode == WB_SYNC_NONE) {
1729                         if (ret < wbc->nr_to_write)
1730                                 wbc->nr_to_write -= ret;
1731                         else
1732                                 wbc->nr_to_write = 0;
1733                 }
1734                 return 0;
1735         }
1736 out_mark_dirty:
1737         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1738         return ret;
1739 }
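
/*
 * The WB_SYNC_NONE branch above rate-limits COMMITs: nothing is sent
 * until more than half of the inode's cached pages are waiting on one.
 * With invented numbers: npages = 1000 and ncommit = 400 gives
 * 400 <= (1000 >> 1), so the inode is merely re-marked dirty; once
 * ncommit reaches 501, a COMMIT is sent with flags == 0, i.e. without
 * waiting for the reply.
 */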
1740 #else
1741 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1742 {
1743         return 0;
1744 }
1745 #endif
1746
1747 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1748 {
1749         return nfs_commit_unstable_pages(inode, wbc);
1750 }
1751 EXPORT_SYMBOL_GPL(nfs_write_inode);
1752
1753 /*
1754  * Flush the inode to disk.
1755  */
1756 int nfs_wb_all(struct inode *inode)
1757 {
1758         struct writeback_control wbc = {
1759                 .sync_mode = WB_SYNC_ALL,
1760                 .nr_to_write = LONG_MAX,
1761                 .range_start = 0,
1762                 .range_end = LLONG_MAX,
1763         };
1764         int ret;
1765
1766         trace_nfs_writeback_inode_enter(inode);
1767
1768         ret = sync_inode(inode, &wbc);
1769
1770         trace_nfs_writeback_inode_exit(inode, ret);
1771         return ret;
1772 }
1773 EXPORT_SYMBOL_GPL(nfs_wb_all);
1774
1775 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1776 {
1777         struct nfs_page *req;
1778         int ret = 0;
1779
1780         for (;;) {
1781                 wait_on_page_writeback(page);
1782                 req = nfs_page_find_request(page);
1783                 if (req == NULL)
1784                         break;
1785                 if (nfs_lock_request(req)) {
1786                         nfs_clear_request_commit(req);
1787                         nfs_inode_remove_request(req);
1788                         /*
1789                          * In case nfs_inode_remove_request has marked the
1790                          * page as being dirty
1791                          */
1792                         cancel_dirty_page(page, PAGE_CACHE_SIZE);
1793                         nfs_unlock_and_release_request(req);
1794                         break;
1795                 }
1796                 ret = nfs_wait_on_request(req);
1797                 nfs_release_request(req);
1798                 if (ret < 0)
1799                         break;
1800         }
1801         return ret;
1802 }
1803
1804 /*
1805  * Write back all requests on one page; we do this before reading it.
1806  */
1807 int nfs_wb_page(struct inode *inode, struct page *page)
1808 {
1809         loff_t range_start = page_file_offset(page);
1810         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1811         struct writeback_control wbc = {
1812                 .sync_mode = WB_SYNC_ALL,
1813                 .nr_to_write = 0,
1814                 .range_start = range_start,
1815                 .range_end = range_end,
1816         };
1817         int ret;
1818
1819         trace_nfs_writeback_page_enter(inode);
1820
1821         for (;;) {
1822                 wait_on_page_writeback(page);
1823                 if (clear_page_dirty_for_io(page)) {
1824                         ret = nfs_writepage_locked(page, &wbc);
1825                         if (ret < 0)
1826                                 goto out_error;
1827                         continue;
1828                 }
1829                 ret = 0;
1830                 if (!PagePrivate(page))
1831                         break;
1832                 ret = nfs_commit_inode(inode, FLUSH_SYNC);
1833                 if (ret < 0)
1834                         goto out_error;
1835         }
1836 out_error:
1837         trace_nfs_writeback_page_exit(inode, ret);
1838         return ret;
1839 }
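
/*
 * The loop above makes progress in two ways: while the page is dirty it
 * is written out via nfs_writepage_locked(), and once it is clean but
 * still carries a request (PagePrivate set, i.e. a COMMIT is still
 * owed), the whole inode is committed synchronously.  Only a clean page
 * with no attached nfs_page breaks out, at which point the data is safe
 * to read.
 */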
1840
1841 #ifdef CONFIG_MIGRATION
1842 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1843                 struct page *page, enum migrate_mode mode)
1844 {
1845         /*
1846          * If PagePrivate is set, then the page is currently associated with
1847          * an in-progress read or write request. Don't try to migrate it.
1848          *
1849          * FIXME: we could do this in principle, but we'll need a way to ensure
1850          *        that we can safely release the inode reference while holding
1851          *        the page lock.
1852          */
1853         if (PagePrivate(page))
1854                 return -EBUSY;
1855
1856         if (!nfs_fscache_release_page(page, GFP_KERNEL))
1857                 return -EBUSY;
1858
1859         return migrate_page(mapping, newpage, page, mode);
1860 }
1861 #endif
1862
1863 int __init nfs_init_writepagecache(void)
1864 {
1865         nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1866                                              sizeof(struct nfs_rw_header),
1867                                              0, SLAB_HWCACHE_ALIGN,
1868                                              NULL);
1869         if (nfs_wdata_cachep == NULL)
1870                 return -ENOMEM;
1871
1872         nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1873                                                      nfs_wdata_cachep);
1874         if (nfs_wdata_mempool == NULL)
1875                 goto out_destroy_write_cache;
1876
1877         nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
1878                                              sizeof(struct nfs_commit_data),
1879                                              0, SLAB_HWCACHE_ALIGN,
1880                                              NULL);
1881         if (nfs_cdata_cachep == NULL)
1882                 goto out_destroy_write_mempool;
1883
1884         nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1885                                                       nfs_cdata_cachep);
1886         if (nfs_commit_mempool == NULL)
1887                 goto out_destroy_commit_cache;
1888
1889         /*
1890          * NFS congestion size, scale with available memory.
1891          *
1892          *  64MB:    8192k
1893          * 128MB:   11585k
1894          * 256MB:   16384k
1895          * 512MB:   23170k
1896          *   1GB:   32768k
1897          *   2GB:   46340k
1898          *   4GB:   65536k
1899          *   8GB:   92681k
1900          *  16GB:  131072k
1901          *
1902          * This allows larger machines to have larger/more transfers.
1903          * Limit the default to 256MB.
1904          */
1905         nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
1906         if (nfs_congestion_kb > 256*1024)
1907                 nfs_congestion_kb = 256*1024;
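
        /*
         * Worked example of the formula above, assuming a 1GB machine
         * with 4KB pages: totalram_pages = 262144, int_sqrt(262144) =
         * 512, 16 * 512 = 8192, and 8192 << (PAGE_SHIFT - 10) =
         * 8192 << 2 = 32768k, which matches the 1GB row in the table.
         */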
1908
1909         return 0;
1910
1911 out_destroy_commit_cache:
1912         kmem_cache_destroy(nfs_cdata_cachep);
1913 out_destroy_write_mempool:
1914         mempool_destroy(nfs_wdata_mempool);
1915 out_destroy_write_cache:
1916         kmem_cache_destroy(nfs_wdata_cachep);
1917         return -ENOMEM;
1918 }
1919
1920 void nfs_destroy_writepagecache(void)
1921 {
1922         mempool_destroy(nfs_commit_mempool);
1923         kmem_cache_destroy(nfs_cdata_cachep);
1924         mempool_destroy(nfs_wdata_mempool);
1925         kmem_cache_destroy(nfs_wdata_cachep);
1926 }
1927