afs: Fix missing put_page()
fs/afs/write.c (from karo-tx-linux.git)
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

static int afs_write_back_from_locked_page(struct afs_writeback *wb,
                                           struct page *page);

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
        _enter("");
        return __set_page_dirty_nobuffers(page);
}

/*
 * unlink a writeback record because its usage has reached zero
 * - must be called with the wb->vnode->writeback_lock held
 */
static void afs_unlink_writeback(struct afs_writeback *wb)
{
        struct afs_writeback *front;
        struct afs_vnode *vnode = wb->vnode;

        list_del_init(&wb->link);
        if (!list_empty(&vnode->writebacks)) {
                /* if an fsync rises to the front of the queue then wake it
                 * up */
                front = list_entry(vnode->writebacks.next,
                                   struct afs_writeback, link);
                if (front->state == AFS_WBACK_SYNCING) {
                        _debug("wake up sync");
                        front->state = AFS_WBACK_COMPLETE;
                        wake_up(&front->waitq);
                }
        }
}

/*
 * free a writeback record
 */
static void afs_free_writeback(struct afs_writeback *wb)
{
        _enter("");
        key_put(wb->key);
        kfree(wb);
}

/*
 * dispose of a reference to a writeback record
 */
void afs_put_writeback(struct afs_writeback *wb)
{
        struct afs_vnode *vnode = wb->vnode;

        _enter("{%d}", wb->usage);

        spin_lock(&vnode->writeback_lock);
        if (--wb->usage == 0)
                afs_unlink_writeback(wb);
        else
                wb = NULL;
        spin_unlock(&vnode->writeback_lock);
        if (wb)
                afs_free_writeback(wb);
}
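
/* Note on the reference scheme used above: the usage count is only ever
 * modified with writeback_lock held, and the final unlink from the vnode's
 * writeback list happens under that same lock; the actual kfree() is
 * deferred until after the lock has been dropped so that we never free a
 * record whilst another CPU might still be walking vnode->writebacks.
 */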

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
                         loff_t pos, struct page *page)
{
        struct afs_read *req;
        loff_t i_size;
        int ret;

        _enter(",,%llu", (unsigned long long)pos);

        req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
                      GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        atomic_set(&req->usage, 1);
        req->pos = pos;
        req->nr_pages = 1;
        req->pages[0] = page;

        i_size = i_size_read(&vnode->vfs_inode);
        if (pos + PAGE_SIZE > i_size)
                req->len = i_size - pos;
        else
                req->len = PAGE_SIZE;

        ret = afs_vnode_fetch_data(vnode, key, req);
        afs_put_read(req);
        if (ret < 0) {
                if (ret == -ENOENT) {
                        _debug("got NOENT from server"
                               " - marking file deleted and stale");
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        ret = -ESTALE;
                }
        }

        _leave(" = %d", ret);
        return ret;
}
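
/* Note: the read request above covers at most one page and is trimmed so
 * that it does not run past the current EOF.  This presumes the caller only
 * asks us to fill a page that at least partly overlaps i_size; if pos were
 * to lie wholly beyond EOF, i_size - pos would underflow, so callers must
 * not pass such a position.
 */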

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len, unsigned flags,
                    struct page **pagep, void **fsdata)
{
        struct afs_writeback *candidate, *wb;
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct page *page;
        struct key *key = file->private_data;
        unsigned from = pos & (PAGE_SIZE - 1);
        unsigned to = from + len;
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;

        _enter("{%x:%u},{%lx},%u,%u",
               vnode->fid.vid, vnode->fid.vnode, index, from, to);

        candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
        if (!candidate)
                return -ENOMEM;
        candidate->vnode = vnode;
        candidate->first = candidate->last = index;
        candidate->offset_first = from;
        candidate->to_last = to;
        INIT_LIST_HEAD(&candidate->link);
        candidate->usage = 1;
        candidate->state = AFS_WBACK_PENDING;
        init_waitqueue_head(&candidate->waitq);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                kfree(candidate);
                return -ENOMEM;
        }
        *pagep = page;

        if (!PageUptodate(page) && len != PAGE_SIZE) {
                ret = afs_fill_page(vnode, key, (loff_t)index << PAGE_SHIFT,
                                    page);
                if (ret < 0) {
                        /* release the page we locked and pinned above, lest
                         * it be left locked forever */
                        unlock_page(page);
                        put_page(page);
                        kfree(candidate);
                        _leave(" = %d [prep]", ret);
                        return ret;
                }
                SetPageUptodate(page);
        }

try_again:
        spin_lock(&vnode->writeback_lock);

        /* see if this page is already pending a writeback under a suitable key
         * - if so we can just join onto that one */
        wb = (struct afs_writeback *) page_private(page);
        if (wb) {
                if (wb->key == key && wb->state == AFS_WBACK_PENDING)
                        goto subsume_in_current_wb;
                goto flush_conflicting_wb;
        }

        if (index > 0) {
                /* see if we can find an already pending writeback that we can
                 * append this page to */
                list_for_each_entry(wb, &vnode->writebacks, link) {
                        if (wb->last == index - 1 && wb->key == key &&
                            wb->state == AFS_WBACK_PENDING)
                                goto append_to_previous_wb;
                }
        }

        list_add_tail(&candidate->link, &vnode->writebacks);
        candidate->key = key_get(key);
        spin_unlock(&vnode->writeback_lock);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long) candidate);
        _leave(" = 0 [new]");
        return 0;

subsume_in_current_wb:
        _debug("subsume");
        ASSERTRANGE(wb->first, <=, index, <=, wb->last);
        if (index == wb->first && from < wb->offset_first)
                wb->offset_first = from;
        if (index == wb->last && to > wb->to_last)
                wb->to_last = to;
        spin_unlock(&vnode->writeback_lock);
        kfree(candidate);
        _leave(" = 0 [sub]");
        return 0;

append_to_previous_wb:
        _debug("append into %lx-%lx", wb->first, wb->last);
        wb->usage++;
        wb->last++;
        wb->to_last = to;
        spin_unlock(&vnode->writeback_lock);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long) wb);
        kfree(candidate);
        _leave(" = 0 [app]");
        return 0;

        /* the page is currently bound to another context, so if it's dirty we
         * need to flush it before we can use the new context */
flush_conflicting_wb:
        _debug("flush conflict");
        if (wb->state == AFS_WBACK_PENDING)
                wb->state = AFS_WBACK_CONFLICTING;
        spin_unlock(&vnode->writeback_lock);
        if (PageDirty(page)) {
                ret = afs_write_back_from_locked_page(wb, page);
                if (ret < 0) {
                        afs_put_writeback(candidate);
                        _leave(" = %d", ret);
                        return ret;
                }
        }

        /* the page holds a ref on the writeback record */
        afs_put_writeback(wb);
        set_page_private(page, 0);
        ClearPagePrivate(page);
        goto try_again;
}
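
/* To summarise afs_write_begin(): a page being dirtied ends up attached to
 * exactly one writeback record via page_private().  There are three
 * possible outcomes: the page joins the record it is already bound to (if
 * the key and state match), it is appended to an adjacent pending record
 * under the same key, or a fresh record is queued.  A page bound to an
 * incompatible record is flushed first and the attempt retried from
 * try_again.
 */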

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
                  struct page *page, void *fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        loff_t i_size, maybe_i_size;

        _enter("{%x:%u},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        maybe_i_size = pos + copied;

        i_size = i_size_read(&vnode->vfs_inode);
        if (maybe_i_size > i_size) {
                spin_lock(&vnode->writeback_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
                        i_size_write(&vnode->vfs_inode, maybe_i_size);
                spin_unlock(&vnode->writeback_lock);
        }

        set_page_dirty(page);
        if (PageDirty(page))
                _debug("dirtied");
        unlock_page(page);
        put_page(page);

        return copied;
}
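
/* Note the double-checked i_size update in afs_write_end(): i_size is read
 * once without the lock as a cheap test, then reread under writeback_lock
 * before being written, so that two racing writers cannot move the file
 * size backwards.
 */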

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
                           pgoff_t first, pgoff_t last)
{
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%x:%u},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv, 0);

        do {
                _debug("kill %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
                                              first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        ClearPageUptodate(pv.pages[loop]);
                        if (error)
                                SetPageError(pv.pages[loop]);
                        end_page_writeback(pv.pages[loop]);
                }

                /* advance the window past the batch we just handled, as
                 * afs_pages_written_back() does, otherwise this loop never
                 * terminates */
                first += count;
                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}
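
/* Note: afs_kill_pages() leaves the pages in the cache but clears
 * PG_uptodate, so a subsequent read will refetch the data from the server;
 * PG_error is only set when the failure was a hard I/O error rather than a
 * permissions problem.
 */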

/*
 * synchronously write back the locked page and any subsequent non-locked dirty
 * pages also covered by the same writeback record
 */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
                                           struct page *primary_page)
{
        struct page *pages[8], *page;
        unsigned long count;
        unsigned n, offset, to;
        pgoff_t start, first, last;
        int loop, ret;

        _enter(",%lx", primary_page->index);

        count = 1;
        if (!clear_page_dirty_for_io(primary_page))
                BUG();
        if (test_set_page_writeback(primary_page))
                BUG();

        /* find all consecutive lockable dirty pages, stopping when we find a
         * page that is not immediately lockable, is not dirty or is missing,
         * or we reach the end of the range */
        start = primary_page->index;
        if (start >= wb->last)
                goto no_more;
        start++;
        do {
                _debug("more %lx [%lx]", start, count);
                n = wb->last - start + 1;
                if (n > ARRAY_SIZE(pages))
                        n = ARRAY_SIZE(pages);
                n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
                                          start, n, pages);
                _debug("fgpc %u", n);
                if (n == 0)
                        goto no_more;
                if (pages[0]->index != start) {
                        do {
                                put_page(pages[--n]);
                        } while (n > 0);
                        goto no_more;
                }

                for (loop = 0; loop < n; loop++) {
                        page = pages[loop];
                        if (page->index > wb->last)
                                break;
                        if (!trylock_page(page))
                                break;
                        if (!PageDirty(page) ||
                            page_private(page) != (unsigned long) wb) {
                                unlock_page(page);
                                break;
                        }
                        if (!clear_page_dirty_for_io(page))
                                BUG();
                        if (test_set_page_writeback(page))
                                BUG();
                        unlock_page(page);
                        put_page(page);
                }
                count += loop;
                if (loop < n) {
                        for (; loop < n; loop++)
                                put_page(pages[loop]);
                        goto no_more;
                }

                start += loop;
        } while (start <= wb->last && count < 65536);

no_more:
        /* we now have a contiguous set of dirty pages, each with writeback set
         * and the dirty mark cleared; the first page is locked and must remain
         * so, all the rest are unlocked */
        first = primary_page->index;
        last = first + count - 1;

        offset = (first == wb->first) ? wb->offset_first : 0;
        to = (last == wb->last) ? wb->to_last : PAGE_SIZE;

        _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

        ret = afs_vnode_store_data(wb, first, last, offset, to);
        if (ret < 0) {
                switch (ret) {
                case -EDQUOT:
                case -ENOSPC:
                        mapping_set_error(wb->vnode->vfs_inode.i_mapping, -ENOSPC);
                        break;
                case -EROFS:
                case -EIO:
                case -EREMOTEIO:
                case -EFBIG:
                case -ENOENT:
                case -ENOMEDIUM:
                case -ENXIO:
                        afs_kill_pages(wb->vnode, true, first, last);
                        mapping_set_error(wb->vnode->vfs_inode.i_mapping, -EIO);
                        break;
                case -EACCES:
                case -EPERM:
                case -ENOKEY:
                case -EKEYEXPIRED:
                case -EKEYREJECTED:
                case -EKEYREVOKED:
                        afs_kill_pages(wb->vnode, false, first, last);
                        break;
                default:
                        break;
                }
        } else {
                ret = count;
        }

        _leave(" = %d", ret);
        return ret;
}
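
/* The error handling above sorts store failures into three buckets:
 * quota/space failures are recorded against the mapping as -ENOSPC for
 * fsync() to pick up; hard errors (I/O, removed file, etc.) kill the pages
 * with the error flag set and record -EIO; and permission/key failures kill
 * the pages quietly, presumably because retrying under a different key may
 * yet succeed.
 */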

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct afs_writeback *wb;
        int ret;

        _enter("{%lx},", page->index);

        wb = (struct afs_writeback *) page_private(page);
        ASSERT(wb != NULL);

        ret = afs_write_back_from_locked_page(wb, page);
        unlock_page(page);
        if (ret < 0) {
                _leave(" = %d", ret);
                return 0;
        }

        wbc->nr_to_write -= ret;

        _leave(" = 0");
        return 0;
}
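
/* Note that afs_writepage() always returns 0, even when the store failed:
 * by that point the pages have been killed or an error has been recorded
 * against the mapping by afs_write_back_from_locked_page(), so the failure
 * is reported through fsync()/msync() rather than here.
 */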

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
        struct afs_writeback *wb;
        struct page *page;
        int ret, n;

        _enter(",,%lx,%lx,", index, end);

        do {
                n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
                                       1, &page);
                if (!n)
                        break;

                _debug("wback %lx", page->index);

                if (page->index > end) {
                        *_next = index;
                        put_page(page);
                        _leave(" = 0 [%lx]", *_next);
                        return 0;
                }

                /* at this point we hold neither mapping->tree_lock nor lock on
                 * the page itself: the page may be truncated or invalidated
                 * (changing page->mapping to NULL), or even swizzled back from
                 * swapper_space to tmpfs file mapping
                 */
                lock_page(page);

                if (page->mapping != mapping) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }

                if (wbc->sync_mode != WB_SYNC_NONE)
                        wait_on_page_writeback(page);

                if (PageWriteback(page) || !PageDirty(page)) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }

                wb = (struct afs_writeback *) page_private(page);
                ASSERT(wb != NULL);

                spin_lock(&wb->vnode->writeback_lock);
                wb->state = AFS_WBACK_WRITING;
                spin_unlock(&wb->vnode->writeback_lock);

                ret = afs_write_back_from_locked_page(wb, page);
                unlock_page(page);
                put_page(page);
                if (ret < 0) {
                        _leave(" = %d", ret);
                        return ret;
                }

                wbc->nr_to_write -= ret;

                cond_resched();
        } while (index < end && wbc->nr_to_write > 0);

        *_next = index;
        _leave(" = 0 [%lx]", *_next);
        return 0;
}
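
/* Forward progress in afs_writepages_region() relies on
 * find_get_pages_tag() advancing the index past each page it returns;
 * pages that get skipped (truncated, already under writeback) are simply
 * dropped and the scan continues from the updated index.
 */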

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
{
        pgoff_t start, end, next;
        int ret;

        _enter("");

        if (wbc->range_cyclic) {
                start = mapping->writeback_index;
                end = -1;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
                if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
                        ret = afs_writepages_region(mapping, wbc, 0, start,
                                                    &next);
                mapping->writeback_index = next;
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
                ret = afs_writepages_region(mapping, wbc, 0, end, &next);
                if (wbc->nr_to_write > 0)
                        mapping->writeback_index = next;
        } else {
                start = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
        }

        _leave(" = %d", ret);
        return ret;
}
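
/* In the range_cyclic case above the scan starts at the remembered
 * writeback_index and runs to the end of the file, then wraps round and
 * covers the region before the start point if there is still quota left in
 * nr_to_write, finally saving the resume point for the next cycle.
 */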

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
        struct afs_writeback *wb = call->wb;
        struct pagevec pv;
        unsigned count, loop;
        pgoff_t first = call->first, last = call->last;
        bool free_wb;

        _enter("{%x:%u},{%lx-%lx}",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        ASSERT(wb != NULL);

        pagevec_init(&pv, 0);

        do {
                _debug("done %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(call->mapping, first, count,
                                              pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                spin_lock(&vnode->writeback_lock);
                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];
                        end_page_writeback(page);
                        if (page_private(page) == (unsigned long) wb) {
                                set_page_private(page, 0);
                                ClearPagePrivate(page);
                                wb->usage--;
                        }
                }
                free_wb = false;
                if (wb->usage == 0) {
                        afs_unlink_writeback(wb);
                        free_wb = true;
                }
                spin_unlock(&vnode->writeback_lock);
                first += count;
                if (free_wb) {
                        afs_free_writeback(wb);
                        wb = NULL;
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}
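
/* On completion each page is detached from the writeback record and has
 * its writeback bit cleared; the record's usage count drops once per page
 * it covered, and when the last page goes the record is unlinked under
 * writeback_lock and freed outside it, mirroring afs_put_writeback().
 */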

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
        size_t count = iov_iter_count(from);

        _enter("{%x.%u},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);

        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
        }

        if (!count)
                return 0;

        result = generic_file_write_iter(iocb, from);

        _leave(" = %zd", result);
        return result;
}

/*
 * flush the vnode to the fileserver
 */
int afs_writeback_all(struct afs_vnode *vnode)
{
        struct address_space *mapping = vnode->vfs_inode.i_mapping;
        struct writeback_control wbc = {
                .sync_mode      = WB_SYNC_ALL,
                .nr_to_write    = LONG_MAX,
                .range_cyclic   = 1,
        };
        int ret;

        _enter("");

        ret = mapping->a_ops->writepages(mapping, &wbc);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        _leave(" = %d", ret);
        return ret;
}
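
/* Note: after pushing the pages, afs_writeback_all() re-marks the inode
 * dirty.  Presumably this keeps the inode on the dirty list so that any
 * writeback records still outstanding get revisited by later periodic
 * writeback.
 */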

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file_inode(file);
        struct afs_writeback *wb, *xwb;
        struct afs_vnode *vnode = AFS_FS_I(inode);
        int ret;

        _enter("{%x:%u},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);

        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (ret)
                return ret;
        inode_lock(inode);

        /* use a writeback record as a marker in the queue - when this reaches
         * the front of the queue, all the outstanding writes are either
         * completed or rejected */
        wb = kzalloc(sizeof(*wb), GFP_KERNEL);
        if (!wb) {
                ret = -ENOMEM;
                goto out;
        }
        wb->vnode = vnode;
        wb->first = 0;
        wb->last = -1;
        wb->offset_first = 0;
        wb->to_last = PAGE_SIZE;
        wb->usage = 1;
        wb->state = AFS_WBACK_SYNCING;
        init_waitqueue_head(&wb->waitq);

        spin_lock(&vnode->writeback_lock);
        list_for_each_entry(xwb, &vnode->writebacks, link) {
                if (xwb->state == AFS_WBACK_PENDING)
                        xwb->state = AFS_WBACK_CONFLICTING;
        }
        list_add_tail(&wb->link, &vnode->writebacks);
        spin_unlock(&vnode->writeback_lock);

        /* push all the outstanding writebacks to the server */
        ret = afs_writeback_all(vnode);
        if (ret < 0) {
                afs_put_writeback(wb);
                _leave(" = %d [wb]", ret);
                goto out;
        }

        /* wait for the preceding writes to actually complete */
        ret = wait_event_interruptible(wb->waitq,
                                       wb->state == AFS_WBACK_COMPLETE ||
                                       vnode->writebacks.next == &wb->link);
        afs_put_writeback(wb);
        _leave(" = %d", ret);
out:
        inode_unlock(inode);
        return ret;
}
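
/* The wait at the end of afs_fsync() completes in one of two ways: either
 * the marker record reaches the front of the vnode's writeback queue
 * (everything queued before it has been disposed of), or
 * afs_unlink_writeback() sees the syncing marker at the front and flips it
 * to AFS_WBACK_COMPLETE before waking us.
 */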

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
        struct afs_vnode *vnode = AFS_FS_I(vma->vm_file->f_mapping->host);

        _enter("{{%x:%u}},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        /* wait for the page to be written to the cache before we allow it to
         * be modified */
#ifdef CONFIG_AFS_FSCACHE
        fscache_wait_on_page_write(vnode->cache, page);
#endif

        _leave(" = 0");
        return 0;
}