fs/nfs/pnfs.c
1 /*
2  *  pNFS functions to call and manage layout drivers.
3  *
4  *  Copyright (c) 2002 [year of first publication]
5  *  The Regents of the University of Michigan
6  *  All Rights Reserved
7  *
8  *  Dean Hildebrand <dhildebz@umich.edu>
9  *
10  *  Permission is granted to use, copy, create derivative works, and
11  *  redistribute this software and such derivative works for any purpose,
12  *  so long as the name of the University of Michigan is not used in
13  *  any advertising or publicity pertaining to the use or distribution
14  *  of this software without specific, written prior authorization. If
15  *  the above copyright notice or any other identification of the
16  *  University of Michigan is included in any copy of any portion of
17  *  this software, then the disclaimer below must also be included.
18  *
19  *  This software is provided as is, without representation or warranty
20  *  of any kind either express or implied, including without limitation
21  *  the implied warranties of merchantability, fitness for a particular
22  *  purpose, or noninfringement.  The Regents of the University of
23  *  Michigan shall not be liable for any damages, including special,
24  *  indirect, incidental, or consequential damages, with respect to any
25  *  claim arising out of or in connection with the use of the software,
26  *  even if it has been or is hereafter advised of the possibility of
27  *  such damages.
28  */
29
30 #include <linux/nfs_fs.h>
31 #include <linux/nfs_page.h>
32 #include <linux/module.h>
33 #include "internal.h"
34 #include "pnfs.h"
35 #include "iostat.h"
36 #include "nfs4trace.h"
37 #include "delegation.h"
38 #include "nfs42.h"
39
40 #define NFSDBG_FACILITY         NFSDBG_PNFS
41 #define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
42
43 /* Locking:
44  *
45  * pnfs_spinlock:
46  *      protects pnfs_modules_tbl.
47  */
48 static DEFINE_SPINLOCK(pnfs_spinlock);
49
50 /*
51  * pnfs_modules_tbl holds all pnfs modules
52  */
53 static LIST_HEAD(pnfs_modules_tbl);
54
55 static int
56 pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
57                        enum pnfs_iomode iomode, bool sync);
58
59 /* Return the registered pnfs layout driver module matching given id */
60 static struct pnfs_layoutdriver_type *
61 find_pnfs_driver_locked(u32 id)
62 {
63         struct pnfs_layoutdriver_type *local;
64
65         list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
66                 if (local->id == id)
67                         goto out;
68         local = NULL;
69 out:
70         dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
71         return local;
72 }
73
74 static struct pnfs_layoutdriver_type *
75 find_pnfs_driver(u32 id)
76 {
77         struct pnfs_layoutdriver_type *local;
78
79         spin_lock(&pnfs_spinlock);
80         local = find_pnfs_driver_locked(id);
81         if (local != NULL && !try_module_get(local->owner)) {
82                 dprintk("%s: Could not grab reference on module\n", __func__);
83                 local = NULL;
84         }
85         spin_unlock(&pnfs_spinlock);
86         return local;
87 }
88
89 void
90 unset_pnfs_layoutdriver(struct nfs_server *nfss)
91 {
92         if (nfss->pnfs_curr_ld) {
93                 if (nfss->pnfs_curr_ld->clear_layoutdriver)
94                         nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
95                 /* Decrement the MDS count. Purge the deviceid cache if zero */
96                 if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
97                         nfs4_deviceid_purge_client(nfss->nfs_client);
98                 module_put(nfss->pnfs_curr_ld->owner);
99         }
100         nfss->pnfs_curr_ld = NULL;
101 }
102
103 /*
104  * Try to set the server's pnfs module to the pnfs layout type specified by id.
105  * Currently only one pNFS layout driver per filesystem is supported.
106  *
107  * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
108  */
109 void
110 set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
111                       u32 id)
112 {
113         struct pnfs_layoutdriver_type *ld_type = NULL;
114
115         if (id == 0)
116                 goto out_no_driver;
117         if (!(server->nfs_client->cl_exchange_flags &
118                  (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
119                 printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
120                         __func__, id, server->nfs_client->cl_exchange_flags);
121                 goto out_no_driver;
122         }
123         ld_type = find_pnfs_driver(id);
124         if (!ld_type) {
125                 request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
126                 ld_type = find_pnfs_driver(id);
127                 if (!ld_type) {
128                         dprintk("%s: No pNFS module found for %u.\n",
129                                 __func__, id);
130                         goto out_no_driver;
131                 }
132         }
133         server->pnfs_curr_ld = ld_type;
134         if (ld_type->set_layoutdriver
135             && ld_type->set_layoutdriver(server, mntfh)) {
136                 printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
137                         "driver %u.\n", __func__, id);
138                 module_put(ld_type->owner);
139                 goto out_no_driver;
140         }
141         /* Bump the MDS count */
142         atomic_inc(&server->nfs_client->cl_mds_count);
143
144         dprintk("%s: pNFS module for %u set\n", __func__, id);
145         return;
146
147 out_no_driver:
148         dprintk("%s: Using NFSv4 I/O\n", __func__);
149         server->pnfs_curr_ld = NULL;
150 }
151
152 int
153 pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
154 {
155         int status = -EINVAL;
156         struct pnfs_layoutdriver_type *tmp;
157
158         if (ld_type->id == 0) {
159                 printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
160                 return status;
161         }
162         if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
163                 printk(KERN_ERR "NFS: %s Layout driver must provide "
164                        "alloc_lseg and free_lseg.\n", __func__);
165                 return status;
166         }
167
168         spin_lock(&pnfs_spinlock);
169         tmp = find_pnfs_driver_locked(ld_type->id);
170         if (!tmp) {
171                 list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
172                 status = 0;
173                 dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
174                         ld_type->name);
175         } else {
176                 printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
177                         __func__, ld_type->id);
178         }
179         spin_unlock(&pnfs_spinlock);
180
181         return status;
182 }
183 EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
184
185 void
186 pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
187 {
188         dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
189         spin_lock(&pnfs_spinlock);
190         list_del(&ld_type->pnfs_tblid);
191         spin_unlock(&pnfs_spinlock);
192 }
193 EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
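/*
 * A pNFS layout driver registers itself here from its module init hook and
 * unregisters on exit.  A minimal, illustrative sketch follows (the
 * examplelayout_* names are placeholders, not an in-tree driver; only the
 * callbacks pnfs_register_layoutdriver() insists on, plus the layout header
 * hooks used by pnfs_alloc_layout_hdr()/pnfs_free_layout_hdr(), are shown):
 *
 *	static struct pnfs_layoutdriver_type examplelayout_type = {
 *		.id			= LAYOUT_NFSV4_1_FILES, /* driver's layout type id */
 *		.name			= "LAYOUT_EXAMPLE",
 *		.owner			= THIS_MODULE,
 *		.alloc_layout_hdr	= examplelayout_alloc_layout_hdr,
 *		.free_layout_hdr	= examplelayout_free_layout_hdr,
 *		.alloc_lseg		= examplelayout_alloc_lseg,
 *		.free_lseg		= examplelayout_free_lseg,
 *	};
 *
 *	static int __init examplelayout_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&examplelayout_type);
 *	}
 *
 *	static void __exit examplelayout_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&examplelayout_type);
 *	}
 *
 * Since set_pnfs_layoutdriver() loads drivers on demand with
 * request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id), such a module
 * should also declare a MODULE_ALIAS matching its layout type id.
 */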
194
195 /*
196  * pNFS client layout cache
197  */
198
199 /* Need to hold i_lock if caller does not already hold reference */
200 void
201 pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
202 {
203         atomic_inc(&lo->plh_refcount);
204 }
205
206 static struct pnfs_layout_hdr *
207 pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
208 {
209         struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
210         return ld->alloc_layout_hdr(ino, gfp_flags);
211 }
212
213 static void
214 pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
215 {
216         struct nfs_server *server = NFS_SERVER(lo->plh_inode);
217         struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
218
219         if (!list_empty(&lo->plh_layouts)) {
220                 struct nfs_client *clp = server->nfs_client;
221
222                 spin_lock(&clp->cl_lock);
223                 list_del_init(&lo->plh_layouts);
224                 spin_unlock(&clp->cl_lock);
225         }
226         put_rpccred(lo->plh_lc_cred);
227         return ld->free_layout_hdr(lo);
228 }
229
230 static void
231 pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
232 {
233         struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
234         dprintk("%s: freeing layout cache %p\n", __func__, lo);
235         nfsi->layout = NULL;
236         /* Reset MDS Threshold I/O counters */
237         nfsi->write_io = 0;
238         nfsi->read_io = 0;
239 }
240
241 void
242 pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
243 {
244         struct inode *inode = lo->plh_inode;
245
246         if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
247                 if (!list_empty(&lo->plh_segs))
248                         WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
249                 pnfs_detach_layout_hdr(lo);
250                 spin_unlock(&inode->i_lock);
251                 pnfs_free_layout_hdr(lo);
252         }
253 }
254
255 static int
256 pnfs_iomode_to_fail_bit(u32 iomode)
257 {
258         return iomode == IOMODE_RW ?
259                 NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
260 }
261
262 static void
263 pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
264 {
265         lo->plh_retry_timestamp = jiffies;
266         if (!test_and_set_bit(fail_bit, &lo->plh_flags))
267                 atomic_inc(&lo->plh_refcount);
268 }
269
270 static void
271 pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
272 {
273         if (test_and_clear_bit(fail_bit, &lo->plh_flags))
274                 atomic_dec(&lo->plh_refcount);
275 }
276
277 static void
278 pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
279 {
280         struct inode *inode = lo->plh_inode;
281         struct pnfs_layout_range range = {
282                 .iomode = iomode,
283                 .offset = 0,
284                 .length = NFS4_MAX_UINT64,
285         };
286         LIST_HEAD(head);
287
288         spin_lock(&inode->i_lock);
289         pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
290         pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
291         spin_unlock(&inode->i_lock);
292         pnfs_free_lseg_list(&head);
293         dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
294                         iomode == IOMODE_RW ?  "RW" : "READ");
295 }
296
297 static bool
298 pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
299 {
300         unsigned long start, end;
301         int fail_bit = pnfs_iomode_to_fail_bit(iomode);
302
303         if (test_bit(fail_bit, &lo->plh_flags) == 0)
304                 return false;
305         end = jiffies;
306         start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
307         if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
308                 /* It is time to retry the failed layoutgets */
309                 pnfs_layout_clear_fail_bit(lo, fail_bit);
310                 return false;
311         }
312         return true;
313 }
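/*
 * Taken together, the helpers above implement a simple backoff: after a
 * failed LAYOUTGET the fail bit for that iomode stays set, and callers fall
 * back to MDS I/O, until PNFS_LAYOUTGET_RETRY_TIMEOUT (120 seconds) has
 * passed since plh_retry_timestamp; at that point the bit is cleared and a
 * new layoutget may be attempted.
 */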
314
315 static void
316 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
317 {
318         INIT_LIST_HEAD(&lseg->pls_list);
319         INIT_LIST_HEAD(&lseg->pls_lc_list);
320         atomic_set(&lseg->pls_refcount, 1);
321         smp_mb();
322         set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
323         lseg->pls_layout = lo;
324 }
325
326 static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
327 {
328         struct inode *ino = lseg->pls_layout->plh_inode;
329
330         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
331 }
332
333 static void
334 pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
335                 struct pnfs_layout_segment *lseg)
336 {
337         struct inode *inode = lo->plh_inode;
338
339         WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
340         list_del_init(&lseg->pls_list);
341         /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
342         atomic_dec(&lo->plh_refcount);
343         if (list_empty(&lo->plh_segs))
344                 clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
345         rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
346 }
347
348 /* Return true if layoutreturn is needed */
349 static bool
350 pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
351                         struct pnfs_layout_segment *lseg)
352 {
353         struct pnfs_layout_segment *s;
354
355         if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
356                 return false;
357
358         list_for_each_entry(s, &lo->plh_segs, pls_list)
359                 if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
360                         return false;
361
362         return true;
363 }
364
365 static bool
366 pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
367 {
368         if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
369                 return false;
370         lo->plh_return_iomode = 0;
371         lo->plh_block_lgets++;
372         pnfs_get_layout_hdr(lo);
373         clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
374         return true;
375 }
376
377 static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
378                 struct pnfs_layout_hdr *lo, struct inode *inode)
379 {
380         lo = lseg->pls_layout;
381         inode = lo->plh_inode;
382
383         spin_lock(&inode->i_lock);
384         if (pnfs_layout_need_return(lo, lseg)) {
385                 nfs4_stateid stateid;
386                 enum pnfs_iomode iomode;
387                 bool send;
388
389                 stateid = lo->plh_stateid;
390                 iomode = lo->plh_return_iomode;
391                 send = pnfs_prepare_layoutreturn(lo);
392                 spin_unlock(&inode->i_lock);
393                 if (send) {
394                         /* Send an async layoutreturn so we don't deadlock */
395                         pnfs_send_layoutreturn(lo, stateid, iomode, false);
396                 }
397         } else
398                 spin_unlock(&inode->i_lock);
399 }
400
401 void
402 pnfs_put_lseg(struct pnfs_layout_segment *lseg)
403 {
404         struct pnfs_layout_hdr *lo;
405         struct inode *inode;
406
407         if (!lseg)
408                 return;
409
410         dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
411                 atomic_read(&lseg->pls_refcount),
412                 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
413
414         /* Handle the case where refcount != 1 */
415         if (atomic_add_unless(&lseg->pls_refcount, -1, 1))
416                 return;
417
418         lo = lseg->pls_layout;
419         inode = lo->plh_inode;
420         /* Do we need a layoutreturn? */
421         if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
422                 pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
423
424         if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
425                 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
426                         spin_unlock(&inode->i_lock);
427                         return;
428                 }
429                 pnfs_get_layout_hdr(lo);
430                 pnfs_layout_remove_lseg(lo, lseg);
431                 spin_unlock(&inode->i_lock);
432                 pnfs_free_lseg(lseg);
433                 pnfs_put_layout_hdr(lo);
434         }
435 }
436 EXPORT_SYMBOL_GPL(pnfs_put_lseg);
437
438 static void pnfs_free_lseg_async_work(struct work_struct *work)
439 {
440         struct pnfs_layout_segment *lseg;
441         struct pnfs_layout_hdr *lo;
442
443         lseg = container_of(work, struct pnfs_layout_segment, pls_work);
444         lo = lseg->pls_layout;
445
446         pnfs_free_lseg(lseg);
447         pnfs_put_layout_hdr(lo);
448 }
449
450 static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
451 {
452         INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
453         schedule_work(&lseg->pls_work);
454 }
455
456 void
457 pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
458 {
459         if (!lseg)
460                 return;
461
462         assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);
463
464         dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
465                 atomic_read(&lseg->pls_refcount),
466                 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
467         if (atomic_dec_and_test(&lseg->pls_refcount)) {
468                 struct pnfs_layout_hdr *lo = lseg->pls_layout;
469                 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
470                         return;
471                 pnfs_get_layout_hdr(lo);
472                 pnfs_layout_remove_lseg(lo, lseg);
473                 pnfs_free_lseg_async(lseg);
474         }
475 }
476 EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);
477
478 static u64
479 end_offset(u64 start, u64 len)
480 {
481         u64 end;
482
483         end = start + len;
484         return end >= start ? end : NFS4_MAX_UINT64;
485 }
486
487 /*
488  * is l2 fully contained in l1?
489  *   start1                             end1
490  *   [----------------------------------)
491  *           start2           end2
492  *           [----------------)
493  */
494 static bool
495 pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
496                  const struct pnfs_layout_range *l2)
497 {
498         u64 start1 = l1->offset;
499         u64 end1 = end_offset(start1, l1->length);
500         u64 start2 = l2->offset;
501         u64 end2 = end_offset(start2, l2->length);
502
503         return (start1 <= start2) && (end1 >= end2);
504 }
505
506 /*
507  * are l1 and l2 intersecting?
508  *   start1                             end1
509  *   [----------------------------------)
510  *                              start2           end2
511  *                              [----------------)
512  */
513 static bool
514 pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
515                     const struct pnfs_layout_range *l2)
516 {
517         u64 start1 = l1->offset;
518         u64 end1 = end_offset(start1, l1->length);
519         u64 start2 = l2->offset;
520         u64 end2 = end_offset(start2, l2->length);
521
522         return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
523                (end2 == NFS4_MAX_UINT64 || end2 > start1);
524 }
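/*
 * Worked example for the two range helpers above: with
 * l1 = { .offset = 0, .length = 8192 } and l2 = { .offset = 4096,
 * .length = 8192 }, end_offset() yields end1 = 8192 and end2 = 12288, so
 * the ranges intersect (end1 > start2 and end2 > start1) but l2 is not
 * contained in l1 (end1 < end2).  A length of NFS4_MAX_UINT64 means
 * "to end of file" and makes the range unbounded on the right.
 */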
525
526 static bool
527 should_free_lseg(const struct pnfs_layout_range *lseg_range,
528                  const struct pnfs_layout_range *recall_range)
529 {
530         return (recall_range->iomode == IOMODE_ANY ||
531                 lseg_range->iomode == recall_range->iomode) &&
532                pnfs_lseg_range_intersecting(lseg_range, recall_range);
533 }
534
535 static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
536                 struct list_head *tmp_list)
537 {
538         if (!atomic_dec_and_test(&lseg->pls_refcount))
539                 return false;
540         pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
541         list_add(&lseg->pls_list, tmp_list);
542         return true;
543 }
544
545 /* Returns 1 if lseg is removed from list, 0 otherwise */
546 static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
547                              struct list_head *tmp_list)
548 {
549         int rv = 0;
550
551         if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
552                 /* Remove the reference keeping the lseg in the
553                  * list.  It will now be removed when all
554                  * outstanding io is finished.
555                  */
556                 dprintk("%s: lseg %p ref %d\n", __func__, lseg,
557                         atomic_read(&lseg->pls_refcount));
558                 if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
559                         rv = 1;
560         }
561         return rv;
562 }
563
564 /* Returns the number of matching invalid lsegs remaining in the list
565  * after the call.
566  */
567 int
568 pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
569                             struct list_head *tmp_list,
570                             struct pnfs_layout_range *recall_range)
571 {
572         struct pnfs_layout_segment *lseg, *next;
573         int invalid = 0, removed = 0;
574
575         dprintk("%s:Begin lo %p\n", __func__, lo);
576
577         if (list_empty(&lo->plh_segs))
578                 return 0;
579         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
580                 if (!recall_range ||
581                     should_free_lseg(&lseg->pls_range, recall_range)) {
582                         dprintk("%s: freeing lseg %p iomode %d "
583                                 "offset %llu length %llu\n", __func__,
584                                 lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
585                                 lseg->pls_range.length);
586                         invalid++;
587                         removed += mark_lseg_invalid(lseg, tmp_list);
588                 }
589         dprintk("%s:Return %i\n", __func__, invalid - removed);
590         return invalid - removed;
591 }
592
593 /* note free_me must contain lsegs from a single layout_hdr */
594 void
595 pnfs_free_lseg_list(struct list_head *free_me)
596 {
597         struct pnfs_layout_segment *lseg, *tmp;
598
599         if (list_empty(free_me))
600                 return;
601
602         list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
603                 list_del(&lseg->pls_list);
604                 pnfs_free_lseg(lseg);
605         }
606 }
607
608 void
609 pnfs_destroy_layout(struct nfs_inode *nfsi)
610 {
611         struct pnfs_layout_hdr *lo;
612         LIST_HEAD(tmp_list);
613
614         spin_lock(&nfsi->vfs_inode.i_lock);
615         lo = nfsi->layout;
616         if (lo) {
617                 lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
618                 pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
619                 pnfs_get_layout_hdr(lo);
620                 pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
621                 pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
622                 pnfs_clear_retry_layoutget(lo);
623                 spin_unlock(&nfsi->vfs_inode.i_lock);
624                 pnfs_free_lseg_list(&tmp_list);
625                 pnfs_put_layout_hdr(lo);
626         } else
627                 spin_unlock(&nfsi->vfs_inode.i_lock);
628 }
629 EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
630
631 static bool
632 pnfs_layout_add_bulk_destroy_list(struct inode *inode,
633                 struct list_head *layout_list)
634 {
635         struct pnfs_layout_hdr *lo;
636         bool ret = false;
637
638         spin_lock(&inode->i_lock);
639         lo = NFS_I(inode)->layout;
640         if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
641                 pnfs_get_layout_hdr(lo);
642                 list_add(&lo->plh_bulk_destroy, layout_list);
643                 ret = true;
644         }
645         spin_unlock(&inode->i_lock);
646         return ret;
647 }
648
649 /* Caller must hold rcu_read_lock and clp->cl_lock */
650 static int
651 pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
652                 struct nfs_server *server,
653                 struct list_head *layout_list)
654 {
655         struct pnfs_layout_hdr *lo, *next;
656         struct inode *inode;
657
658         list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
659                 inode = igrab(lo->plh_inode);
660                 if (inode == NULL)
661                         continue;
662                 list_del_init(&lo->plh_layouts);
663                 if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
664                         continue;
665                 rcu_read_unlock();
666                 spin_unlock(&clp->cl_lock);
667                 iput(inode);
668                 spin_lock(&clp->cl_lock);
669                 rcu_read_lock();
670                 return -EAGAIN;
671         }
672         return 0;
673 }
674
675 static int
676 pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
677                 bool is_bulk_recall)
678 {
679         struct pnfs_layout_hdr *lo;
680         struct inode *inode;
681         struct pnfs_layout_range range = {
682                 .iomode = IOMODE_ANY,
683                 .offset = 0,
684                 .length = NFS4_MAX_UINT64,
685         };
686         LIST_HEAD(lseg_list);
687         int ret = 0;
688
689         while (!list_empty(layout_list)) {
690                 lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
691                                 plh_bulk_destroy);
692                 dprintk("%s freeing layout for inode %lu\n", __func__,
693                         lo->plh_inode->i_ino);
694                 inode = lo->plh_inode;
695
696                 pnfs_layoutcommit_inode(inode, false);
697
698                 spin_lock(&inode->i_lock);
699                 list_del_init(&lo->plh_bulk_destroy);
700                 lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
701                 if (is_bulk_recall)
702                         set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
703                 if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
704                         ret = -EAGAIN;
705                 spin_unlock(&inode->i_lock);
706                 pnfs_free_lseg_list(&lseg_list);
707                 pnfs_put_layout_hdr(lo);
708                 iput(inode);
709         }
710         return ret;
711 }
712
713 int
714 pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
715                 struct nfs_fsid *fsid,
716                 bool is_recall)
717 {
718         struct nfs_server *server;
719         LIST_HEAD(layout_list);
720
721         spin_lock(&clp->cl_lock);
722         rcu_read_lock();
723 restart:
724         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
725                 if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
726                         continue;
727                 if (pnfs_layout_bulk_destroy_byserver_locked(clp,
728                                 server,
729                                 &layout_list) != 0)
730                         goto restart;
731         }
732         rcu_read_unlock();
733         spin_unlock(&clp->cl_lock);
734
735         if (list_empty(&layout_list))
736                 return 0;
737         return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
738 }
739
740 int
741 pnfs_destroy_layouts_byclid(struct nfs_client *clp,
742                 bool is_recall)
743 {
744         struct nfs_server *server;
745         LIST_HEAD(layout_list);
746
747         spin_lock(&clp->cl_lock);
748         rcu_read_lock();
749 restart:
750         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
751                 if (pnfs_layout_bulk_destroy_byserver_locked(clp,
752                                         server,
753                                         &layout_list) != 0)
754                         goto restart;
755         }
756         rcu_read_unlock();
757         spin_unlock(&clp->cl_lock);
758
759         if (list_empty(&layout_list))
760                 return 0;
761         return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
762 }
763
764 /*
765  * Called by the state manager to remove all layouts established under an
766  * expired lease.
767  */
768 void
769 pnfs_destroy_all_layouts(struct nfs_client *clp)
770 {
771         nfs4_deviceid_mark_client_invalid(clp);
772         nfs4_deviceid_purge_client(clp);
773
774         pnfs_destroy_layouts_byclid(clp, false);
775 }
776
777 /*
778  * Compare 2 layout stateid sequence ids, to see which is newer,
779  * taking into account wraparound issues.
780  */
781 static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
782 {
783         return (s32)(s1 - s2) > 0;
784 }
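/*
 * The signed subtraction makes the comparison wraparound-safe: for example
 * s1 = 0x00000001 and s2 = 0xfffffffe give (s32)(s1 - s2) == 3 > 0, so s1
 * is treated as newer even though it is numerically smaller.  The result
 * is only meaningful while the two seqids are within 2^31 of each other.
 */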
785
786 /* update lo->plh_stateid with new if it is more recent */
787 void
788 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
789                         bool update_barrier)
790 {
791         u32 oldseq, newseq, new_barrier;
792         int empty = list_empty(&lo->plh_segs);
793
794         oldseq = be32_to_cpu(lo->plh_stateid.seqid);
795         newseq = be32_to_cpu(new->seqid);
796         if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
797                 nfs4_stateid_copy(&lo->plh_stateid, new);
798                 if (update_barrier) {
799                         new_barrier = be32_to_cpu(new->seqid);
800                 } else {
801                         /* Because of wraparound, we want to keep the barrier
802                          * "close" to the current seqids.
803                          */
804                         new_barrier = newseq - atomic_read(&lo->plh_outstanding);
805                 }
806                 if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
807                         lo->plh_barrier = new_barrier;
808         }
809 }
810
811 static bool
812 pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
813                 const nfs4_stateid *stateid)
814 {
815         u32 seqid = be32_to_cpu(stateid->seqid);
816
817         return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
818 }
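/*
 * plh_barrier is a floor on acceptable layout stateid seqids: it is bumped
 * by pnfs_set_layout_stateid() and pnfs_roc_set_barrier() when the client
 * learns of a newer stateid (e.g. from a callback or at close), and
 * pnfs_layout_stateid_blocked() then reports any stateid that is not newer
 * than the barrier as blocked, so stale LAYOUTGET replies can be discarded
 * rather than reinstating segments the server no longer expects us to hold.
 */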
819
820 static bool
821 pnfs_layout_returning(const struct pnfs_layout_hdr *lo,
822                       struct pnfs_layout_range *range)
823 {
824         return test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
825                 (lo->plh_return_iomode == IOMODE_ANY ||
826                  lo->plh_return_iomode == range->iomode);
827 }
828
829 /* Return true if a new LAYOUTGET for this layout and range is currently blocked */
830 static bool
831 pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo,
832                         struct pnfs_layout_range *range)
833 {
834         return lo->plh_block_lgets ||
835                 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
836                 pnfs_layout_returning(lo, range);
837 }
838
839 int
840 pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
841                               struct pnfs_layout_range *range,
842                               struct nfs4_state *open_state)
843 {
844         int status = 0;
845
846         dprintk("--> %s\n", __func__);
847         spin_lock(&lo->plh_inode->i_lock);
848         if (pnfs_layoutgets_blocked(lo, range)) {
849                 status = -EAGAIN;
850         } else if (!nfs4_valid_open_stateid(open_state)) {
851                 status = -EBADF;
852         } else if (list_empty(&lo->plh_segs) ||
853                    test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
854                 int seq;
855
856                 do {
857                         seq = read_seqbegin(&open_state->seqlock);
858                         nfs4_stateid_copy(dst, &open_state->stateid);
859                 } while (read_seqretry(&open_state->seqlock, seq));
860         } else
861                 nfs4_stateid_copy(dst, &lo->plh_stateid);
862         spin_unlock(&lo->plh_inode->i_lock);
863         dprintk("<-- %s\n", __func__);
864         return status;
865 }
866
867 /*
868  * Get layout from server.
869  *    for now, assume that whole file layouts are requested.
870  *    arg->offset: 0
871  *    arg->length: all ones
872  */
873 static struct pnfs_layout_segment *
874 send_layoutget(struct pnfs_layout_hdr *lo,
875            struct nfs_open_context *ctx,
876            struct pnfs_layout_range *range,
877            gfp_t gfp_flags)
878 {
879         struct inode *ino = lo->plh_inode;
880         struct nfs_server *server = NFS_SERVER(ino);
881         struct nfs4_layoutget *lgp;
882         struct pnfs_layout_segment *lseg;
883
884         dprintk("--> %s\n", __func__);
885
886         lgp = kzalloc(sizeof(*lgp), gfp_flags);
887         if (lgp == NULL)
888                 return NULL;
889
890         lgp->args.minlength = PAGE_CACHE_SIZE;
891         if (lgp->args.minlength > range->length)
892                 lgp->args.minlength = range->length;
893         lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
894         lgp->args.range = *range;
895         lgp->args.type = server->pnfs_curr_ld->id;
896         lgp->args.inode = ino;
897         lgp->args.ctx = get_nfs_open_context(ctx);
898         lgp->gfp_flags = gfp_flags;
899         lgp->cred = lo->plh_lc_cred;
900
901         /* Synchronously retrieve layout information from server and
902          * store in lseg.
903          */
904         lseg = nfs4_proc_layoutget(lgp, gfp_flags);
905         if (IS_ERR(lseg)) {
906                 switch (PTR_ERR(lseg)) {
907                 case -ENOMEM:
908                 case -ERESTARTSYS:
909                         break;
910                 default:
911                         /* remember that LAYOUTGET failed and suspend trying */
912                         pnfs_layout_io_set_failed(lo, range->iomode);
913                 }
914                 return NULL;
915         } else
916                 pnfs_layout_clear_fail_bit(lo,
917                                 pnfs_iomode_to_fail_bit(range->iomode));
918
919         return lseg;
920 }
921
922 static void pnfs_clear_layoutcommit(struct inode *inode,
923                 struct list_head *head)
924 {
925         struct nfs_inode *nfsi = NFS_I(inode);
926         struct pnfs_layout_segment *lseg, *tmp;
927
928         if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
929                 return;
930         list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
931                 if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
932                         continue;
933                 pnfs_lseg_dec_and_remove_zero(lseg, head);
934         }
935 }
936
937 void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
938 {
939         clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
940         smp_mb__after_atomic();
941         wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
942         rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
943 }
944
945 static int
946 pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
947                        enum pnfs_iomode iomode, bool sync)
948 {
949         struct inode *ino = lo->plh_inode;
950         struct nfs4_layoutreturn *lrp;
951         int status = 0;
952
953         lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
954         if (unlikely(lrp == NULL)) {
955                 status = -ENOMEM;
956                 spin_lock(&ino->i_lock);
957                 lo->plh_block_lgets--;
958                 pnfs_clear_layoutreturn_waitbit(lo);
959                 rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
960                 spin_unlock(&ino->i_lock);
961                 pnfs_put_layout_hdr(lo);
962                 goto out;
963         }
964
965         lrp->args.stateid = stateid;
966         lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
967         lrp->args.inode = ino;
968         lrp->args.range.iomode = iomode;
969         lrp->args.range.offset = 0;
970         lrp->args.range.length = NFS4_MAX_UINT64;
971         lrp->args.layout = lo;
972         lrp->clp = NFS_SERVER(ino)->nfs_client;
973         lrp->cred = lo->plh_lc_cred;
974
975         status = nfs4_proc_layoutreturn(lrp, sync);
976 out:
977         dprintk("<-- %s status: %d\n", __func__, status);
978         return status;
979 }
980
981 /*
982  * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
983  * when the layout segment list is empty.
984  *
985  * Note that a pnfs_layout_hdr can exist with an empty layout segment
986  * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
987  * deviceid is marked invalid.
988  */
989 int
990 _pnfs_return_layout(struct inode *ino)
991 {
992         struct pnfs_layout_hdr *lo = NULL;
993         struct nfs_inode *nfsi = NFS_I(ino);
994         LIST_HEAD(tmp_list);
995         nfs4_stateid stateid;
996         int status = 0, empty;
997         bool send;
998
999         dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
1000
1001         spin_lock(&ino->i_lock);
1002         lo = nfsi->layout;
1003         if (!lo) {
1004                 spin_unlock(&ino->i_lock);
1005                 dprintk("NFS: %s no layout to return\n", __func__);
1006                 goto out;
1007         }
1008         stateid = nfsi->layout->plh_stateid;
1009         /* Reference matched in nfs4_layoutreturn_release */
1010         pnfs_get_layout_hdr(lo);
1011         empty = list_empty(&lo->plh_segs);
1012         pnfs_clear_layoutcommit(ino, &tmp_list);
1013         pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
1014
1015         if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
1016                 struct pnfs_layout_range range = {
1017                         .iomode         = IOMODE_ANY,
1018                         .offset         = 0,
1019                         .length         = NFS4_MAX_UINT64,
1020                 };
1021                 NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
1022         }
1023
1024         /* Don't send a LAYOUTRETURN if list was initially empty */
1025         if (empty) {
1026                 spin_unlock(&ino->i_lock);
1027                 dprintk("NFS: %s no layout segments to return\n", __func__);
1028                 goto out_put_layout_hdr;
1029         }
1030
1031         set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
1032         send = pnfs_prepare_layoutreturn(lo);
1033         spin_unlock(&ino->i_lock);
1034         pnfs_free_lseg_list(&tmp_list);
1035         if (send)
1036                 status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
1037 out_put_layout_hdr:
1038         pnfs_put_layout_hdr(lo);
1039 out:
1040         dprintk("<-- %s status: %d\n", __func__, status);
1041         return status;
1042 }
1043 EXPORT_SYMBOL_GPL(_pnfs_return_layout);
1044
1045 int
1046 pnfs_commit_and_return_layout(struct inode *inode)
1047 {
1048         struct pnfs_layout_hdr *lo;
1049         int ret;
1050
1051         spin_lock(&inode->i_lock);
1052         lo = NFS_I(inode)->layout;
1053         if (lo == NULL) {
1054                 spin_unlock(&inode->i_lock);
1055                 return 0;
1056         }
1057         pnfs_get_layout_hdr(lo);
1058         /* Block new layoutgets and read/write to ds */
1059         lo->plh_block_lgets++;
1060         spin_unlock(&inode->i_lock);
1061         filemap_fdatawait(inode->i_mapping);
1062         ret = pnfs_layoutcommit_inode(inode, true);
1063         if (ret == 0)
1064                 ret = _pnfs_return_layout(inode);
1065         spin_lock(&inode->i_lock);
1066         lo->plh_block_lgets--;
1067         spin_unlock(&inode->i_lock);
1068         pnfs_put_layout_hdr(lo);
1069         return ret;
1070 }
1071
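/*
 * pnfs_roc() is called on the NFSv4 close path to decide whether the layout
 * can be implicitly returned on close.  It succeeds only if the layout was
 * marked NFS_LAYOUT_ROC, is not under a bulk recall, no delegation or other
 * open state still pins the file, and at least one ROC segment exists; in
 * that case the ROC segments are invalidated and freed, a layoutcommit is
 * sent, and true is returned so the CLOSE itself acts as the layout return.
 * Otherwise it returns false, sending an explicit LAYOUTRETURN first if a
 * "return before close" had been requested.
 */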
1072 bool pnfs_roc(struct inode *ino)
1073 {
1074         struct nfs_inode *nfsi = NFS_I(ino);
1075         struct nfs_open_context *ctx;
1076         struct nfs4_state *state;
1077         struct pnfs_layout_hdr *lo;
1078         struct pnfs_layout_segment *lseg, *tmp;
1079         nfs4_stateid stateid;
1080         LIST_HEAD(tmp_list);
1081         bool found = false, layoutreturn = false;
1082
1083         spin_lock(&ino->i_lock);
1084         lo = nfsi->layout;
1085         if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
1086             test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
1087                 goto out_noroc;
1088
1089         /* Don't return layout if we hold a delegation */
1090         if (nfs4_check_delegation(ino, FMODE_READ))
1091                 goto out_noroc;
1092
1093         list_for_each_entry(ctx, &nfsi->open_files, list) {
1094                 state = ctx->state;
1095                 /* Don't return layout if there is open file state */
1096                 if (state != NULL && state->state != 0)
1097                         goto out_noroc;
1098         }
1099
1100         pnfs_clear_retry_layoutget(lo);
1101         list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
1102                 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
1103                         mark_lseg_invalid(lseg, &tmp_list);
1104                         found = true;
1105                 }
1106         if (!found)
1107                 goto out_noroc;
1108         lo->plh_block_lgets++;
1109         pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
1110         spin_unlock(&ino->i_lock);
1111         pnfs_free_lseg_list(&tmp_list);
1112         pnfs_layoutcommit_inode(ino, true);
1113         return true;
1114
1115 out_noroc:
1116         if (lo) {
1117                 stateid = lo->plh_stateid;
1118                 if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1119                                            &lo->plh_flags))
1120                         layoutreturn = pnfs_prepare_layoutreturn(lo);
1121         }
1122         spin_unlock(&ino->i_lock);
1123         if (layoutreturn) {
1124                 pnfs_layoutcommit_inode(ino, true);
1125                 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
1126         }
1127         return false;
1128 }
1129
1130 void pnfs_roc_release(struct inode *ino)
1131 {
1132         struct pnfs_layout_hdr *lo;
1133
1134         spin_lock(&ino->i_lock);
1135         lo = NFS_I(ino)->layout;
1136         lo->plh_block_lgets--;
1137         if (atomic_dec_and_test(&lo->plh_refcount)) {
1138                 pnfs_detach_layout_hdr(lo);
1139                 spin_unlock(&ino->i_lock);
1140                 pnfs_free_layout_hdr(lo);
1141         } else
1142                 spin_unlock(&ino->i_lock);
1143 }
1144
1145 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
1146 {
1147         struct pnfs_layout_hdr *lo;
1148
1149         spin_lock(&ino->i_lock);
1150         lo = NFS_I(ino)->layout;
1151         if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
1152                 lo->plh_barrier = barrier;
1153         spin_unlock(&ino->i_lock);
1154 }
1155
1156 bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
1157 {
1158         struct nfs_inode *nfsi = NFS_I(ino);
1159         struct pnfs_layout_hdr *lo;
1160         struct pnfs_layout_segment *lseg;
1161         nfs4_stateid stateid;
1162         u32 current_seqid;
1163         bool layoutreturn = false;
1164
1165         spin_lock(&ino->i_lock);
1166         list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
1167                 if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
1168                         continue;
1169                 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
1170                         continue;
1171                 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1172                 spin_unlock(&ino->i_lock);
1173                 return true;
1174         }
1175         lo = nfsi->layout;
1176         current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
1177
1178         /* Since close does not return a layout stateid for use as
1179          * a barrier, we choose the worst-case barrier.
1180          */
1181         *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
1182         stateid = lo->plh_stateid;
1183         if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1184                                            &lo->plh_flags))
1185                 layoutreturn = pnfs_prepare_layoutreturn(lo);
1186         if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
1187                 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1188
1189         spin_unlock(&ino->i_lock);
1190         if (layoutreturn) {
1191                 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
1192                 return true;
1193         }
1194         return false;
1195 }
1196
1197 /*
1198  * Compare two layout segments for sorting into layout cache.
1199  * We want to preferentially return RW over RO layouts, so ensure those
1200  * are seen first.
1201  */
1202 static s64
1203 pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
1204            const struct pnfs_layout_range *l2)
1205 {
1206         s64 d;
1207
1208         /* high offset > low offset */
1209         d = l1->offset - l2->offset;
1210         if (d)
1211                 return d;
1212
1213         /* short length > long length */
1214         d = l2->length - l1->length;
1215         if (d)
1216                 return d;
1217
1218         /* read > read/write */
1219         return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
1220 }
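/*
 * Example ordering under this comparator: {offset 0, length
 * NFS4_MAX_UINT64, IOMODE_RW} sorts before {offset 0, length
 * NFS4_MAX_UINT64, IOMODE_READ}, which sorts before {offset 4096,
 * length 4096, IOMODE_RW}.  The cache is therefore kept in ascending
 * order of offset, with longer segments and RW segments first among
 * equals, so lookups see whole-file RW layouts before narrower or
 * read-only ones.
 */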
1221
1222 static void
1223 pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1224                    struct pnfs_layout_segment *lseg)
1225 {
1226         struct pnfs_layout_segment *lp;
1227
1228         dprintk("%s:Begin\n", __func__);
1229
1230         list_for_each_entry(lp, &lo->plh_segs, pls_list) {
1231                 if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
1232                         continue;
1233                 list_add_tail(&lseg->pls_list, &lp->pls_list);
1234                 dprintk("%s: inserted lseg %p "
1235                         "iomode %d offset %llu length %llu before "
1236                         "lp %p iomode %d offset %llu length %llu\n",
1237                         __func__, lseg, lseg->pls_range.iomode,
1238                         lseg->pls_range.offset, lseg->pls_range.length,
1239                         lp, lp->pls_range.iomode, lp->pls_range.offset,
1240                         lp->pls_range.length);
1241                 goto out;
1242         }
1243         list_add_tail(&lseg->pls_list, &lo->plh_segs);
1244         dprintk("%s: inserted lseg %p "
1245                 "iomode %d offset %llu length %llu at tail\n",
1246                 __func__, lseg, lseg->pls_range.iomode,
1247                 lseg->pls_range.offset, lseg->pls_range.length);
1248 out:
1249         pnfs_get_layout_hdr(lo);
1250
1251         dprintk("%s:Return\n", __func__);
1252 }
1253
1254 static struct pnfs_layout_hdr *
1255 alloc_init_layout_hdr(struct inode *ino,
1256                       struct nfs_open_context *ctx,
1257                       gfp_t gfp_flags)
1258 {
1259         struct pnfs_layout_hdr *lo;
1260
1261         lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
1262         if (!lo)
1263                 return NULL;
1264         atomic_set(&lo->plh_refcount, 1);
1265         INIT_LIST_HEAD(&lo->plh_layouts);
1266         INIT_LIST_HEAD(&lo->plh_segs);
1267         INIT_LIST_HEAD(&lo->plh_bulk_destroy);
1268         lo->plh_inode = ino;
1269         lo->plh_lc_cred = get_rpccred(ctx->cred);
1270         return lo;
1271 }
1272
1273 static struct pnfs_layout_hdr *
1274 pnfs_find_alloc_layout(struct inode *ino,
1275                        struct nfs_open_context *ctx,
1276                        gfp_t gfp_flags)
1277 {
1278         struct nfs_inode *nfsi = NFS_I(ino);
1279         struct pnfs_layout_hdr *new = NULL;
1280
1281         dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
1282
1283         if (nfsi->layout != NULL)
1284                 goto out_existing;
1285         spin_unlock(&ino->i_lock);
1286         new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
1287         spin_lock(&ino->i_lock);
1288
1289         if (likely(nfsi->layout == NULL)) {     /* Won the race? */
1290                 nfsi->layout = new;
1291                 return new;
1292         } else if (new != NULL)
1293                 pnfs_free_layout_hdr(new);
1294 out_existing:
1295         pnfs_get_layout_hdr(nfsi->layout);
1296         return nfsi->layout;
1297 }
1298
1299 /*
1300  * iomode matching rules:
1301  * iomode       lseg    match
1302  * -----        -----   -----
1303  * ANY          READ    true
1304  * ANY          RW      true
1305  * RW           READ    false
1306  * RW           RW      true
1307  * READ         READ    true
1308  * READ         RW      true
1309  */
1310 static bool
1311 pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
1312                  const struct pnfs_layout_range *range)
1313 {
1314         struct pnfs_layout_range range1;
1315
1316         if ((range->iomode == IOMODE_RW &&
1317              ls_range->iomode != IOMODE_RW) ||
1318             !pnfs_lseg_range_intersecting(ls_range, range))
1319                 return 0;
1320
1321         /* range1 covers only the first byte in the range */
1322         range1 = *range;
1323         range1.length = 1;
1324         return pnfs_lseg_range_contained(ls_range, &range1);
1325 }
1326
1327 /*
1328  * lookup range in layout
1329  */
1330 static struct pnfs_layout_segment *
1331 pnfs_find_lseg(struct pnfs_layout_hdr *lo,
1332                 struct pnfs_layout_range *range)
1333 {
1334         struct pnfs_layout_segment *lseg, *ret = NULL;
1335
1336         dprintk("%s:Begin\n", __func__);
1337
1338         list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
1339                 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
1340                     !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
1341                     pnfs_lseg_range_match(&lseg->pls_range, range)) {
1342                         ret = pnfs_get_lseg(lseg);
1343                         break;
1344                 }
1345                 if (lseg->pls_range.offset > range->offset)
1346                         break;
1347         }
1348
1349         dprintk("%s:Return lseg %p ref %d\n",
1350                 __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
1351         return ret;
1352 }
1353
1354 /*
1355  * Use mdsthreshold hints set at each OPEN to determine if I/O should go
1356  * to the MDS or over pNFS
1357  *
1358  * The nfs_inode read_io and write_io fields are cumulative counters reset
1359  * when there are no layout segments. Note that in pnfs_update_layout iomode
1360  * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
1361  * WRITE request.
1362  *
1363  * A return of true means use MDS I/O.
1364  *
1365  * From rfc 5661:
1366  * If a file's size is smaller than the file size threshold, data accesses
1367  * SHOULD be sent to the metadata server.  If an I/O request has a length that
1368  * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
1369  * server.  If both file size and I/O size are provided, the client SHOULD
1370  * reach or exceed  both thresholds before sending its read or write
1371  * requests to the data server.
1372  */
1373 static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1374                                      struct inode *ino, int iomode)
1375 {
1376         struct nfs4_threshold *t = ctx->mdsthreshold;
1377         struct nfs_inode *nfsi = NFS_I(ino);
1378         loff_t fsize = i_size_read(ino);
1379         bool size = false, size_set = false, io = false, io_set = false, ret = false;
1380
1381         if (t == NULL)
1382                 return ret;
1383
1384         dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1385                 __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
1386
1387         switch (iomode) {
1388         case IOMODE_READ:
1389                 if (t->bm & THRESHOLD_RD) {
1390                         dprintk("%s fsize %llu\n", __func__, fsize);
1391                         size_set = true;
1392                         if (fsize < t->rd_sz)
1393                                 size = true;
1394                 }
1395                 if (t->bm & THRESHOLD_RD_IO) {
1396                         dprintk("%s nfsi->read_io %llu\n", __func__,
1397                                 nfsi->read_io);
1398                         io_set = true;
1399                         if (nfsi->read_io < t->rd_io_sz)
1400                                 io = true;
1401                 }
1402                 break;
1403         case IOMODE_RW:
1404                 if (t->bm & THRESHOLD_WR) {
1405                         dprintk("%s fsize %llu\n", __func__, fsize);
1406                         size_set = true;
1407                         if (fsize < t->wr_sz)
1408                                 size = true;
1409                 }
1410                 if (t->bm & THRESHOLD_WR_IO) {
1411                         dprintk("%s nfsi->write_io %llu\n", __func__,
1412                                 nfsi->write_io);
1413                         io_set = true;
1414                         if (nfsi->write_io < t->wr_io_sz)
1415                                 io = true;
1416                 }
1417                 break;
1418         }
1419         if (size_set && io_set) {
1420                 if (size && io)
1421                         ret = true;
1422         } else if (size || io)
1423                 ret = true;
1424
1425         dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
1426         return ret;
1427 }
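/*
 * Example: with an OPEN-time threshold of { .bm = THRESHOLD_RD,
 * .rd_sz = 65536 }, a 16k file gives size_set && size, so reads are sent
 * to the MDS; a 1M file gives size_set && !size, so reads may use pNFS.
 * When both a size and an I/O threshold are set for the iomode, both must
 * be under their limits before MDS I/O is chosen.
 */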
1428
1429 /* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
1430 static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key)
1431 {
1432         if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
1433                 return 1;
1434         return nfs_wait_bit_killable(key);
1435 }
1436
1437 static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
1438 {
1439         /*
1440          * send layoutcommit as it can hold up layoutreturn due to lseg
1441          * reference
1442          */
1443         pnfs_layoutcommit_inode(lo->plh_inode, false);
1444         return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
1445                                    pnfs_layoutget_retry_bit_wait,
1446                                    TASK_UNINTERRUPTIBLE);
1447 }
1448
1449 static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
1450 {
1451         unsigned long *bitlock = &lo->plh_flags;
1452
1453         clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
1454         smp_mb__after_atomic();
1455         wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
1456 }
1457
1458 /*
1459  * Layout segment is retrieved from the server if not cached.
1460  * The appropriate layout segment is referenced and returned to the caller.
1461  */
1462 struct pnfs_layout_segment *
1463 pnfs_update_layout(struct inode *ino,
1464                    struct nfs_open_context *ctx,
1465                    loff_t pos,
1466                    u64 count,
1467                    enum pnfs_iomode iomode,
1468                    gfp_t gfp_flags)
1469 {
1470         struct pnfs_layout_range arg = {
1471                 .iomode = iomode,
1472                 .offset = pos,
1473                 .length = count,
1474         };
1475         unsigned pg_offset;
1476         struct nfs_server *server = NFS_SERVER(ino);
1477         struct nfs_client *clp = server->nfs_client;
1478         struct pnfs_layout_hdr *lo;
1479         struct pnfs_layout_segment *lseg = NULL;
1480         bool first;
1481
1482         if (!pnfs_enabled_sb(NFS_SERVER(ino)))
1483                 goto out;
1484
1485         if (pnfs_within_mdsthreshold(ctx, ino, iomode))
1486                 goto out;
1487
1488 lookup_again:
1489         first = false;
1490         spin_lock(&ino->i_lock);
1491         lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
1492         if (lo == NULL) {
1493                 spin_unlock(&ino->i_lock);
1494                 goto out;
1495         }
1496
1497         /* Do we even need to bother with this? */
1498         if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1499                 dprintk("%s matches recall, use MDS\n", __func__);
1500                 goto out_unlock;
1501         }
1502
1503         /* if LAYOUTGET already failed once we don't try again */
1504         if (pnfs_layout_io_test_failed(lo, iomode) &&
1505             !pnfs_should_retry_layoutget(lo))
1506                 goto out_unlock;
1507
1508         first = list_empty(&lo->plh_segs);
1509         if (first) {
1510                 /* The first layoutget for the file. Need to serialize per
1511                  * RFC 5661 Errata 3208.
1512                  */
1513                 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
1514                                      &lo->plh_flags)) {
1515                         spin_unlock(&ino->i_lock);
1516                         wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
1517                                     TASK_UNINTERRUPTIBLE);
1518                         pnfs_put_layout_hdr(lo);
1519                         goto lookup_again;
1520                 }
1521         } else {
1522                 /* Check to see if the layout for the given range
1523                  * already exists
1524                  */
1525                 lseg = pnfs_find_lseg(lo, &arg);
1526                 if (lseg)
1527                         goto out_unlock;
1528         }
1529
1530         /*
1531          * Because we free lsegs before sending LAYOUTRETURN, we need to wait
1532          * for LAYOUTRETURN even if first is true.
1533          */
1534         if (!lseg && pnfs_should_retry_layoutget(lo) &&
1535             test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1536                 spin_unlock(&ino->i_lock);
1537                 dprintk("%s wait for layoutreturn\n", __func__);
1538                 if (pnfs_prepare_to_retry_layoutget(lo)) {
1539                         if (first)
1540                                 pnfs_clear_first_layoutget(lo);
1541                         pnfs_put_layout_hdr(lo);
1542                         dprintk("%s retrying\n", __func__);
1543                         goto lookup_again;
1544                 }
1545                 goto out_put_layout_hdr;
1546         }
1547
1548         if (pnfs_layoutgets_blocked(lo, &arg))
1549                 goto out_unlock;
1550         atomic_inc(&lo->plh_outstanding);
1551         spin_unlock(&ino->i_lock);
1552
1553         if (list_empty(&lo->plh_layouts)) {
1554                 /* The lo must be on the clp list if there is any
1555                  * chance of a CB_LAYOUTRECALL(FILE) coming in.
1556                  */
1557                 spin_lock(&clp->cl_lock);
1558                 if (list_empty(&lo->plh_layouts))
1559                         list_add_tail(&lo->plh_layouts, &server->layouts);
1560                 spin_unlock(&clp->cl_lock);
1561         }
1562
1563         pg_offset = arg.offset & ~PAGE_CACHE_MASK;
1564         if (pg_offset) {
1565                 arg.offset -= pg_offset;
1566                 arg.length += pg_offset;
1567         }
1568         if (arg.length != NFS4_MAX_UINT64)
1569                 arg.length = PAGE_CACHE_ALIGN(arg.length);
1570
1571         lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
1572         pnfs_clear_retry_layoutget(lo);
1573         atomic_dec(&lo->plh_outstanding);
1574 out_put_layout_hdr:
1575         if (first)
1576                 pnfs_clear_first_layoutget(lo);
1577         pnfs_put_layout_hdr(lo);
1578 out:
1579         dprintk("%s: inode %s/%llu pNFS layout segment %s for "
1580                         "(%s, offset: %llu, length: %llu)\n",
1581                         __func__, ino->i_sb->s_id,
1582                         (unsigned long long)NFS_FILEID(ino),
1583                         lseg == NULL ? "not found" : "found",
1584                         iomode == IOMODE_RW ? "read/write" : "read-only",
1585                         (unsigned long long)pos,
1586                         (unsigned long long)count);
1587         return lseg;
1588 out_unlock:
1589         spin_unlock(&ino->i_lock);
1590         goto out_put_layout_hdr;
1591 }
1592 EXPORT_SYMBOL_GPL(pnfs_update_layout);
1593
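/*
 * Process a LAYOUTGET reply: have the layout driver decode the opaque layout
 * into an lseg, validate the returned stateid against the cached layout
 * (forgetting the reply if a recall or a stateid race makes it unusable), and
 * insert the new segment into the layout header's segment list.
 */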
1594 struct pnfs_layout_segment *
1595 pnfs_layout_process(struct nfs4_layoutget *lgp)
1596 {
1597         struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
1598         struct nfs4_layoutget_res *res = &lgp->res;
1599         struct pnfs_layout_segment *lseg;
1600         struct inode *ino = lo->plh_inode;
1601         LIST_HEAD(free_me);
1602         int status = 0;
1603
1604         /* Inject layout blob into I/O device driver */
1605         lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
1606         if (!lseg || IS_ERR(lseg)) {
1607                 if (!lseg)
1608                         status = -ENOMEM;
1609                 else
1610                         status = PTR_ERR(lseg);
1611                 dprintk("%s: Could not allocate layout: error %d\n",
1612                        __func__, status);
1613                 goto out;
1614         }
1615
1616         init_lseg(lo, lseg);
1617         lseg->pls_range = res->range;
1618
1619         spin_lock(&ino->i_lock);
1620         if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1621                 dprintk("%s forget reply due to recall\n", __func__);
1622                 goto out_forget_reply;
1623         }
1624
1625         if (pnfs_layoutgets_blocked(lo, &lgp->args.range)) {
1626                 dprintk("%s forget reply due to state\n", __func__);
1627                 goto out_forget_reply;
1628         }
1629
1630         if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
1631                 /* existing state ID, make sure the sequence number matches. */
1632                 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
1633                         dprintk("%s forget reply due to sequence\n", __func__);
1634                         goto out_forget_reply;
1635                 }
1636                 pnfs_set_layout_stateid(lo, &res->stateid, false);
1637         } else {
1638                 /*
1639                  * We got an entirely new state ID.  Mark all segments for the
1640                  * inode invalid, and don't bother validating the stateid
1641                  * sequence number.
1642                  */
1643                 pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);
1644
1645                 nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
1646                 lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
1647         }
1648
1649         clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
1650
1651         pnfs_get_lseg(lseg);
1652         pnfs_layout_insert_lseg(lo, lseg);
1653
1654         if (res->return_on_close) {
1655                 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
1656                 set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
1657         }
1658
1659         spin_unlock(&ino->i_lock);
1660         pnfs_free_lseg_list(&free_me);
1661         return lseg;
1662 out:
1663         return ERR_PTR(status);
1664
1665 out_forget_reply:
1666         spin_unlock(&ino->i_lock);
1667         lseg->pls_layout = lo;
1668         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
1669         goto out;
1670 }
1671
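/*
 * Mark every cached lseg that matches @return_range for layoutreturn: set
 * NFS_LSEG_LAYOUTRETURN on it and invalidate it, so that unused segments end
 * up on @tmp_list for the caller to free.
 */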
1672 static void
1673 pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
1674                                 struct list_head *tmp_list,
1675                                 struct pnfs_layout_range *return_range)
1676 {
1677         struct pnfs_layout_segment *lseg, *next;
1678
1679         dprintk("%s:Begin lo %p\n", __func__, lo);
1680
1681         if (list_empty(&lo->plh_segs))
1682                 return;
1683
1684         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
1685                 if (should_free_lseg(&lseg->pls_range, return_range)) {
1686                         dprintk("%s: marking lseg %p iomode %d "
1687                                 "offset %llu length %llu\n", __func__,
1688                                 lseg, lseg->pls_range.iomode,
1689                                 lseg->pls_range.offset,
1690                                 lseg->pls_range.length);
1691                         set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
1692                         mark_lseg_invalid(lseg, tmp_list);
1693                 }
1694 }
1695
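/*
 * Called by layout drivers on an I/O error.  Fail the pNFS path for the
 * lseg's iomode so that subsequent I/O falls back to the MDS, widen
 * plh_return_iomode to cover the failed range, and mark all matching lsegs
 * so the layout can be returned once the last segment reference is dropped.
 */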
1696 void pnfs_error_mark_layout_for_return(struct inode *inode,
1697                                        struct pnfs_layout_segment *lseg)
1698 {
1699         struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
1700         int iomode = pnfs_iomode_to_fail_bit(lseg->pls_range.iomode);
1701         struct pnfs_layout_range range = {
1702                 .iomode = lseg->pls_range.iomode,
1703                 .offset = 0,
1704                 .length = NFS4_MAX_UINT64,
1705         };
1706         LIST_HEAD(free_me);
1707
1708         spin_lock(&inode->i_lock);
1709         /* Set the failure bit so that the pNFS path will be retried later. */
1710         pnfs_layout_set_fail_bit(lo, iomode);
1711         if (lo->plh_return_iomode == 0)
1712                 lo->plh_return_iomode = range.iomode;
1713         else if (lo->plh_return_iomode != range.iomode)
1714                 lo->plh_return_iomode = IOMODE_ANY;
1715         /*
1716          * mark all matching lsegs so that we are sure to have no live
1717          * segments at hand when sending layoutreturn. See pnfs_put_lseg()
1718          * for how it works.
1719          */
1720         pnfs_mark_matching_lsegs_return(lo, &free_me, &range);
1721         spin_unlock(&inode->i_lock);
1722         pnfs_free_lseg_list(&free_me);
1723 }
1724 EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
1725
1726 void
1727 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1728 {
1729         u64 rd_size = req->wb_bytes;
1730
1731         if (pgio->pg_lseg == NULL) {
1732                 if (pgio->pg_dreq == NULL)
1733                         rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
1734                 else
1735                         rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
1736
1737                 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1738                                                    req->wb_context,
1739                                                    req_offset(req),
1740                                                    rd_size,
1741                                                    IOMODE_READ,
1742                                                    GFP_KERNEL);
1743         }
1744         /* If no lseg, fall back to read through mds */
1745         if (pgio->pg_lseg == NULL)
1746                 nfs_pageio_reset_read_mds(pgio);
1747
1748 }
1749 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
1750
1751 void
1752 pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
1753                            struct nfs_page *req, u64 wb_size)
1754 {
1755         if (pgio->pg_lseg == NULL)
1756                 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1757                                                    req->wb_context,
1758                                                    req_offset(req),
1759                                                    wb_size,
1760                                                    IOMODE_RW,
1761                                                    GFP_NOFS);
1762         /* If no lseg, fall back to write through mds */
1763         if (pgio->pg_lseg == NULL)
1764                 nfs_pageio_reset_write_mds(pgio);
1765 }
1766 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
1767
1768 void
1769 pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
1770 {
1771         if (desc->pg_lseg) {
1772                 pnfs_put_lseg(desc->pg_lseg);
1773                 desc->pg_lseg = NULL;
1774         }
1775 }
1776 EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
1777
1778 /*
1779  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
1780  * of bytes (maximum @req->wb_bytes) that can be coalesced.
1781  */
1782 size_t
1783 pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
1784                      struct nfs_page *prev, struct nfs_page *req)
1785 {
1786         unsigned int size;
1787         u64 seg_end, req_start, seg_left;
1788
1789         size = nfs_generic_pg_test(pgio, prev, req);
1790         if (!size)
1791                 return 0;
1792
1793         /*
1794          * 'size' contains the number of bytes left in the current page (up
1795          * to the original size asked for in @req->wb_bytes).
1796          *
1797          * Calculate how many bytes are left in the layout segment
1798          * and if there are fewer bytes than 'size', return that instead.
1799          *
1800          * Please also note that 'end_offset' is actually the offset of the
1801          * first byte that lies outside the pnfs_layout_range. FIXME?
1802          *
1803          */
1804         if (pgio->pg_lseg) {
1805                 seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
1806                                      pgio->pg_lseg->pls_range.length);
1807                 req_start = req_offset(req);
1808                 WARN_ON_ONCE(req_start >= seg_end);
1809                 /* start of request is past the last byte of this segment */
1810                 if (req_start >= seg_end) {
1811                         /* reference the new lseg */
1812                         if (pgio->pg_ops->pg_cleanup)
1813                                 pgio->pg_ops->pg_cleanup(pgio);
1814                         if (pgio->pg_ops->pg_init)
1815                                 pgio->pg_ops->pg_init(pgio, req);
1816                         return 0;
1817                 }
1818
1819                 /* adjust 'size' iff there are fewer bytes left in the
1820                  * segment than what nfs_generic_pg_test returned */
1821                 seg_left = seg_end - req_start;
1822                 if (seg_left < size)
1823                         size = (unsigned int)seg_left;
1824         }
1825
1826         return size;
1827 }
1828 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
1829
1830 int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
1831 {
1832         struct nfs_pageio_descriptor pgio;
1833
1834         /* Resend all requests through the MDS */
1835         nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
1836                               hdr->completion_ops);
1837         set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
1838         return nfs_pageio_resend(&pgio, hdr);
1839 }
1840 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
1841
1842 static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
1843 {
1844
1845         dprintk("pnfs write error = %d\n", hdr->pnfs_error);
1846         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1847             PNFS_LAYOUTRET_ON_ERROR) {
1848                 pnfs_return_layout(hdr->inode);
1849         }
1850         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1851                 hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
1852 }
1853
1854 /*
1855  * Called by non-RPC-based layout drivers
1856  */
1857 void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
1858 {
1859         trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
1860         if (!hdr->pnfs_error) {
1861                 pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
1862                                 hdr->mds_offset + hdr->res.count);
1863                 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
1864         } else
1865                 pnfs_ld_handle_write_error(hdr);
1866         hdr->mds_ops->rpc_release(hdr);
1867 }
1868 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
1869
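/*
 * The layout driver did not attempt the write (PNFS_NOT_ATTEMPTED): put the
 * requests back on the mirror's list, point the descriptor at the MDS and
 * let the pages be re-coalesced and written through regular NFS.
 */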
1870 static void
1871 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1872                 struct nfs_pgio_header *hdr)
1873 {
1874         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1875
1876         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1877                 list_splice_tail_init(&hdr->pages, &mirror->pg_list);
1878                 nfs_pageio_reset_write_mds(desc);
1879                 mirror->pg_recoalesce = 1;
1880         }
1881         nfs_pgio_data_destroy(hdr);
1882         hdr->release(hdr);
1883 }
1884
1885 static enum pnfs_try_status
1886 pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
1887                         const struct rpc_call_ops *call_ops,
1888                         struct pnfs_layout_segment *lseg,
1889                         int how)
1890 {
1891         struct inode *inode = hdr->inode;
1892         enum pnfs_try_status trypnfs;
1893         struct nfs_server *nfss = NFS_SERVER(inode);
1894
1895         hdr->mds_ops = call_ops;
1896
1897         dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1898                 inode->i_ino, hdr->args.count, hdr->args.offset, how);
1899         trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
1900         if (trypnfs != PNFS_NOT_ATTEMPTED)
1901                 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
1902         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1903         return trypnfs;
1904 }
1905
1906 static void
1907 pnfs_do_write(struct nfs_pageio_descriptor *desc,
1908               struct nfs_pgio_header *hdr, int how)
1909 {
1910         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1911         struct pnfs_layout_segment *lseg = desc->pg_lseg;
1912         enum pnfs_try_status trypnfs;
1913
1914         trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
1915         if (trypnfs == PNFS_NOT_ATTEMPTED)
1916                 pnfs_write_through_mds(desc, hdr);
1917 }
1918
1919 static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
1920 {
1921         pnfs_put_lseg(hdr->lseg);
1922         nfs_pgio_header_free(hdr);
1923 }
1924
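/*
 * Generic write path for pNFS, typically wired up as a layout driver's
 * ->pg_doio callback: allocate a pgio header, attach the descriptor's lseg
 * and dispatch via pnfs_do_write(), which falls back to the MDS if the
 * layout driver declines the I/O.
 */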
1925 int
1926 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1927 {
1928         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1929
1930         struct nfs_pgio_header *hdr;
1931         int ret;
1932
1933         hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
1934         if (!hdr) {
1935                 desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
1936                 return -ENOMEM;
1937         }
1938         nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
1939
1940         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
1941         ret = nfs_generic_pgio(desc, hdr);
1942         if (!ret)
1943                 pnfs_do_write(desc, hdr, desc->pg_ioflags);
1944
1945         return ret;
1946 }
1947 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1948
1949 int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
1950 {
1951         struct nfs_pageio_descriptor pgio;
1952
1953         /* Resend all requests through the MDS */
1954         nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
1955         return nfs_pageio_resend(&pgio, hdr);
1956 }
1957 EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
1958
1959 static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
1960 {
1961         dprintk("pnfs read error = %d\n", hdr->pnfs_error);
1962         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1963             PNFS_LAYOUTRET_ON_ERROR) {
1964                 pnfs_return_layout(hdr->inode);
1965         }
1966         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1967                 hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
1968 }
1969
1970 /*
1971  * Called by non-RPC-based layout drivers
1972  */
1973 void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
1974 {
1975         trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
1976         if (likely(!hdr->pnfs_error)) {
1977                 __nfs4_read_done_cb(hdr);
1978                 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
1979         } else
1980                 pnfs_ld_handle_read_error(hdr);
1981         hdr->mds_ops->rpc_release(hdr);
1982 }
1983 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
1984
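/*
 * Read-side counterpart of pnfs_write_through_mds(): requeue the requests
 * and resend them through the MDS.
 */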
1985 static void
1986 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
1987                 struct nfs_pgio_header *hdr)
1988 {
1989         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
1990
1991         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1992                 list_splice_tail_init(&hdr->pages, &mirror->pg_list);
1993                 nfs_pageio_reset_read_mds(desc);
1994                 mirror->pg_recoalesce = 1;
1995         }
1996         nfs_pgio_data_destroy(hdr);
1997         hdr->release(hdr);
1998 }
1999
2000 /*
2001  * Call the appropriate parallel I/O subsystem read function.
2002  */
2003 static enum pnfs_try_status
2004 pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
2005                        const struct rpc_call_ops *call_ops,
2006                        struct pnfs_layout_segment *lseg)
2007 {
2008         struct inode *inode = hdr->inode;
2009         struct nfs_server *nfss = NFS_SERVER(inode);
2010         enum pnfs_try_status trypnfs;
2011
2012         hdr->mds_ops = call_ops;
2013
2014         dprintk("%s: Reading ino:%lu %u@%llu\n",
2015                 __func__, inode->i_ino, hdr->args.count, hdr->args.offset);
2016
2017         trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
2018         if (trypnfs != PNFS_NOT_ATTEMPTED)
2019                 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
2020         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
2021         return trypnfs;
2022 }
2023
2024 /* Resend all requests through pnfs. */
2025 int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
2026 {
2027         struct nfs_pageio_descriptor pgio;
2028
2029         nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops);
2030         return nfs_pageio_resend(&pgio, hdr);
2031 }
2032 EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
2033
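/*
 * Hand the read off to the layout driver.  If the driver asks for a retry
 * (PNFS_TRY_AGAIN), resend through pNFS; on PNFS_NOT_ATTEMPTED or a resend
 * failure, fall back to reading through the MDS.
 */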
2034 static void
2035 pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
2036 {
2037         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
2038         struct pnfs_layout_segment *lseg = desc->pg_lseg;
2039         enum pnfs_try_status trypnfs;
2040         int err = 0;
2041
2042         trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
2043         if (trypnfs == PNFS_TRY_AGAIN)
2044                 err = pnfs_read_resend_pnfs(hdr);
2045         if (trypnfs == PNFS_NOT_ATTEMPTED || err)
2046                 pnfs_read_through_mds(desc, hdr);
2047 }
2048
2049 static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
2050 {
2051         pnfs_put_lseg(hdr->lseg);
2052         nfs_pgio_header_free(hdr);
2053 }
2054
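/*
 * Generic read path for pNFS; mirrors pnfs_generic_pg_writepages() above.
 */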
2055 int
2056 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
2057 {
2058         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2059
2060         struct nfs_pgio_header *hdr;
2061         int ret;
2062
2063         hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
2064         if (!hdr) {
2065                 desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
2066                 return -ENOMEM;
2067         }
2068         nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
2069         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
2070         ret = nfs_generic_pgio(desc, hdr);
2071         if (!ret)
2072                 pnfs_do_read(desc, hdr);
2073         return ret;
2074 }
2075 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
2076
2077 static void pnfs_clear_layoutcommitting(struct inode *inode)
2078 {
2079         unsigned long *bitlock = &NFS_I(inode)->flags;
2080
2081         clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
2082         smp_mb__after_atomic();
2083         wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
2084 }
2085
2086 /*
2087  * There can be multiple RW segments.
2088  */
2089 static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
2090 {
2091         struct pnfs_layout_segment *lseg;
2092
2093         list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
2094                 if (lseg->pls_range.iomode == IOMODE_RW &&
2095                     test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
2096                         list_add(&lseg->pls_lc_list, listp);
2097         }
2098 }
2099
2100 static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
2101 {
2102         struct pnfs_layout_segment *lseg, *tmp;
2103
2104         /* Matched by references in pnfs_set_layoutcommit */
2105         list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
2106                 list_del_init(&lseg->pls_lc_list);
2107                 pnfs_put_lseg(lseg);
2108         }
2109
2110         pnfs_clear_layoutcommitting(inode);
2111 }
2112
2113 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
2114 {
2115         pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
2116 }
2117 EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
2118
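/*
 * Record that @lseg now has dirty data up to @end_pos: pin the lseg, advance
 * the layout's last-write-byte (plh_lwb) and, on the first such write, mark
 * the inode dirty so a LAYOUTCOMMIT gets sent on the next sync.
 */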
2119 void
2120 pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
2121                 loff_t end_pos)
2122 {
2123         struct nfs_inode *nfsi = NFS_I(inode);
2124         bool mark_as_dirty = false;
2125
2126         spin_lock(&inode->i_lock);
2127         if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
2128                 nfsi->layout->plh_lwb = end_pos;
2129                 mark_as_dirty = true;
2130                 dprintk("%s: Set layoutcommit for inode %lu ",
2131                         __func__, inode->i_ino);
2132         } else if (end_pos > nfsi->layout->plh_lwb)
2133                 nfsi->layout->plh_lwb = end_pos;
2134         if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
2135                 /* references matched in nfs4_layoutcommit_release */
2136                 pnfs_get_lseg(lseg);
2137         }
2138         spin_unlock(&inode->i_lock);
2139         dprintk("%s: lseg %p end_pos %llu\n",
2140                 __func__, lseg, nfsi->layout->plh_lwb);
2141
2142         /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
2143          * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
2144         if (mark_as_dirty)
2145                 mark_inode_dirty_sync(inode);
2146 }
2147 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
2148
2149 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
2150 {
2151         struct nfs_server *nfss = NFS_SERVER(data->args.inode);
2152
2153         if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
2154                 nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
2155         pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
2156 }
2157
2158 /*
2159  * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
2160  * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
2161  * data to disk to allow the server to recover the data if it crashes.
2162  * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
2163  * is off, and a COMMIT is sent to a data server, or
2164  * if WRITEs to a data server return NFS_DATA_SYNC.
2165  */
2166 int
2167 pnfs_layoutcommit_inode(struct inode *inode, bool sync)
2168 {
2169         struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
2170         struct nfs4_layoutcommit_data *data;
2171         struct nfs_inode *nfsi = NFS_I(inode);
2172         loff_t end_pos;
2173         int status;
2174
2175         if (!pnfs_layoutcommit_outstanding(inode))
2176                 return 0;
2177
2178         dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
2179
2180         status = -EAGAIN;
2181         if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
2182                 if (!sync)
2183                         goto out;
2184                 status = wait_on_bit_lock_action(&nfsi->flags,
2185                                 NFS_INO_LAYOUTCOMMITTING,
2186                                 nfs_wait_bit_killable,
2187                                 TASK_KILLABLE);
2188                 if (status)
2189                         goto out;
2190         }
2191
2192         status = -ENOMEM;
2193         /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
2194         data = kzalloc(sizeof(*data), GFP_NOFS);
2195         if (!data)
2196                 goto clear_layoutcommitting;
2197
2198         status = 0;
2199         spin_lock(&inode->i_lock);
2200         if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
2201                 goto out_unlock;
2202
2203         INIT_LIST_HEAD(&data->lseg_list);
2204         pnfs_list_write_lseg(inode, &data->lseg_list);
2205
2206         end_pos = nfsi->layout->plh_lwb;
2207
2208         nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
2209         spin_unlock(&inode->i_lock);
2210
2211         data->args.inode = inode;
2212         data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
2213         nfs_fattr_init(&data->fattr);
2214         data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
2215         data->res.fattr = &data->fattr;
2216         data->args.lastbytewritten = end_pos - 1;
2217         data->res.server = NFS_SERVER(inode);
2218
2219         if (ld->prepare_layoutcommit) {
2220                 status = ld->prepare_layoutcommit(&data->args);
2221                 if (status) {
2222                         put_rpccred(data->cred);
2223                         spin_lock(&inode->i_lock);
2224                         set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
2225                         if (end_pos > nfsi->layout->plh_lwb)
2226                                 nfsi->layout->plh_lwb = end_pos;
2227                         goto out_unlock;
2228                 }
2229         }
2230
2231
2232         status = nfs4_proc_layoutcommit(data, sync);
2233 out:
2234         if (status)
2235                 mark_inode_dirty_sync(inode);
2236         dprintk("<-- %s status %d\n", __func__, status);
2237         return status;
2238 out_unlock:
2239         spin_unlock(&inode->i_lock);
2240         kfree(data);
2241 clear_layoutcommitting:
2242         pnfs_clear_layoutcommitting(inode);
2243         goto out;
2244 }
2245 EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
2246
2247 int
2248 pnfs_generic_sync(struct inode *inode, bool datasync)
2249 {
2250         return pnfs_layoutcommit_inode(inode, true);
2251 }
2252 EXPORT_SYMBOL_GPL(pnfs_generic_sync);
2253
2254 struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
2255 {
2256         struct nfs4_threshold *thp;
2257
2258         thp = kzalloc(sizeof(*thp), GFP_NOFS);
2259         if (!thp) {
2260                 dprintk("%s mdsthreshold allocation failed\n", __func__);
2261                 return NULL;
2262         }
2263         return thp;
2264 }
2265
2266 #if IS_ENABLED(CONFIG_NFS_V4_2)
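/*
 * Send a LAYOUTSTATS report for @inode if the server and the layout driver
 * both support it.  NFS_INO_LAYOUTSTATS serializes reports so that only one
 * is in flight per inode at a time; it is cleared here on failure and,
 * presumably, by the completion path on success.
 */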
2267 int
2268 pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
2269 {
2270         struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
2271         struct nfs_server *server = NFS_SERVER(inode);
2272         struct nfs_inode *nfsi = NFS_I(inode);
2273         struct nfs42_layoutstat_data *data;
2274         struct pnfs_layout_hdr *hdr;
2275         int status = 0;
2276
2277         if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
2278                 goto out;
2279
2280         if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
2281                 goto out;
2282
2283         if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
2284                 goto out;
2285
2286         spin_lock(&inode->i_lock);
2287         if (!NFS_I(inode)->layout) {
2288                 spin_unlock(&inode->i_lock);
2289                 goto out;
2290         }
2291         hdr = NFS_I(inode)->layout;
2292         pnfs_get_layout_hdr(hdr);
2293         spin_unlock(&inode->i_lock);
2294
2295         data = kzalloc(sizeof(*data), gfp_flags);
2296         if (!data) {
2297                 status = -ENOMEM;
2298                 goto out_put;
2299         }
2300
2301         data->args.fh = NFS_FH(inode);
2302         data->args.inode = inode;
2303         nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
2304         status = ld->prepare_layoutstats(&data->args);
2305         if (status)
2306                 goto out_free;
2307
2308         status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);
2309
2310 out:
2311         dprintk("%s returns %d\n", __func__, status);
2312         return status;
2313
2314 out_free:
2315         kfree(data);
2316 out_put:
2317         pnfs_put_layout_hdr(hdr);
2318         smp_mb__before_atomic();
2319         clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
2320         smp_mb__after_atomic();
2321         goto out;
2322 }
2323 EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
2324 #endif