/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/blkdev.h>
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY		NFSDDBG_PNFS
struct nfs4_layout {
	struct list_head		lo_perstate;
	struct nfs4_layout_stateid	*lo_state;
	struct nfsd4_layout_seg		lo_seg;
};
static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;
const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
	[LAYOUT_FLEX_FILES]	= &ff_layout_ops,
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	[LAYOUT_SCSI]		= &scsi_layout_ops,
#endif
};
/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS	8
#define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);
static inline u32 devid_hashfn(u64 idx)
{
	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}
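/*
 * Allocate a device ID map for the export referenced by @fhp, or reuse
 * an existing entry with a matching fsid.  The map lets clients address
 * the export through a fixed-size pNFS device ID.
 */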
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
	const struct knfsd_fh *fh = &fhp->fh_handle;
	size_t fsid_len = key_len(fh->fh_fsid_type);
	struct nfsd4_deviceid_map *map, *old;
	int i;

	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
	if (!map)
		return;

	map->fsid_type = fh->fh_fsid_type;
	memcpy(&map->fsid, fh->fh_fsid, fsid_len);

	spin_lock(&nfsd_devid_lock);
	if (fhp->fh_export->ex_devid_map)
		goto out_unlock;

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
			if (old->fsid_type != fh->fh_fsid_type)
				continue;
			if (memcmp(old->fsid, fh->fh_fsid,
					key_len(old->fsid_type)))
				continue;

			fhp->fh_export->ex_devid_map = old;
			goto out_unlock;
		}
	}

	map->idx = nfsd_devid_seq++;
	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
	fhp->fh_export->ex_devid_map = map;
	map = NULL;

out_unlock:
	spin_unlock(&nfsd_devid_lock);
	kfree(map);
}

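/*
 * Look up a device ID map by index.  The hash chains are traversed
 * under RCU protection; returns NULL if no export owns this device ID.
 */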
struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
	struct nfsd4_deviceid_map *map, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
		if (map->idx == idx)
			ret = map;
	rcu_read_unlock();
	return ret;
}

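/*
 * Fill in the device ID for an export, allocating the fsid mapping on
 * first use.  Fails with -ENOMEM if no mapping could be set up.
 */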
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
		u32 device_generation)
{
	if (!fhp->fh_export->ex_devid_map) {
		nfsd4_alloc_devid_map(fhp);
		if (!fhp->fh_export->ex_devid_map)
			return -ENOMEM;
	}

	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
	id->generation = device_generation;
	id->pad = 0;
	return 0;
}

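/*
 * Advertise the layout types supported for this export, based on the
 * configured layout drivers and the capabilities of the underlying
 * file system and block device.
 */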
void nfsd4_setup_layout_type(struct svc_export *exp)
{
#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
	struct super_block *sb = exp->ex_path.mnt->mnt_sb;
#endif

	if (!(exp->ex_flags & NFSEXP_PNFS))
		return;

	/*
	 * If flex file is configured, use it by default. Otherwise
	 * check if the file system supports exporting a block-like layout.
	 * If the block device supports reservations prefer the SCSI layout,
	 * otherwise advertise the block layout.
	 */
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
	exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	/* overwrite flex file layout selection if needed */
	if (sb->s_export_op->get_uuid &&
	    sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks)
		exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	/* overwrite block layout selection if needed */
	if (sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks &&
	    sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops)
		exp->ex_layout_types |= 1 << LAYOUT_SCSI;
#endif
}

static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
	struct nfs4_layout_stateid *ls = layoutstateid(stid);
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct nfs4_file *fp = ls->ls_stid.sc_file;

	trace_layoutstate_free(&ls->ls_stid.sc_stateid);

	spin_lock(&clp->cl_lock);
	list_del_init(&ls->ls_perclnt);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del_init(&ls->ls_perfile);
	spin_unlock(&fp->fi_lock);

	vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
	fput(ls->ls_file);

	if (ls->ls_recalled)
		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

	kmem_cache_free(nfs4_layout_stateid_cache, ls);
}

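/*
 * Install an FL_LAYOUT lease on the file so that conflicting access
 * from local processes or other protocols triggers a layout recall
 * via nfsd4_layout_lm_break.
 */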
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
	struct file_lock *fl;
	int status;

	fl = locks_alloc_lock();
	if (!fl)
		return -ENOMEM;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd4_layouts_lm_ops;
	fl->fl_flags = FL_LAYOUT;
	fl->fl_type = F_RDLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = ls;
	fl->fl_pid = current->tgid;
	fl->fl_file = ls->ls_file;

	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
	if (status) {
		locks_free_lock(fl);
		return status;
	}
	BUG_ON(fl != NULL);
	return 0;
}

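/*
 * Create a new layout stateid derived from an open, lock or delegation
 * stateid, set up the recall lease, and hash the stateid into the
 * per-client and per-file lists.
 */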
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
		struct nfs4_stid *parent, u32 layout_type)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_file *fp = parent->sc_file;
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stp;

	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
	if (!stp)
		return NULL;
	stp->sc_free = nfsd4_free_layout_stateid;
	get_nfs4_file(fp);
	stp->sc_file = fp;

	ls = layoutstateid(stp);
	INIT_LIST_HEAD(&ls->ls_perclnt);
	INIT_LIST_HEAD(&ls->ls_perfile);
	spin_lock_init(&ls->ls_lock);
	INIT_LIST_HEAD(&ls->ls_layouts);
	mutex_init(&ls->ls_mutex);
	ls->ls_layout_type = layout_type;
	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
			NFSPROC4_CLNT_CB_LAYOUT);

	if (parent->sc_type == NFS4_DELEG_STID)
		ls->ls_file = get_file(fp->fi_deleg_file);
	else
		ls->ls_file = find_any_file(fp);
	BUG_ON(!ls->ls_file);

	if (nfsd4_layout_setlease(ls)) {
		fput(ls->ls_file);
		put_nfs4_file(fp);
		kmem_cache_free(nfs4_layout_stateid_cache, ls);
		return NULL;
	}

	spin_lock(&clp->cl_lock);
	stp->sc_type = NFS4_LAYOUT_STID;
	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_add(&ls->ls_perfile, &fp->fi_lo_states);
	spin_unlock(&fp->fi_lock);

	trace_layoutstate_alloc(&ls->ls_stid.sc_stateid);
	return ls;
}

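/*
 * Look up (and, for LAYOUTGET, optionally create) the layout stateid
 * for a layout operation.  On success the stateid is returned in *lsp
 * with ls_mutex held.
 */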
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stid;
	unsigned char typemask = NFS4_LAYOUT_STID;
	__be32 status;

	if (create)
		typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);

	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
			net_generic(SVC_NET(rqstp), nfsd_net_id));
	if (status)
		goto out;

	if (!fh_match(&cstate->current_fh.fh_handle,
		      &stid->sc_file->fi_fhandle)) {
		status = nfserr_bad_stateid;
		goto out_put_stid;
	}

	if (stid->sc_type != NFS4_LAYOUT_STID) {
		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
		nfs4_put_stid(stid);

		status = nfserr_jukebox;
		if (!ls)
			goto out;
		mutex_lock(&ls->ls_mutex);
	} else {
		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

		status = nfserr_bad_stateid;
		mutex_lock(&ls->ls_mutex);
		if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
			goto out_unlock_stid;
		if (layout_type != ls->ls_layout_type)
			goto out_unlock_stid;
	}

	*lsp = ls;
	return 0;

out_unlock_stid:
	mutex_unlock(&ls->ls_mutex);
out_put_stid:
	nfs4_put_stid(stid);
out:
	return status;
}

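/*
 * Recall the layouts held on this stateid by sending a CB_LAYOUTRECALL
 * to the client.  Only one recall is issued per stateid; later calls
 * are no-ops while ls_recalled is set.
 */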
static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
	spin_lock(&ls->ls_lock);
	if (ls->ls_recalled)
		goto out_unlock;

	ls->ls_recalled = true;
	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
	if (list_empty(&ls->ls_layouts))
		goto out_unlock;

	trace_layout_recall(&ls->ls_stid.sc_stateid);

	atomic_inc(&ls->ls_stid.sc_count);
	nfsd4_run_cb(&ls->ls_recall);

out_unlock:
	spin_unlock(&ls->ls_lock);
}

static inline u64
layout_end(struct nfsd4_layout_seg *seg)
{
	u64 end = seg->offset + seg->length;
	return end >= seg->offset ? end : NFS4_MAX_UINT64;
}

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
	if (end == NFS4_MAX_UINT64)
		lo->length = NFS4_MAX_UINT64;
	else
		lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
		return false;
	if (layout_end(&lo->lo_seg) <= s->offset)
		return false;
	if (layout_end(s) <= lo->lo_seg.offset)
		return false;
	return true;
}

static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
	if (lo->iomode != new->iomode)
		return false;
	if (layout_end(new) < lo->offset)
		return false;
	if (layout_end(lo) < new->offset)
		return false;

	lo->offset = min(lo->offset, new->offset);
	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
	return true;
}

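/*
 * Recall layouts held on this file through other stateids.  Returns
 * nfserr_recallconflict if any were outstanding, in which case the
 * caller must ask the client to retry.
 */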
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout_stateid *l, *n;
	__be32 nfserr = nfs_ok;

	assert_spin_locked(&fp->fi_lock);

	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
		if (l != ls) {
			nfsd4_recall_file_layout(l);
			nfserr = nfserr_recallconflict;
		}
	}
	return nfserr;
}

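/*
 * Add the layout granted by a LAYOUTGET to the stateid, merging it with
 * an existing segment when possible.  A new segment is allocated with
 * the locks dropped (the allocation may sleep) and the conflict and
 * merge checks are then retried.
 */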
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout *lp, *new = NULL;
	__be32 nfserr;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}
	spin_unlock(&ls->ls_lock);
	spin_unlock(&fp->fi_lock);

	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
	if (!new)
		return nfserr_jukebox;
	memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg));
	new->lo_state = ls;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}

	atomic_inc(&ls->ls_stid.sc_count);
	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
	new = NULL;
done:
	nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
	spin_unlock(&ls->ls_lock);
out:
	spin_unlock(&fp->fi_lock);
	if (new)
		kmem_cache_free(nfs4_layout_cache, new);
	return nfserr;
}

static void
nfsd4_free_layouts(struct list_head *reaplist)
{
	while (!list_empty(reaplist)) {
		struct nfs4_layout *lp = list_first_entry(reaplist,
				struct nfs4_layout, lo_perstate);

		list_del(&lp->lo_perstate);
		nfs4_put_stid(&lp->lo_state->ls_stid);
		kmem_cache_free(nfs4_layout_cache, lp);
	}
}

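/*
 * Trim a layout segment by the returned range.  A full overlap moves
 * the layout to the reaplist; a partial overlap shrinks it from either
 * end.  Splitting a segment in the middle is not supported.
 */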
static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
		struct list_head *reaplist)
{
	struct nfsd4_layout_seg *lo = &lp->lo_seg;
	u64 end = layout_end(lo);

	if (seg->offset <= lo->offset) {
		if (layout_end(seg) >= end) {
			list_move_tail(&lp->lo_perstate, reaplist);
			return;
		}
		lo->offset = layout_end(seg);
	} else {
		/* retain the whole layout segment on a split. */
		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
			return;
		}
		end = seg->offset;
	}

	layout_update_len(lo, end);
}

__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_layout *lp, *n;
	LIST_HEAD(reaplist);
	__be32 nfserr;
	int found = 0;

	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
						false, lrp->lr_layout_type,
						&ls);
	if (nfserr) {
		trace_layout_return_lookup_fail(&lrp->lr_sid);
		return nfserr;
	}

	spin_lock(&ls->ls_lock);
	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
		if (layouts_overlapping(lp, &lrp->lr_seg)) {
			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
			found++;
		}
	}
	if (!list_empty(&ls->ls_layouts)) {
		if (found)
			nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
		lrp->lrs_present = 1;
	} else {
		trace_layoutstate_unhash(&ls->ls_stid.sc_stateid);
		nfs4_unhash_stid(&ls->ls_stid);
		lrp->lrs_present = 0;
	}
	spin_unlock(&ls->ls_lock);

	mutex_unlock(&ls->ls_mutex);
	nfs4_put_stid(&ls->ls_stid);
	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}

__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls, *n;
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_layout *lp, *t;
	LIST_HEAD(reaplist);

	lrp->lrs_present = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
		if (ls->ls_layout_type != lrp->lr_layout_type)
			continue;

		if (lrp->lr_return_type == RETURN_FSID &&
		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
				   &cstate->current_fh.fh_handle))
			continue;

		spin_lock(&ls->ls_lock);
		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
			if (lrp->lr_seg.iomode == IOMODE_ANY ||
			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
				list_move_tail(&lp->lo_perstate, &reaplist);
		}
		spin_unlock(&ls->ls_lock);
	}
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
	return 0;
}

static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
		struct list_head *reaplist)
{
	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, reaplist);
	spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
		nfsd4_return_all_layouts(ls, &reaplist);
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&fp->fi_lock);
	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
		if (ls->ls_stid.sc_client == clp)
			nfsd4_return_all_layouts(ls, &reaplist);
	}
	spin_unlock(&fp->fi_lock);

	nfsd4_free_layouts(&reaplist);
}

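/*
 * Fence a client that did not respond to a layout recall by invoking
 * the /sbin/nfsd-recall-failed helper with the client address and the
 * super block ID of the exported file system.
 */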
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	char addr_str[INET6_ADDRSTRLEN];
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int error;

	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"  Fencing..\n", addr_str);

	argv[0] = "/sbin/nfsd-recall-failed";
	argv[1] = addr_str;
	argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id;
	argv[3] = NULL;

	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	if (error)
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
			addr_str, error);
}

static void
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);

	mutex_lock(&ls->ls_mutex);
	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
	mutex_unlock(&ls->ls_mutex);
}

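/*
 * Handle the reply to a CB_LAYOUTRECALL: poll the client for up to two
 * lease periods while it returns the layout, and fence it if it fails
 * to respond or errors out.
 */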
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	struct nfsd_net *nn;
	ktime_t now, cutoff;
	const struct nfsd4_layout_ops *ops;

	switch (task->tk_status) {
	case 0:
	case -NFS4ERR_DELAY:
		/*
		 * Anything left? If not, then call it done. Note that we don't
		 * take the spinlock since this is an optimization and nothing
		 * should get added until the cb counter goes to zero.
		 */
		if (list_empty(&ls->ls_layouts))
			return 1;

		/* Poll the client until it's done with the layout */
		now = ktime_get();
		nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);

		/* Client gets 2 lease periods to return it */
		cutoff = ktime_add_ns(task->tk_start,
			nn->nfsd4_lease * NSEC_PER_SEC * 2);

		if (ktime_before(now, cutoff)) {
			rpc_delay(task, HZ/100); /* 10 milliseconds */
			return 0;
		}
		/* Fallthrough */
	case -NFS4ERR_NOMATCHING_LAYOUT:
		trace_layout_recall_done(&ls->ls_stid.sc_stateid);
		task->tk_status = 0;
		return 1;
	default:
		/*
		 * Unknown error or non-responding client, we'll need to fence.
		 */
		trace_layout_recall_fail(&ls->ls_stid.sc_stateid);

		ops = nfsd4_layout_ops[ls->ls_layout_type];
		if (ops->fence_client)
			ops->fence_client(ls);
		else
			nfsd4_cb_layout_fail(ls);
		return -1;
	}
}

static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	trace_layout_recall_release(&ls->ls_stid.sc_stateid);

	nfsd4_return_all_layouts(ls, &reaplist);
	nfsd4_free_layouts(&reaplist);
	nfs4_put_stid(&ls->ls_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
	.prepare	= nfsd4_cb_layout_prepare,
	.done		= nfsd4_cb_layout_done,
	.release	= nfsd4_cb_layout_release,
};

static bool
nfsd4_layout_lm_break(struct file_lock *fl)
{
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a layout isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;
	nfsd4_recall_file_layout(fl->fl_owner);
	return false;
}

static int
nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
		struct list_head *dispose)
{
	BUG_ON(!(arg & F_UNLCK));
	return lease_modify(onlist, arg, dispose);
}

static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
	.lm_break	= nfsd4_layout_lm_break,
	.lm_change	= nfsd4_layout_lm_change,
};

int
nfsd4_init_pnfs(void)
{
	int i;

	for (i = 0; i < DEVID_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nfsd_devid_hash[i]);

	nfs4_layout_cache = kmem_cache_create("nfs4_layout",
			sizeof(struct nfs4_layout), 0, 0, NULL);
	if (!nfs4_layout_cache)
		return -ENOMEM;

	nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
			sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
	if (!nfs4_layout_stateid_cache) {
		kmem_cache_destroy(nfs4_layout_cache);
		return -ENOMEM;
	}
	return 0;
}

void
nfsd4_exit_pnfs(void)
{
	int i;

	kmem_cache_destroy(nfs4_layout_cache);
	kmem_cache_destroy(nfs4_layout_stateid_cache);

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		struct nfsd4_deviceid_map *map, *n;

		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
			kfree(map);
	}
}