fs/ceph/mds_client.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/fs.h>
4 #include <linux/wait.h>
5 #include <linux/slab.h>
6 #include <linux/gfp.h>
7 #include <linux/sched.h>
8 #include <linux/debugfs.h>
9 #include <linux/seq_file.h>
10 #include <linux/utsname.h>
11 #include <linux/ratelimit.h>
12
13 #include "super.h"
14 #include "mds_client.h"
15
16 #include <linux/ceph/ceph_features.h>
17 #include <linux/ceph/messenger.h>
18 #include <linux/ceph/decode.h>
19 #include <linux/ceph/pagelist.h>
20 #include <linux/ceph/auth.h>
21 #include <linux/ceph/debugfs.h>
22
23 /*
24  * A cluster of MDS (metadata server) daemons is responsible for
25  * managing the file system namespace (the directory hierarchy and
26  * inodes) and for coordinating shared access to storage.  Metadata is
27  * partitioned hierarchically across a number of servers, and that
28  * partition varies over time as the cluster adjusts the distribution
29  * in order to balance load.
30  *
31  * The MDS client is primarily responsible for managing synchronous
32  * metadata requests for operations like open, unlink, and so forth.
33  * If there is an MDS failure, we find out about it when we (possibly
34  * request and) receive a new MDS map, and can resubmit affected
35  * requests.
36  *
37  * For the most part, though, we take advantage of a lossless
38  * communications channel to the MDS, and do not need to worry about
39  * timing out or resubmitting requests.
40  *
41  * We maintain a stateful "session" with each MDS we interact with.
42  * Within each session, we send periodic heartbeat messages to ensure
43  * any capabilities or leases we have been issued remain valid.  If
44  * the session times out and goes stale, our leases and capabilities
45  * are no longer valid.
46  */
47
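/*
 * Scratch state carried while encoding an MDS reconnect message:
 * nr_caps counts the caps encoded so far, pagelist holds the encoded
 * payload, and msg_version selects the message encoding version.
 */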
48 struct ceph_reconnect_state {
49         int nr_caps;
50         struct ceph_pagelist *pagelist;
51         unsigned msg_version;
52 };
53
54 static void __wake_requests(struct ceph_mds_client *mdsc,
55                             struct list_head *head);
56
57 static const struct ceph_connection_operations mds_con_ops;
58
59
60 /*
61  * mds reply parsing
62  */
63
64 /*
65  * parse individual inode info
66  */
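/*
 * The encoded inode record is: a ceph_mds_reply_inode (plus its
 * fragtree split array), a length-prefixed symlink target, an optional
 * dir layout, a length-prefixed xattr blob, and optional inline-data
 * and pool-namespace fields gated on the peer's feature bits.
 */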
67 static int parse_reply_info_in(void **p, void *end,
68                                struct ceph_mds_reply_info_in *info,
69                                u64 features)
70 {
71         int err = -EIO;
72
73         info->in = *p;
74         *p += sizeof(struct ceph_mds_reply_inode) +
75                 sizeof(*info->in->fragtree.splits) *
76                 le32_to_cpu(info->in->fragtree.nsplits);
77
78         ceph_decode_32_safe(p, end, info->symlink_len, bad);
79         ceph_decode_need(p, end, info->symlink_len, bad);
80         info->symlink = *p;
81         *p += info->symlink_len;
82
83         if (features & CEPH_FEATURE_DIRLAYOUTHASH)
84                 ceph_decode_copy_safe(p, end, &info->dir_layout,
85                                       sizeof(info->dir_layout), bad);
86         else
87                 memset(&info->dir_layout, 0, sizeof(info->dir_layout));
88
89         ceph_decode_32_safe(p, end, info->xattr_len, bad);
90         ceph_decode_need(p, end, info->xattr_len, bad);
91         info->xattr_data = *p;
92         *p += info->xattr_len;
93
94         if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
95                 ceph_decode_64_safe(p, end, info->inline_version, bad);
96                 ceph_decode_32_safe(p, end, info->inline_len, bad);
97                 ceph_decode_need(p, end, info->inline_len, bad);
98                 info->inline_data = *p;
99                 *p += info->inline_len;
100         } else
101                 info->inline_version = CEPH_INLINE_NONE;
102
103         info->pool_ns_len = 0;
104         info->pool_ns_data = NULL;
105         if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
106                 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
107                 if (info->pool_ns_len > 0) {
108                         ceph_decode_need(p, end, info->pool_ns_len, bad);
109                         info->pool_ns_data = *p;
110                         *p += info->pool_ns_len;
111                 }
112         }
113
114         return 0;
115 bad:
116         return err;
117 }
118
119 /*
120  * parse a normal reply, which may contain a (dir+)dentry and/or a
121  * target inode.
122  */
123 static int parse_reply_info_trace(void **p, void *end,
124                                   struct ceph_mds_reply_info_parsed *info,
125                                   u64 features)
126 {
127         int err;
128
129         if (info->head->is_dentry) {
130                 err = parse_reply_info_in(p, end, &info->diri, features);
131                 if (err < 0)
132                         goto out_bad;
133
134                 if (unlikely(*p + sizeof(*info->dirfrag) > end))
135                         goto bad;
136                 info->dirfrag = *p;
137                 *p += sizeof(*info->dirfrag) +
138                         sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
139                 if (unlikely(*p > end))
140                         goto bad;
141
142                 ceph_decode_32_safe(p, end, info->dname_len, bad);
143                 ceph_decode_need(p, end, info->dname_len, bad);
144                 info->dname = *p;
145                 *p += info->dname_len;
146                 info->dlease = *p;
147                 *p += sizeof(*info->dlease);
148         }
149
150         if (info->head->is_target) {
151                 err = parse_reply_info_in(p, end, &info->targeti, features);
152                 if (err < 0)
153                         goto out_bad;
154         }
155
156         if (unlikely(*p != end))
157                 goto bad;
158         return 0;
159
160 bad:
161         err = -EIO;
162 out_bad:
163         pr_err("problem parsing mds trace %d\n", err);
164         return err;
165 }
166
167 /*
168  * parse readdir results
169  */
170 static int parse_reply_info_dir(void **p, void *end,
171                                 struct ceph_mds_reply_info_parsed *info,
172                                 u64 features)
173 {
174         u32 num, i = 0;
175         int err;
176
177         info->dir_dir = *p;
178         if (*p + sizeof(*info->dir_dir) > end)
179                 goto bad;
180         *p += sizeof(*info->dir_dir) +
181                 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
182         if (*p > end)
183                 goto bad;
184
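        /* readdir header: u32 entry count, then a u16 flags word carrying
         * the end/complete/hash_order bits decoded below */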
185         ceph_decode_need(p, end, sizeof(num) + 2, bad);
186         num = ceph_decode_32(p);
187         {
188                 u16 flags = ceph_decode_16(p);
189                 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
190                 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
191                 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
192         }
193         if (num == 0)
194                 goto done;
195
196         BUG_ON(!info->dir_entries);
197         if ((unsigned long)(info->dir_entries + num) >
198             (unsigned long)info->dir_entries + info->dir_buf_size) {
199                 pr_err("dir contents are larger than expected\n");
200                 WARN_ON(1);
201                 goto bad;
202         }
203
204         info->dir_nr = num;
205         while (num) {
206                 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
207                 /* dentry */
208                 ceph_decode_need(p, end, sizeof(u32)*2, bad);
209                 rde->name_len = ceph_decode_32(p);
210                 ceph_decode_need(p, end, rde->name_len, bad);
211                 rde->name = *p;
212                 *p += rde->name_len;
213                 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
214                 rde->lease = *p;
215                 *p += sizeof(struct ceph_mds_reply_lease);
216
217                 /* inode */
218                 err = parse_reply_info_in(p, end, &rde->inode, features);
219                 if (err < 0)
220                         goto out_bad;
221                 /* ceph_readdir_prepopulate() will update it */
222                 rde->offset = 0;
223                 i++;
224                 num--;
225         }
226
227 done:
228         if (*p != end)
229                 goto bad;
230         return 0;
231
232 bad:
233         err = -EIO;
234 out_bad:
235         pr_err("problem parsing dir contents %d\n", err);
236         return err;
237 }
238
239 /*
240  * parse fcntl F_GETLK results
241  */
242 static int parse_reply_info_filelock(void **p, void *end,
243                                      struct ceph_mds_reply_info_parsed *info,
244                                      u64 features)
245 {
246         if (*p + sizeof(*info->filelock_reply) > end)
247                 goto bad;
248
249         info->filelock_reply = *p;
250         *p += sizeof(*info->filelock_reply);
251
252         if (unlikely(*p != end))
253                 goto bad;
254         return 0;
255
256 bad:
257         return -EIO;
258 }
259
260 /*
261  * parse create results
262  */
263 static int parse_reply_info_create(void **p, void *end,
264                                   struct ceph_mds_reply_info_parsed *info,
265                                   u64 features)
266 {
267         if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
268                 if (*p == end) {
269                         info->has_create_ino = false;
270                 } else {
271                         info->has_create_ino = true;
272                         info->ino = ceph_decode_64(p);
273                 }
274         }
275
276         if (unlikely(*p != end))
277                 goto bad;
278         return 0;
279
280 bad:
281         return -EIO;
282 }
283
284 /*
285  * parse extra results
286  */
287 static int parse_reply_info_extra(void **p, void *end,
288                                   struct ceph_mds_reply_info_parsed *info,
289                                   u64 features)
290 {
291         u32 op = le32_to_cpu(info->head->op);
292
293         if (op == CEPH_MDS_OP_GETFILELOCK)
294                 return parse_reply_info_filelock(p, end, info, features);
295         else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
296                 return parse_reply_info_dir(p, end, info, features);
297         else if (op == CEPH_MDS_OP_CREATE)
298                 return parse_reply_info_create(p, end, info, features);
299         else
300                 return -EIO;
301 }
302
303 /*
304  * parse entire mds reply
305  */
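/*
 * The reply body is three length-prefixed sections: the trace
 * (dentry/inode metadata), an op-specific "extra" blob, and the
 * snap blob.
 */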
306 static int parse_reply_info(struct ceph_msg *msg,
307                             struct ceph_mds_reply_info_parsed *info,
308                             u64 features)
309 {
310         void *p, *end;
311         u32 len;
312         int err;
313
314         info->head = msg->front.iov_base;
315         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
316         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
317
318         /* trace */
319         ceph_decode_32_safe(&p, end, len, bad);
320         if (len > 0) {
321                 ceph_decode_need(&p, end, len, bad);
322                 err = parse_reply_info_trace(&p, p+len, info, features);
323                 if (err < 0)
324                         goto out_bad;
325         }
326
327         /* extra */
328         ceph_decode_32_safe(&p, end, len, bad);
329         if (len > 0) {
330                 ceph_decode_need(&p, end, len, bad);
331                 err = parse_reply_info_extra(&p, p+len, info, features);
332                 if (err < 0)
333                         goto out_bad;
334         }
335
336         /* snap blob */
337         ceph_decode_32_safe(&p, end, len, bad);
338         info->snapblob_len = len;
339         info->snapblob = p;
340         p += len;
341
342         if (p != end)
343                 goto bad;
344         return 0;
345
346 bad:
347         err = -EIO;
348 out_bad:
349         pr_err("mds parse_reply err %d\n", err);
350         return err;
351 }
352
353 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
354 {
355         if (!info->dir_entries)
356                 return;
357         free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
358 }
359
360
361 /*
362  * sessions
363  */
364 const char *ceph_session_state_name(int s)
365 {
366         switch (s) {
367         case CEPH_MDS_SESSION_NEW: return "new";
368         case CEPH_MDS_SESSION_OPENING: return "opening";
369         case CEPH_MDS_SESSION_OPEN: return "open";
370         case CEPH_MDS_SESSION_HUNG: return "hung";
371         case CEPH_MDS_SESSION_CLOSING: return "closing";
372         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
373         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
374         case CEPH_MDS_SESSION_REJECTED: return "rejected";
375         default: return "???";
376         }
377 }
378
379 static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
380 {
381         if (atomic_inc_not_zero(&s->s_ref)) {
382                 dout("mdsc get_session %p %d -> %d\n", s,
383                      atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
384                 return s;
385         } else {
386                 dout("mdsc get_session %p 0 -- FAIL\n", s);
387                 return NULL;
388         }
389 }
390
391 void ceph_put_mds_session(struct ceph_mds_session *s)
392 {
393         dout("mdsc put_session %p %d -> %d\n", s,
394              atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
395         if (atomic_dec_and_test(&s->s_ref)) {
396                 if (s->s_auth.authorizer)
397                         ceph_auth_destroy_authorizer(s->s_auth.authorizer);
398                 kfree(s);
399         }
400 }
401
402 /*
403  * called under mdsc->mutex
404  */
405 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
406                                                    int mds)
407 {
408         struct ceph_mds_session *session;
409
410         if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
411                 return NULL;
412         session = mdsc->sessions[mds];
413         dout("lookup_mds_session %p %d\n", session,
414              atomic_read(&session->s_ref));
415         get_session(session);
416         return session;
417 }
418
419 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
420 {
421         if (mds >= mdsc->max_sessions)
422                 return false;
423         return mdsc->sessions[mds];
424 }
425
426 static int __verify_registered_session(struct ceph_mds_client *mdsc,
427                                        struct ceph_mds_session *s)
428 {
429         if (s->s_mds >= mdsc->max_sessions ||
430             mdsc->sessions[s->s_mds] != s)
431                 return -ENOENT;
432         return 0;
433 }
434
435 /*
436  * create+register a new session for given mds.
437  * called under mdsc->mutex.
438  */
439 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
440                                                  int mds)
441 {
442         struct ceph_mds_session *s;
443
444         if (mds >= mdsc->mdsmap->m_max_mds)
445                 return ERR_PTR(-EINVAL);
446
447         s = kzalloc(sizeof(*s), GFP_NOFS);
448         if (!s)
449                 return ERR_PTR(-ENOMEM);
450         s->s_mdsc = mdsc;
451         s->s_mds = mds;
452         s->s_state = CEPH_MDS_SESSION_NEW;
453         s->s_ttl = 0;
454         s->s_seq = 0;
455         mutex_init(&s->s_mutex);
456
457         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
458
459         spin_lock_init(&s->s_gen_ttl_lock);
460         s->s_cap_gen = 0;
461         s->s_cap_ttl = jiffies - 1;
462
463         spin_lock_init(&s->s_cap_lock);
464         s->s_renew_requested = 0;
465         s->s_renew_seq = 0;
466         INIT_LIST_HEAD(&s->s_caps);
467         s->s_nr_caps = 0;
468         s->s_trim_caps = 0;
469         atomic_set(&s->s_ref, 1);
470         INIT_LIST_HEAD(&s->s_waiting);
471         INIT_LIST_HEAD(&s->s_unsafe);
472         s->s_num_cap_releases = 0;
473         s->s_cap_reconnect = 0;
474         s->s_cap_iterator = NULL;
475         INIT_LIST_HEAD(&s->s_cap_releases);
476         INIT_LIST_HEAD(&s->s_cap_flushing);
477
478         dout("register_session mds%d\n", mds);
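        /* grow the mds-indexed session array to the next power of two,
         * preserving any existing entries */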
479         if (mds >= mdsc->max_sessions) {
480                 int newmax = 1 << get_count_order(mds+1);
481                 struct ceph_mds_session **sa;
482
483                 dout("register_session realloc to %d\n", newmax);
484                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
485                 if (sa == NULL)
486                         goto fail_realloc;
487                 if (mdsc->sessions) {
488                         memcpy(sa, mdsc->sessions,
489                                mdsc->max_sessions * sizeof(void *));
490                         kfree(mdsc->sessions);
491                 }
492                 mdsc->sessions = sa;
493                 mdsc->max_sessions = newmax;
494         }
495         mdsc->sessions[mds] = s;
496         atomic_inc(&mdsc->num_sessions);
497         atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
498
499         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
500                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
501
502         return s;
503
504 fail_realloc:
505         kfree(s);
506         return ERR_PTR(-ENOMEM);
507 }
508
509 /*
510  * called under mdsc->mutex
511  */
512 static void __unregister_session(struct ceph_mds_client *mdsc,
513                                struct ceph_mds_session *s)
514 {
515         dout("__unregister_session mds%d %p\n", s->s_mds, s);
516         BUG_ON(mdsc->sessions[s->s_mds] != s);
517         mdsc->sessions[s->s_mds] = NULL;
518         ceph_con_close(&s->s_con);
519         ceph_put_mds_session(s);
520         atomic_dec(&mdsc->num_sessions);
521 }
522
523 /*
524  * drop session refs in request.
525  *
526  * should be last request ref, or hold mdsc->mutex
527  */
528 static void put_request_session(struct ceph_mds_request *req)
529 {
530         if (req->r_session) {
531                 ceph_put_mds_session(req->r_session);
532                 req->r_session = NULL;
533         }
534 }
535
536 void ceph_mdsc_release_request(struct kref *kref)
537 {
538         struct ceph_mds_request *req = container_of(kref,
539                                                     struct ceph_mds_request,
540                                                     r_kref);
541         destroy_reply_info(&req->r_reply_info);
542         if (req->r_request)
543                 ceph_msg_put(req->r_request);
544         if (req->r_reply)
545                 ceph_msg_put(req->r_reply);
546         if (req->r_inode) {
547                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
548                 iput(req->r_inode);
549         }
550         if (req->r_parent)
551                 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
552         iput(req->r_target_inode);
553         if (req->r_dentry)
554                 dput(req->r_dentry);
555         if (req->r_old_dentry)
556                 dput(req->r_old_dentry);
557         if (req->r_old_dentry_dir) {
558                 /*
559                  * track (and drop pins for) r_old_dentry_dir
560                  * separately, since r_old_dentry's d_parent may have
561                  * changed between the dir mutex being dropped and
562                  * this request being freed.
563                  */
564                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
565                                   CEPH_CAP_PIN);
566                 iput(req->r_old_dentry_dir);
567         }
568         kfree(req->r_path1);
569         kfree(req->r_path2);
570         if (req->r_pagelist)
571                 ceph_pagelist_release(req->r_pagelist);
572         put_request_session(req);
573         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
574         kfree(req);
575 }
576
577 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
578
579 /*
580  * lookup request, bump ref if found.
581  *
582  * called under mdsc->mutex.
583  */
584 static struct ceph_mds_request *
585 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
586 {
587         struct ceph_mds_request *req;
588
589         req = lookup_request(&mdsc->request_tree, tid);
590         if (req)
591                 ceph_mdsc_get_request(req);
592
593         return req;
594 }
595
596 /*
597  * Register an in-flight request, and assign a tid.  Link to the directory
598  * we are modifying (if any).
599  *
600  * Called under mdsc->mutex.
601  */
602 static void __register_request(struct ceph_mds_client *mdsc,
603                                struct ceph_mds_request *req,
604                                struct inode *dir)
605 {
606         req->r_tid = ++mdsc->last_tid;
607         if (req->r_num_caps)
608                 ceph_reserve_caps(mdsc, &req->r_caps_reservation,
609                                   req->r_num_caps);
610         dout("__register_request %p tid %lld\n", req, req->r_tid);
611         ceph_mdsc_get_request(req);
612         insert_request(&mdsc->request_tree, req);
613
614         req->r_uid = current_fsuid();
615         req->r_gid = current_fsgid();
616
617         if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
618                 mdsc->oldest_tid = req->r_tid;
619
620         if (dir) {
621                 ihold(dir);
622                 req->r_unsafe_dir = dir;
623         }
624 }
625
626 static void __unregister_request(struct ceph_mds_client *mdsc,
627                                  struct ceph_mds_request *req)
628 {
629         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
630
631         /* Never leave an unregistered request on an unsafe list! */
632         list_del_init(&req->r_unsafe_item);
633
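        /* if this was the oldest request, advance oldest_tid to the next
         * non-SETFILELOCK request still in the tree */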
634         if (req->r_tid == mdsc->oldest_tid) {
635                 struct rb_node *p = rb_next(&req->r_node);
636                 mdsc->oldest_tid = 0;
637                 while (p) {
638                         struct ceph_mds_request *next_req =
639                                 rb_entry(p, struct ceph_mds_request, r_node);
640                         if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
641                                 mdsc->oldest_tid = next_req->r_tid;
642                                 break;
643                         }
644                         p = rb_next(p);
645                 }
646         }
647
648         erase_request(&mdsc->request_tree, req);
649
650         if (req->r_unsafe_dir  &&
651             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
652                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
653                 spin_lock(&ci->i_unsafe_lock);
654                 list_del_init(&req->r_unsafe_dir_item);
655                 spin_unlock(&ci->i_unsafe_lock);
656         }
657         if (req->r_target_inode &&
658             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
659                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
660                 spin_lock(&ci->i_unsafe_lock);
661                 list_del_init(&req->r_unsafe_target_item);
662                 spin_unlock(&ci->i_unsafe_lock);
663         }
664
665         if (req->r_unsafe_dir) {
666                 iput(req->r_unsafe_dir);
667                 req->r_unsafe_dir = NULL;
668         }
669
670         complete_all(&req->r_safe_completion);
671
672         ceph_mdsc_put_request(req);
673 }
674
675 /*
676  * Walk back up the dentry tree until we hit a dentry representing a
677  * non-snapshot inode. We do this using the rcu_read_lock (which must be held
678  * when calling this) to ensure that the objects won't disappear while we're
679  * working with them. Once we hit a candidate dentry, we attempt to take a
680  * reference to it, and return that as the result.
681  */
682 static struct inode *get_nonsnap_parent(struct dentry *dentry)
683 {
684         struct inode *inode = NULL;
685
686         while (dentry && !IS_ROOT(dentry)) {
687                 inode = d_inode_rcu(dentry);
688                 if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
689                         break;
690                 dentry = dentry->d_parent;
691         }
692         if (inode)
693                 inode = igrab(inode);
694         return inode;
695 }
696
697 /*
698  * Choose mds to send request to next.  If there is a hint set in the
699  * request (e.g., due to a prior forward hint from the mds), use that.
700  * Otherwise, consult frag tree and/or caps to identify the
701  * appropriate mds.  If all else fails, choose randomly.
702  *
703  * Called under mdsc->mutex.
704  */
705 static int __choose_mds(struct ceph_mds_client *mdsc,
706                         struct ceph_mds_request *req)
707 {
708         struct inode *inode;
709         struct ceph_inode_info *ci;
710         struct ceph_cap *cap;
711         int mode = req->r_direct_mode;
712         int mds = -1;
713         u32 hash = req->r_direct_hash;
714         bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
715
716         /*
717          * is there a specific mds we should try?  ignore hint if we have
718          * no session and the mds is not up (active or recovering).
719          */
720         if (req->r_resend_mds >= 0 &&
721             (__have_session(mdsc, req->r_resend_mds) ||
722              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
723                 dout("choose_mds using resend_mds mds%d\n",
724                      req->r_resend_mds);
725                 return req->r_resend_mds;
726         }
727
728         if (mode == USE_RANDOM_MDS)
729                 goto random;
730
731         inode = NULL;
732         if (req->r_inode) {
733                 inode = req->r_inode;
734                 ihold(inode);
735         } else if (req->r_dentry) {
736                 /* ignore race with rename; old or new d_parent is okay */
737                 struct dentry *parent;
738                 struct inode *dir;
739
740                 rcu_read_lock();
741                 parent = req->r_dentry->d_parent;
742                 dir = req->r_parent ? : d_inode_rcu(parent);
743
744                 if (!dir || dir->i_sb != mdsc->fsc->sb) {
745                         /*  not this fs or parent went negative */
746                         inode = d_inode(req->r_dentry);
747                         if (inode)
748                                 ihold(inode);
749                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
750                         /* direct snapped/virtual snapdir requests
751                          * based on parent dir inode */
752                         inode = get_nonsnap_parent(parent);
753                         dout("__choose_mds using nonsnap parent %p\n", inode);
754                 } else {
755                         /* dentry target */
756                         inode = d_inode(req->r_dentry);
757                         if (!inode || mode == USE_AUTH_MDS) {
758                                 /* dir + name */
759                                 inode = igrab(dir);
760                                 hash = ceph_dentry_hash(dir, req->r_dentry);
761                                 is_hash = true;
762                         } else {
763                                 ihold(inode);
764                         }
765                 }
766                 rcu_read_unlock();
767         }
768
769         dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
770              (int)hash, mode);
771         if (!inode)
772                 goto random;
773         ci = ceph_inode(inode);
774
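        /* For hashed requests on a directory, consult the fragtree: with
         * USE_ANY_MDS pick a random replica from the frag's dist list;
         * otherwise, or if that mds is not active, fall back to the
         * frag's auth mds. */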
775         if (is_hash && S_ISDIR(inode->i_mode)) {
776                 struct ceph_inode_frag frag;
777                 int found;
778
779                 ceph_choose_frag(ci, hash, &frag, &found);
780                 if (found) {
781                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
782                                 u8 r;
783
784                                 /* choose a random replica */
785                                 get_random_bytes(&r, 1);
786                                 r %= frag.ndist;
787                                 mds = frag.dist[r];
788                                 dout("choose_mds %p %llx.%llx "
789                                      "frag %u mds%d (%d/%d)\n",
790                                      inode, ceph_vinop(inode),
791                                      frag.frag, mds,
792                                      (int)r, frag.ndist);
793                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
794                                     CEPH_MDS_STATE_ACTIVE)
795                                         goto out;
796                         }
797
798                         /* since this file/dir wasn't known to be
799                          * replicated, we want to look for the
800                          * authoritative mds. */
801                         mode = USE_AUTH_MDS;
802                         if (frag.mds >= 0) {
803                                 /* choose auth mds */
804                                 mds = frag.mds;
805                                 dout("choose_mds %p %llx.%llx "
806                                      "frag %u mds%d (auth)\n",
807                                      inode, ceph_vinop(inode), frag.frag, mds);
808                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
809                                     CEPH_MDS_STATE_ACTIVE)
810                                         goto out;
811                         }
812                 }
813         }
814
815         spin_lock(&ci->i_ceph_lock);
816         cap = NULL;
817         if (mode == USE_AUTH_MDS)
818                 cap = ci->i_auth_cap;
819         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
820                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
821         if (!cap) {
822                 spin_unlock(&ci->i_ceph_lock);
823                 iput(inode);
824                 goto random;
825         }
826         mds = cap->session->s_mds;
827         dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
828              inode, ceph_vinop(inode), mds,
829              cap == ci->i_auth_cap ? "auth " : "", cap);
830         spin_unlock(&ci->i_ceph_lock);
831 out:
832         iput(inode);
833         return mds;
834
835 random:
836         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
837         dout("choose_mds chose random mds%d\n", mds);
838         return mds;
839 }
840
841
842 /*
843  * session messages
844  */
845 static struct ceph_msg *create_session_msg(u32 op, u64 seq)
846 {
847         struct ceph_msg *msg;
848         struct ceph_mds_session_head *h;
849
850         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
851                            false);
852         if (!msg) {
853                 pr_err("create_session_msg ENOMEM creating msg\n");
854                 return NULL;
855         }
856         h = msg->front.iov_base;
857         h->op = cpu_to_le32(op);
858         h->seq = cpu_to_le64(seq);
859
860         return msg;
861 }
862
863 /*
864  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
865  * to include additional client metadata fields.
866  */
867 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
868 {
869         struct ceph_msg *msg;
870         struct ceph_mds_session_head *h;
871         int i = -1;
872         int metadata_bytes = 0;
873         int metadata_key_count = 0;
874         struct ceph_options *opt = mdsc->fsc->client->options;
875         struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
876         void *p;
877
878         const char* metadata[][2] = {
879                 {"hostname", utsname()->nodename},
880                 {"kernel_version", utsname()->release},
881                 {"entity_id", opt->name ? : ""},
882                 {"root", fsopt->server_path ? : "/"},
883                 {NULL, NULL}
884         };
885
886         /* Calculate serialized length of metadata */
887         metadata_bytes = 4;  /* map length */
888         for (i = 0; metadata[i][0] != NULL; ++i) {
889                 metadata_bytes += 8 + strlen(metadata[i][0]) +
890                         strlen(metadata[i][1]);
891                 metadata_key_count++;
892         }
893
894         /* Allocate the message */
895         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
896                            GFP_NOFS, false);
897         if (!msg) {
898                 pr_err("create_session_msg ENOMEM creating msg\n");
899                 return NULL;
900         }
901         h = msg->front.iov_base;
902         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
903         h->seq = cpu_to_le64(seq);
904
905         /*
906          * Serialize client metadata into waiting buffer space, using
907          * the format that userspace expects for map<string, string>
908          *
909          * ClientSession messages with metadata are v2
910          */
911         msg->hdr.version = cpu_to_le16(2);
912         msg->hdr.compat_version = cpu_to_le16(1);
913
914         /* The write pointer, following the session_head structure */
915         p = msg->front.iov_base + sizeof(*h);
916
917         /* Number of entries in the map */
918         ceph_encode_32(&p, metadata_key_count);
919
920         /* Two length-prefixed strings for each entry in the map */
921         for (i = 0; metadata[i][0] != NULL; ++i) {
922                 size_t const key_len = strlen(metadata[i][0]);
923                 size_t const val_len = strlen(metadata[i][1]);
924
925                 ceph_encode_32(&p, key_len);
926                 memcpy(p, metadata[i][0], key_len);
927                 p += key_len;
928                 ceph_encode_32(&p, val_len);
929                 memcpy(p, metadata[i][1], val_len);
930                 p += val_len;
931         }
932
933         return msg;
934 }
935
936 /*
937  * send session open request.
938  *
939  * called under mdsc->mutex
940  */
941 static int __open_session(struct ceph_mds_client *mdsc,
942                           struct ceph_mds_session *session)
943 {
944         struct ceph_msg *msg;
945         int mstate;
946         int mds = session->s_mds;
947
948         /* wait for mds to go active? */
949         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
950         dout("open_session to mds%d (%s)\n", mds,
951              ceph_mds_state_name(mstate));
952         session->s_state = CEPH_MDS_SESSION_OPENING;
953         session->s_renew_requested = jiffies;
954
955         /* send connect message */
956         msg = create_session_open_msg(mdsc, session->s_seq);
957         if (!msg)
958                 return -ENOMEM;
959         ceph_con_send(&session->s_con, msg);
960         return 0;
961 }
962
963 /*
964  * open sessions for any export targets for the given mds
965  *
966  * called under mdsc->mutex
967  */
968 static struct ceph_mds_session *
969 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
970 {
971         struct ceph_mds_session *session;
972
973         session = __ceph_lookup_mds_session(mdsc, target);
974         if (!session) {
975                 session = register_session(mdsc, target);
976                 if (IS_ERR(session))
977                         return session;
978         }
979         if (session->s_state == CEPH_MDS_SESSION_NEW ||
980             session->s_state == CEPH_MDS_SESSION_CLOSING)
981                 __open_session(mdsc, session);
982
983         return session;
984 }
985
986 struct ceph_mds_session *
987 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
988 {
989         struct ceph_mds_session *session;
990
991         dout("open_export_target_session to mds%d\n", target);
992
993         mutex_lock(&mdsc->mutex);
994         session = __open_export_target_session(mdsc, target);
995         mutex_unlock(&mdsc->mutex);
996
997         return session;
998 }
999
1000 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1001                                           struct ceph_mds_session *session)
1002 {
1003         struct ceph_mds_info *mi;
1004         struct ceph_mds_session *ts;
1005         int i, mds = session->s_mds;
1006
1007         if (mds >= mdsc->mdsmap->m_max_mds)
1008                 return;
1009
1010         mi = &mdsc->mdsmap->m_info[mds];
1011         dout("open_export_target_sessions for mds%d (%d targets)\n",
1012              session->s_mds, mi->num_export_targets);
1013
1014         for (i = 0; i < mi->num_export_targets; i++) {
1015                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1016                 if (!IS_ERR(ts))
1017                         ceph_put_mds_session(ts);
1018         }
1019 }
1020
1021 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
1022                                            struct ceph_mds_session *session)
1023 {
1024         mutex_lock(&mdsc->mutex);
1025         __open_export_target_sessions(mdsc, session);
1026         mutex_unlock(&mdsc->mutex);
1027 }
1028
1029 /*
1030  * session caps
1031  */
1032
1033 /* caller holds s_cap_lock, we drop it */
1034 static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
1035                                  struct ceph_mds_session *session)
1036         __releases(session->s_cap_lock)
1037 {
1038         LIST_HEAD(tmp_list);
1039         list_splice_init(&session->s_cap_releases, &tmp_list);
1040         session->s_num_cap_releases = 0;
1041         spin_unlock(&session->s_cap_lock);
1042
1043         dout("cleanup_cap_releases mds%d\n", session->s_mds);
1044         while (!list_empty(&tmp_list)) {
1045                 struct ceph_cap *cap;
1046                 /* zero out the in-progress message */
1047                 cap = list_first_entry(&tmp_list,
1048                                         struct ceph_cap, session_caps);
1049                 list_del(&cap->session_caps);
1050                 ceph_put_cap(mdsc, cap);
1051         }
1052 }
1053
1054 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1055                                      struct ceph_mds_session *session)
1056 {
1057         struct ceph_mds_request *req;
1058         struct rb_node *p;
1059
1060         dout("cleanup_session_requests mds%d\n", session->s_mds);
1061         mutex_lock(&mdsc->mutex);
1062         while (!list_empty(&session->s_unsafe)) {
1063                 req = list_first_entry(&session->s_unsafe,
1064                                        struct ceph_mds_request, r_unsafe_item);
1065                 pr_warn_ratelimited(" dropping unsafe request %llu\n",
1066                                     req->r_tid);
1067                 __unregister_request(mdsc, req);
1068         }
1069         /* zero r_attempts, so kick_requests() will re-send requests */
1070         p = rb_first(&mdsc->request_tree);
1071         while (p) {
1072                 req = rb_entry(p, struct ceph_mds_request, r_node);
1073                 p = rb_next(p);
1074                 if (req->r_session &&
1075                     req->r_session->s_mds == session->s_mds)
1076                         req->r_attempts = 0;
1077         }
1078         mutex_unlock(&mdsc->mutex);
1079 }
1080
1081 /*
1082  * Helper to safely iterate over all caps associated with a session, with
1083  * special care taken to handle a racing __ceph_remove_cap().
1084  *
1085  * Caller must hold session s_mutex.
1086  */
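/*
 * s_cap_iterator pins the current cap while s_cap_lock is dropped around
 * the callback; a cap removed by a racing __ceph_remove_cap() (ci == NULL)
 * is finished off here: unlinked from the session and either queued on
 * s_cap_releases or freed once the lock is dropped.
 */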
1087 static int iterate_session_caps(struct ceph_mds_session *session,
1088                                  int (*cb)(struct inode *, struct ceph_cap *,
1089                                             void *), void *arg)
1090 {
1091         struct list_head *p;
1092         struct ceph_cap *cap;
1093         struct inode *inode, *last_inode = NULL;
1094         struct ceph_cap *old_cap = NULL;
1095         int ret;
1096
1097         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1098         spin_lock(&session->s_cap_lock);
1099         p = session->s_caps.next;
1100         while (p != &session->s_caps) {
1101                 cap = list_entry(p, struct ceph_cap, session_caps);
1102                 inode = igrab(&cap->ci->vfs_inode);
1103                 if (!inode) {
1104                         p = p->next;
1105                         continue;
1106                 }
1107                 session->s_cap_iterator = cap;
1108                 spin_unlock(&session->s_cap_lock);
1109
1110                 if (last_inode) {
1111                         iput(last_inode);
1112                         last_inode = NULL;
1113                 }
1114                 if (old_cap) {
1115                         ceph_put_cap(session->s_mdsc, old_cap);
1116                         old_cap = NULL;
1117                 }
1118
1119                 ret = cb(inode, cap, arg);
1120                 last_inode = inode;
1121
1122                 spin_lock(&session->s_cap_lock);
1123                 p = p->next;
1124                 if (cap->ci == NULL) {
1125                         dout("iterate_session_caps  finishing cap %p removal\n",
1126                              cap);
1127                         BUG_ON(cap->session != session);
1128                         cap->session = NULL;
1129                         list_del_init(&cap->session_caps);
1130                         session->s_nr_caps--;
1131                         if (cap->queue_release) {
1132                                 list_add_tail(&cap->session_caps,
1133                                               &session->s_cap_releases);
1134                                 session->s_num_cap_releases++;
1135                         } else {
1136                                 old_cap = cap;  /* put_cap it w/o locks held */
1137                         }
1138                 }
1139                 if (ret < 0)
1140                         goto out;
1141         }
1142         ret = 0;
1143 out:
1144         session->s_cap_iterator = NULL;
1145         spin_unlock(&session->s_cap_lock);
1146
1147         iput(last_inode);
1148         if (old_cap)
1149                 ceph_put_cap(session->s_mdsc, old_cap);
1150
1151         return ret;
1152 }
1153
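/*
 * remove_session_caps() callback: detach this cap from its inode and, if
 * no auth cap remains, drop any dirty/flushing state that can no longer
 * be written back, queueing an invalidate when dirty page cache is left
 * behind on a forced unmount.
 */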
1154 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1155                                   void *arg)
1156 {
1157         struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1158         struct ceph_inode_info *ci = ceph_inode(inode);
1159         LIST_HEAD(to_remove);
1160         bool drop = false;
1161         bool invalidate = false;
1162
1163         dout("removing cap %p, ci is %p, inode is %p\n",
1164              cap, ci, &ci->vfs_inode);
1165         spin_lock(&ci->i_ceph_lock);
1166         __ceph_remove_cap(cap, false);
1167         if (!ci->i_auth_cap) {
1168                 struct ceph_cap_flush *cf;
1169                 struct ceph_mds_client *mdsc = fsc->mdsc;
1170
1171                 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
1172
1173                 if (ci->i_wrbuffer_ref > 0 &&
1174                     READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
1175                         invalidate = true;
1176
1177                 while (!list_empty(&ci->i_cap_flush_list)) {
1178                         cf = list_first_entry(&ci->i_cap_flush_list,
1179                                               struct ceph_cap_flush, i_list);
1180                         list_move(&cf->i_list, &to_remove);
1181                 }
1182
1183                 spin_lock(&mdsc->cap_dirty_lock);
1184
1185                 list_for_each_entry(cf, &to_remove, i_list)
1186                         list_del(&cf->g_list);
1187
1188                 if (!list_empty(&ci->i_dirty_item)) {
1189                         pr_warn_ratelimited(
1190                                 " dropping dirty %s state for %p %lld\n",
1191                                 ceph_cap_string(ci->i_dirty_caps),
1192                                 inode, ceph_ino(inode));
1193                         ci->i_dirty_caps = 0;
1194                         list_del_init(&ci->i_dirty_item);
1195                         drop = true;
1196                 }
1197                 if (!list_empty(&ci->i_flushing_item)) {
1198                         pr_warn_ratelimited(
1199                                 " dropping dirty+flushing %s state for %p %lld\n",
1200                                 ceph_cap_string(ci->i_flushing_caps),
1201                                 inode, ceph_ino(inode));
1202                         ci->i_flushing_caps = 0;
1203                         list_del_init(&ci->i_flushing_item);
1204                         mdsc->num_cap_flushing--;
1205                         drop = true;
1206                 }
1207                 spin_unlock(&mdsc->cap_dirty_lock);
1208
1209                 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
1210                         list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1211                         ci->i_prealloc_cap_flush = NULL;
1212                 }
1213         }
1214         spin_unlock(&ci->i_ceph_lock);
1215         while (!list_empty(&to_remove)) {
1216                 struct ceph_cap_flush *cf;
1217                 cf = list_first_entry(&to_remove,
1218                                       struct ceph_cap_flush, i_list);
1219                 list_del(&cf->i_list);
1220                 ceph_free_cap_flush(cf);
1221         }
1222
1223         wake_up_all(&ci->i_cap_wq);
1224         if (invalidate)
1225                 ceph_queue_invalidate(inode);
1226         if (drop)
1227                 iput(inode);
1228         return 0;
1229 }
1230
1231 /*
1232  * caller must hold session s_mutex
1233  */
1234 static void remove_session_caps(struct ceph_mds_session *session)
1235 {
1236         struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1237         struct super_block *sb = fsc->sb;
1238         dout("remove_session_caps on %p\n", session);
1239         iterate_session_caps(session, remove_session_caps_cb, fsc);
1240
1241         wake_up_all(&fsc->mdsc->cap_flushing_wq);
1242
1243         spin_lock(&session->s_cap_lock);
1244         if (session->s_nr_caps > 0) {
1245                 struct inode *inode;
1246                 struct ceph_cap *cap, *prev = NULL;
1247                 struct ceph_vino vino;
1248                 /*
1249                  * iterate_session_caps() skips inodes that are being
1250                  * deleted; we need to wait until deletions are complete.
1251                  * __wait_on_freeing_inode() is designed for the job,
1252                  * but it is not exported, so use the inode lookup function
1253                  * to get the same effect.
1254                  */
1255                 while (!list_empty(&session->s_caps)) {
1256                         cap = list_entry(session->s_caps.next,
1257                                          struct ceph_cap, session_caps);
1258                         if (cap == prev)
1259                                 break;
1260                         prev = cap;
1261                         vino = cap->ci->i_vino;
1262                         spin_unlock(&session->s_cap_lock);
1263
1264                         inode = ceph_find_inode(sb, vino);
1265                         iput(inode);
1266
1267                         spin_lock(&session->s_cap_lock);
1268                 }
1269         }
1270
1271                 /* drop cap releases and unlock s_cap_lock */
1272         cleanup_cap_releases(session->s_mdsc, session);
1273
1274         BUG_ON(session->s_nr_caps > 0);
1275         BUG_ON(!list_empty(&session->s_cap_flushing));
1276 }
1277
1278 /*
1279  * wake up any threads waiting on this session's caps.  if the cap is
1280  * old (didn't get renewed on the client reconnect), remove it now.
1281  *
1282  * caller must hold s_mutex.
1283  */
1284 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1285                               void *arg)
1286 {
1287         struct ceph_inode_info *ci = ceph_inode(inode);
1288
1289         if (arg) {
1290                 spin_lock(&ci->i_ceph_lock);
1291                 ci->i_wanted_max_size = 0;
1292                 ci->i_requested_max_size = 0;
1293                 spin_unlock(&ci->i_ceph_lock);
1294         }
1295         wake_up_all(&ci->i_cap_wq);
1296         return 0;
1297 }
1298
1299 static void wake_up_session_caps(struct ceph_mds_session *session,
1300                                  int reconnect)
1301 {
1302         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1303         iterate_session_caps(session, wake_up_session_cb,
1304                              (void *)(unsigned long)reconnect);
1305 }
1306
1307 /*
1308  * Send periodic message to MDS renewing all currently held caps.  The
1309  * ack will reset the expiration for all caps from this session.
1310  *
1311  * caller holds s_mutex
1312  */
1313 static int send_renew_caps(struct ceph_mds_client *mdsc,
1314                            struct ceph_mds_session *session)
1315 {
1316         struct ceph_msg *msg;
1317         int state;
1318
1319         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1320             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1321                 pr_info("mds%d caps stale\n", session->s_mds);
1322         session->s_renew_requested = jiffies;
1323
1324         /* do not try to renew caps until a recovering mds has reconnected
1325          * with its clients. */
1326         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1327         if (state < CEPH_MDS_STATE_RECONNECT) {
1328                 dout("send_renew_caps ignoring mds%d (%s)\n",
1329                      session->s_mds, ceph_mds_state_name(state));
1330                 return 0;
1331         }
1332
1333         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1334                 ceph_mds_state_name(state));
1335         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1336                                  ++session->s_renew_seq);
1337         if (!msg)
1338                 return -ENOMEM;
1339         ceph_con_send(&session->s_con, msg);
1340         return 0;
1341 }
1342
1343 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1344                              struct ceph_mds_session *session, u64 seq)
1345 {
1346         struct ceph_msg *msg;
1347
1348         dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
1349              session->s_mds, ceph_session_state_name(session->s_state), seq);
1350         msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1351         if (!msg)
1352                 return -ENOMEM;
1353         ceph_con_send(&session->s_con, msg);
1354         return 0;
1355 }
1356
1357
1358 /*
1359  * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1360  *
1361  * Called under session->s_mutex
1362  */
1363 static void renewed_caps(struct ceph_mds_client *mdsc,
1364                          struct ceph_mds_session *session, int is_renew)
1365 {
1366         int was_stale;
1367         int wake = 0;
1368
1369         spin_lock(&session->s_cap_lock);
1370         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1371
1372         session->s_cap_ttl = session->s_renew_requested +
1373                 mdsc->mdsmap->m_session_timeout*HZ;
1374
1375         if (was_stale) {
1376                 if (time_before(jiffies, session->s_cap_ttl)) {
1377                         pr_info("mds%d caps renewed\n", session->s_mds);
1378                         wake = 1;
1379                 } else {
1380                         pr_info("mds%d caps still stale\n", session->s_mds);
1381                 }
1382         }
1383         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1384              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1385              time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1386         spin_unlock(&session->s_cap_lock);
1387
1388         if (wake)
1389                 wake_up_session_caps(session, 0);
1390 }
1391
1392 /*
1393  * send a session close request
1394  */
1395 static int request_close_session(struct ceph_mds_client *mdsc,
1396                                  struct ceph_mds_session *session)
1397 {
1398         struct ceph_msg *msg;
1399
1400         dout("request_close_session mds%d state %s seq %lld\n",
1401              session->s_mds, ceph_session_state_name(session->s_state),
1402              session->s_seq);
1403         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1404         if (!msg)
1405                 return -ENOMEM;
1406         ceph_con_send(&session->s_con, msg);
1407         return 1;
1408 }
1409
1410 /*
1411  * Called with s_mutex held.
1412  */
1413 static int __close_session(struct ceph_mds_client *mdsc,
1414                          struct ceph_mds_session *session)
1415 {
1416         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1417                 return 0;
1418         session->s_state = CEPH_MDS_SESSION_CLOSING;
1419         return request_close_session(mdsc, session);
1420 }
1421
1422 /*
1423  * Trim old(er) caps.
1424  *
1425  * Because we can't cache an inode without one or more caps, we do
1426  * this indirectly: if a cap is unused, we prune its aliases, at which
1427  * point the inode will hopefully get dropped too.
1428  *
1429  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1430  * memory pressure from the MDS, though, so it needn't be perfect.
1431  */
1432 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1433 {
1434         struct ceph_mds_session *session = arg;
1435         struct ceph_inode_info *ci = ceph_inode(inode);
1436         int used, wanted, oissued, mine;
1437
1438         if (session->s_trim_caps <= 0)
1439                 return -1;
1440
1441         spin_lock(&ci->i_ceph_lock);
1442         mine = cap->issued | cap->implemented;
1443         used = __ceph_caps_used(ci);
1444         wanted = __ceph_caps_file_wanted(ci);
1445         oissued = __ceph_caps_issued_other(ci, cap);
1446
1447         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1448              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1449              ceph_cap_string(used), ceph_cap_string(wanted));
1450         if (cap == ci->i_auth_cap) {
1451                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1452                     !list_empty(&ci->i_cap_snaps))
1453                         goto out;
1454                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1455                         goto out;
1456         }
1457         /* The inode has cached pages, but it's no longer used.
1458          * We can safely drop it. */
1459         if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1460             !(oissued & CEPH_CAP_FILE_CACHE)) {
1461                 used = 0;
1462                 oissued = 0;
1463         }
1464         if ((used | wanted) & ~oissued & mine)
1465                 goto out;   /* we need these caps */
1466
1467         session->s_trim_caps--;
1468         if (oissued) {
1469                 /* we aren't the only cap.. just remove us */
1470                 __ceph_remove_cap(cap, true);
1471         } else {
1472                 /* try dropping referring dentries */
1473                 spin_unlock(&ci->i_ceph_lock);
1474                 d_prune_aliases(inode);
1475                 dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
1476                      inode, cap, atomic_read(&inode->i_count));
1477                 return 0;
1478         }
1479
1480 out:
1481         spin_unlock(&ci->i_ceph_lock);
1482         return 0;
1483 }
1484
1485 /*
1486  * Trim session cap count down to some max number.
1487  */
1488 static int trim_caps(struct ceph_mds_client *mdsc,
1489                      struct ceph_mds_session *session,
1490                      int max_caps)
1491 {
1492         int trim_caps = session->s_nr_caps - max_caps;
1493
1494         dout("trim_caps mds%d start: %d / %d, trim %d\n",
1495              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1496         if (trim_caps > 0) {
1497                 session->s_trim_caps = trim_caps;
1498                 iterate_session_caps(session, trim_caps_cb, session);
1499                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1500                      session->s_mds, session->s_nr_caps, max_caps,
1501                         trim_caps - session->s_trim_caps);
1502                 session->s_trim_caps = 0;
1503         }
1504
1505         ceph_send_cap_releases(mdsc, session);
1506         return 0;
1507 }
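/*
 * Added note (illustrative, summarizing the flow in this file): trimming is
 * normally driven by the MDS.  When the server wants the client to shed
 * state, it sends a CEPH_SESSION_RECALL_STATE message carrying max_caps,
 * and handle_session() below responds with
 *
 *	trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
 *
 * which walks this session's caps with trim_caps_cb() until s_trim_caps
 * releases have been queued, then pushes them out via
 * ceph_send_cap_releases().
 */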
1508
1509 static int check_caps_flush(struct ceph_mds_client *mdsc,
1510                             u64 want_flush_tid)
1511 {
1512         int ret = 1;
1513
1514         spin_lock(&mdsc->cap_dirty_lock);
1515         if (!list_empty(&mdsc->cap_flush_list)) {
1516                 struct ceph_cap_flush *cf =
1517                         list_first_entry(&mdsc->cap_flush_list,
1518                                          struct ceph_cap_flush, g_list);
1519                 if (cf->tid <= want_flush_tid) {
1520                         dout("check_caps_flush still flushing tid "
1521                              "%llu <= %llu\n", cf->tid, want_flush_tid);
1522                         ret = 0;
1523                 }
1524         }
1525         spin_unlock(&mdsc->cap_dirty_lock);
1526         return ret;
1527 }
1528
1529 /*
1530  * Wait for all dirty inode data to be flushed to disk.
1531  *
1532  * Returns once caps have been flushed through want_flush_tid.
1533  */
1534 static void wait_caps_flush(struct ceph_mds_client *mdsc,
1535                             u64 want_flush_tid)
1536 {
1537         dout("check_caps_flush want %llu\n", want_flush_tid);
1538
1539         wait_event(mdsc->cap_flushing_wq,
1540                    check_caps_flush(mdsc, want_flush_tid));
1541
1542         dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
1543 }
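/*
 * Added note (illustrative): wait_caps_flush() blocks on cap_flushing_wq
 * until check_caps_flush() reports that the head of mdsc->cap_flush_list,
 * i.e. the oldest cap flush still in flight, has a tid greater than
 * want_flush_tid (or that the list is empty).  This relies on
 * cap_flush_list being kept in tid order and on wakeups coming from the
 * cap flush-ack handling elsewhere (in caps.c).
 */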
1544
1545 /*
1546  * called under s_mutex
1547  */
1548 void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1549                             struct ceph_mds_session *session)
1550 {
1551         struct ceph_msg *msg = NULL;
1552         struct ceph_mds_cap_release *head;
1553         struct ceph_mds_cap_item *item;
1554         struct ceph_cap *cap;
1555         LIST_HEAD(tmp_list);
1556         int num_cap_releases;
1557
1558         spin_lock(&session->s_cap_lock);
1559 again:
1560         list_splice_init(&session->s_cap_releases, &tmp_list);
1561         num_cap_releases = session->s_num_cap_releases;
1562         session->s_num_cap_releases = 0;
1563         spin_unlock(&session->s_cap_lock);
1564
1565         while (!list_empty(&tmp_list)) {
1566                 if (!msg) {
1567                         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
1568                                         PAGE_SIZE, GFP_NOFS, false);
1569                         if (!msg)
1570                                 goto out_err;
1571                         head = msg->front.iov_base;
1572                         head->num = cpu_to_le32(0);
1573                         msg->front.iov_len = sizeof(*head);
1574                 }
1575                 cap = list_first_entry(&tmp_list, struct ceph_cap,
1576                                         session_caps);
1577                 list_del(&cap->session_caps);
1578                 num_cap_releases--;
1579
1580                 head = msg->front.iov_base;
1581                 le32_add_cpu(&head->num, 1);
1582                 item = msg->front.iov_base + msg->front.iov_len;
1583                 item->ino = cpu_to_le64(cap->cap_ino);
1584                 item->cap_id = cpu_to_le64(cap->cap_id);
1585                 item->migrate_seq = cpu_to_le32(cap->mseq);
1586                 item->seq = cpu_to_le32(cap->issue_seq);
1587                 msg->front.iov_len += sizeof(*item);
1588
1589                 ceph_put_cap(mdsc, cap);
1590
1591                 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1592                         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1593                         dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1594                         ceph_con_send(&session->s_con, msg);
1595                         msg = NULL;
1596                 }
1597         }
1598
1599         BUG_ON(num_cap_releases != 0);
1600
1601         spin_lock(&session->s_cap_lock);
1602         if (!list_empty(&session->s_cap_releases))
1603                 goto again;
1604         spin_unlock(&session->s_cap_lock);
1605
1606         if (msg) {
1607                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1608                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1609                 ceph_con_send(&session->s_con, msg);
1610         }
1611         return;
1612 out_err:
1613         pr_err("send_cap_releases mds%d, failed to allocate message\n",
1614                 session->s_mds);
1615         spin_lock(&session->s_cap_lock);
1616         list_splice(&tmp_list, &session->s_cap_releases);
1617         session->s_num_cap_releases += num_cap_releases;
1618         spin_unlock(&session->s_cap_lock);
1619 }
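/*
 * Added layout note (descriptive summary of the message built above): each
 * CEPH_MSG_CLIENT_CAPRELEASE message begins with a struct
 * ceph_mds_cap_release header whose 'num' field counts the struct
 * ceph_mds_cap_item entries (ino, cap_id, migrate_seq, seq) packed after
 * it in the front buffer.  A message is sent as soon as it reaches
 * CEPH_CAPS_PER_RELEASE items, and any final partial message is sent once
 * the release list has been drained.
 */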
1620
1621 /*
1622  * requests
1623  */
1624
1625 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
1626                                     struct inode *dir)
1627 {
1628         struct ceph_inode_info *ci = ceph_inode(dir);
1629         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1630         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
1631         size_t size = sizeof(struct ceph_mds_reply_dir_entry);
1632         int order, num_entries;
1633
1634         spin_lock(&ci->i_ceph_lock);
1635         num_entries = ci->i_files + ci->i_subdirs;
1636         spin_unlock(&ci->i_ceph_lock);
1637         num_entries = max(num_entries, 1);
1638         num_entries = min(num_entries, opt->max_readdir);
1639
1640         order = get_order(size * num_entries);
1641         while (order >= 0) {
1642                 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
1643                                                              __GFP_NOWARN,
1644                                                              order);
1645                 if (rinfo->dir_entries)
1646                         break;
1647                 order--;
1648         }
1649         if (!rinfo->dir_entries)
1650                 return -ENOMEM;
1651
1652         num_entries = (PAGE_SIZE << order) / size;
1653         num_entries = min(num_entries, opt->max_readdir);
1654
1655         rinfo->dir_buf_size = PAGE_SIZE << order;
1656         req->r_num_caps = num_entries + 1;
1657         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
1658         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
1659         return 0;
1660 }
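/*
 * Added worked example of the sizing above (numbers are illustrative; the
 * real sizeof(struct ceph_mds_reply_dir_entry) depends on the build): with
 * 4 KiB pages, a directory reporting i_files + i_subdirs = 300, and a
 * hypothetical 64-byte dir_entry, size * num_entries = 19200 bytes,
 * get_order() rounds that up to order 3 (8 pages, 32 KiB), and num_entries
 * is then recomputed as 32768 / 64 = 512 before being clamped to
 * opt->max_readdir.  r_num_caps is set to num_entries + 1.
 */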
1661
1662 /*
1663  * Create an mds request.
1664  */
1665 struct ceph_mds_request *
1666 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1667 {
1668         struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1669         struct timespec ts;
1670
1671         if (!req)
1672                 return ERR_PTR(-ENOMEM);
1673
1674         mutex_init(&req->r_fill_mutex);
1675         req->r_mdsc = mdsc;
1676         req->r_started = jiffies;
1677         req->r_resend_mds = -1;
1678         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1679         INIT_LIST_HEAD(&req->r_unsafe_target_item);
1680         req->r_fmode = -1;
1681         kref_init(&req->r_kref);
1682         RB_CLEAR_NODE(&req->r_node);
1683         INIT_LIST_HEAD(&req->r_wait);
1684         init_completion(&req->r_completion);
1685         init_completion(&req->r_safe_completion);
1686         INIT_LIST_HEAD(&req->r_unsafe_item);
1687
1688         ktime_get_real_ts(&ts);
1689         req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran);
1690
1691         req->r_op = op;
1692         req->r_direct_mode = mode;
1693         return req;
1694 }
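/*
 * Added usage sketch (illustrative only; the fields a caller fills in vary
 * by operation, see the real users in fs/ceph/dir.c and fs/ceph/file.c):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);
 *	req->r_parent = dir;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);
 *
 * ceph_mdsc_do_request() takes care of session setup, forwarding and
 * retries; ceph_mdsc_put_request() drops the caller's reference.
 */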
1695
1696 /*
1697  * Return the oldest (lowest tid) request in the request tree, or NULL if none.
1698  *
1699  * called under mdsc->mutex.
1700  */
1701 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
1702 {
1703         if (RB_EMPTY_ROOT(&mdsc->request_tree))
1704                 return NULL;
1705         return rb_entry(rb_first(&mdsc->request_tree),
1706                         struct ceph_mds_request, r_node);
1707 }
1708
1709 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1710 {
1711         return mdsc->oldest_tid;
1712 }
1713
1714 /*
1715  * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
1716  * on build_path_from_dentry in fs/cifs/dir.c.
1717  *
1718  * If @stop_on_nosnap, generate path relative to the first non-snapped
1719  * inode.
1720  *
1721  * Encode hidden .snap dirs as a double /, i.e.
1722  *   foo/.snap/bar -> foo//bar
1723  */
1724 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1725                            int stop_on_nosnap)
1726 {
1727         struct dentry *temp;
1728         char *path;
1729         int len, pos;
1730         unsigned seq;
1731
1732         if (dentry == NULL)
1733                 return ERR_PTR(-EINVAL);
1734
1735 retry:
1736         len = 0;
1737         seq = read_seqbegin(&rename_lock);
1738         rcu_read_lock();
1739         for (temp = dentry; !IS_ROOT(temp);) {
1740                 struct inode *inode = d_inode(temp);
1741                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1742                         len++;  /* slash only */
1743                 else if (stop_on_nosnap && inode &&
1744                          ceph_snap(inode) == CEPH_NOSNAP)
1745                         break;
1746                 else
1747                         len += 1 + temp->d_name.len;
1748                 temp = temp->d_parent;
1749         }
1750         rcu_read_unlock();
1751         if (len)
1752                 len--;  /* no leading '/' */
1753
1754         path = kmalloc(len+1, GFP_NOFS);
1755         if (path == NULL)
1756                 return ERR_PTR(-ENOMEM);
1757         pos = len;
1758         path[pos] = 0;  /* trailing null */
1759         rcu_read_lock();
1760         for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1761                 struct inode *inode;
1762
1763                 spin_lock(&temp->d_lock);
1764                 inode = d_inode(temp);
1765                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1766                         dout("build_path path+%d: %p SNAPDIR\n",
1767                              pos, temp);
1768                 } else if (stop_on_nosnap && inode &&
1769                            ceph_snap(inode) == CEPH_NOSNAP) {
1770                         spin_unlock(&temp->d_lock);
1771                         break;
1772                 } else {
1773                         pos -= temp->d_name.len;
1774                         if (pos < 0) {
1775                                 spin_unlock(&temp->d_lock);
1776                                 break;
1777                         }
1778                         strncpy(path + pos, temp->d_name.name,
1779                                 temp->d_name.len);
1780                 }
1781                 spin_unlock(&temp->d_lock);
1782                 if (pos)
1783                         path[--pos] = '/';
1784                 temp = temp->d_parent;
1785         }
1786         rcu_read_unlock();
1787         if (pos != 0 || read_seqretry(&rename_lock, seq)) {
1788                 pr_err("build_path did not end path lookup where "
1789                        "expected, namelen is %d, pos is %d\n", len, pos);
1790                 /* presumably this is only possible if racing with a
1791                    rename of one of the parent directories (we cannot
1792                    lock the dentries above us to prevent this, but
1793                    retrying should be harmless) */
1794                 kfree(path);
1795                 goto retry;
1796         }
1797
1798         *base = ceph_ino(d_inode(temp));
1799         *plen = len;
1800         dout("build_path on %p %d built %llx '%.*s'\n",
1801              dentry, d_count(dentry), *base, len, path);
1802         return path;
1803 }
1804
1805 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1806                              const char **ppath, int *ppathlen, u64 *pino,
1807                              int *pfreepath)
1808 {
1809         char *path;
1810
1811         rcu_read_lock();
1812         if (!dir)
1813                 dir = d_inode_rcu(dentry->d_parent);
1814         if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
1815                 *pino = ceph_ino(dir);
1816                 rcu_read_unlock();
1817                 *ppath = dentry->d_name.name;
1818                 *ppathlen = dentry->d_name.len;
1819                 return 0;
1820         }
1821         rcu_read_unlock();
1822         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1823         if (IS_ERR(path))
1824                 return PTR_ERR(path);
1825         *ppath = path;
1826         *pfreepath = 1;
1827         return 0;
1828 }
1829
1830 static int build_inode_path(struct inode *inode,
1831                             const char **ppath, int *ppathlen, u64 *pino,
1832                             int *pfreepath)
1833 {
1834         struct dentry *dentry;
1835         char *path;
1836
1837         if (ceph_snap(inode) == CEPH_NOSNAP) {
1838                 *pino = ceph_ino(inode);
1839                 *ppathlen = 0;
1840                 return 0;
1841         }
1842         dentry = d_find_alias(inode);
1843         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1844         dput(dentry);
1845         if (IS_ERR(path))
1846                 return PTR_ERR(path);
1847         *ppath = path;
1848         *pfreepath = 1;
1849         return 0;
1850 }
1851
1852 /*
1853  * request arguments may be specified via an inode *, a dentry *, or
1854  * an explicit ino+path.
1855  */
1856 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1857                                   struct inode *rdiri, const char *rpath,
1858                                   u64 rino, const char **ppath, int *pathlen,
1859                                   u64 *ino, int *freepath)
1860 {
1861         int r = 0;
1862
1863         if (rinode) {
1864                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
1865                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
1866                      ceph_snap(rinode));
1867         } else if (rdentry) {
1868                 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
1869                                         freepath);
1870                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1871                      *ppath);
1872         } else if (rpath || rino) {
1873                 *ino = rino;
1874                 *ppath = rpath;
1875                 *pathlen = rpath ? strlen(rpath) : 0;
1876                 dout(" path %.*s\n", *pathlen, rpath);
1877         }
1878
1879         return r;
1880 }
1881
1882 /*
1883  * called under mdsc->mutex
1884  */
1885 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1886                                                struct ceph_mds_request *req,
1887                                                int mds, bool drop_cap_releases)
1888 {
1889         struct ceph_msg *msg;
1890         struct ceph_mds_request_head *head;
1891         const char *path1 = NULL;
1892         const char *path2 = NULL;
1893         u64 ino1 = 0, ino2 = 0;
1894         int pathlen1 = 0, pathlen2 = 0;
1895         int freepath1 = 0, freepath2 = 0;
1896         int len;
1897         u16 releases;
1898         void *p, *end;
1899         int ret;
1900
1901         ret = set_request_path_attr(req->r_inode, req->r_dentry,
1902                               req->r_parent, req->r_path1, req->r_ino1.ino,
1903                               &path1, &pathlen1, &ino1, &freepath1);
1904         if (ret < 0) {
1905                 msg = ERR_PTR(ret);
1906                 goto out;
1907         }
1908
1909         ret = set_request_path_attr(NULL, req->r_old_dentry,
1910                               req->r_old_dentry_dir,
1911                               req->r_path2, req->r_ino2.ino,
1912                               &path2, &pathlen2, &ino2, &freepath2);
1913         if (ret < 0) {
1914                 msg = ERR_PTR(ret);
1915                 goto out_free1;
1916         }
1917
1918         len = sizeof(*head) +
1919                 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
1920                 sizeof(struct ceph_timespec);
1921
1922         /* calculate (max) length for cap releases */
1923         len += sizeof(struct ceph_mds_request_release) *
1924                 (!!req->r_inode_drop + !!req->r_dentry_drop +
1925                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
1926         if (req->r_dentry_drop)
1927                 len += req->r_dentry->d_name.len;
1928         if (req->r_old_dentry_drop)
1929                 len += req->r_old_dentry->d_name.len;
1930
1931         msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
1932         if (!msg) {
1933                 msg = ERR_PTR(-ENOMEM);
1934                 goto out_free2;
1935         }
1936
1937         msg->hdr.version = cpu_to_le16(2);
1938         msg->hdr.tid = cpu_to_le64(req->r_tid);
1939
1940         head = msg->front.iov_base;
1941         p = msg->front.iov_base + sizeof(*head);
1942         end = msg->front.iov_base + msg->front.iov_len;
1943
1944         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
1945         head->op = cpu_to_le32(req->r_op);
1946         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
1947         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
1948         head->args = req->r_args;
1949
1950         ceph_encode_filepath(&p, end, ino1, path1);
1951         ceph_encode_filepath(&p, end, ino2, path2);
1952
1953         /* make note of release offset, in case we need to replay */
1954         req->r_request_release_offset = p - msg->front.iov_base;
1955
1956         /* cap releases */
1957         releases = 0;
1958         if (req->r_inode_drop)
1959                 releases += ceph_encode_inode_release(&p,
1960                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
1961                       mds, req->r_inode_drop, req->r_inode_unless, 0);
1962         if (req->r_dentry_drop)
1963                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
1964                                 req->r_parent, mds, req->r_dentry_drop,
1965                                 req->r_dentry_unless);
1966         if (req->r_old_dentry_drop)
1967                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
1968                                 req->r_old_dentry_dir, mds,
1969                                 req->r_old_dentry_drop,
1970                                 req->r_old_dentry_unless);
1971         if (req->r_old_inode_drop)
1972                 releases += ceph_encode_inode_release(&p,
1973                       d_inode(req->r_old_dentry),
1974                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
1975
1976         if (drop_cap_releases) {
1977                 releases = 0;
1978                 p = msg->front.iov_base + req->r_request_release_offset;
1979         }
1980
1981         head->num_releases = cpu_to_le16(releases);
1982
1983         /* time stamp */
1984         {
1985                 struct ceph_timespec ts;
1986                 ceph_encode_timespec(&ts, &req->r_stamp);
1987                 ceph_encode_copy(&p, &ts, sizeof(ts));
1988         }
1989
1990         BUG_ON(p > end);
1991         msg->front.iov_len = p - msg->front.iov_base;
1992         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1993
1994         if (req->r_pagelist) {
1995                 struct ceph_pagelist *pagelist = req->r_pagelist;
1996                 atomic_inc(&pagelist->refcnt);
1997                 ceph_msg_data_add_pagelist(msg, pagelist);
1998                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
1999         } else {
2000                 msg->hdr.data_len = 0;
2001         }
2002
2003         msg->hdr.data_off = cpu_to_le16(0);
2004
2005 out_free2:
2006         if (freepath2)
2007                 kfree((char *)path2);
2008 out_free1:
2009         if (freepath1)
2010                 kfree((char *)path1);
2011 out:
2012         return msg;
2013 }
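/*
 * Added layout note (descriptive summary of the front section encoded
 * above): struct ceph_mds_request_head, then two ceph_encode_filepath()
 * blobs (ino1/path1 and ino2/path2), then up to four
 * ceph_mds_request_release records starting at r_request_release_offset,
 * and finally a ceph_timespec stamp.  The release offset is recorded so a
 * replay can rewrite the tail of the same buffer (see
 * __prepare_send_request() below).
 */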
2014
2015 /*
2016  * called under mdsc->mutex if error, under no mutex if
2017  * success.
2018  */
2019 static void complete_request(struct ceph_mds_client *mdsc,
2020                              struct ceph_mds_request *req)
2021 {
2022         if (req->r_callback)
2023                 req->r_callback(mdsc, req);
2024         else
2025                 complete_all(&req->r_completion);
2026 }
2027
2028 /*
2029  * called under mdsc->mutex
2030  */
2031 static int __prepare_send_request(struct ceph_mds_client *mdsc,
2032                                   struct ceph_mds_request *req,
2033                                   int mds, bool drop_cap_releases)
2034 {
2035         struct ceph_mds_request_head *rhead;
2036         struct ceph_msg *msg;
2037         int flags = 0;
2038
2039         req->r_attempts++;
2040         if (req->r_inode) {
2041                 struct ceph_cap *cap =
2042                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2043
2044                 if (cap)
2045                         req->r_sent_on_mseq = cap->mseq;
2046                 else
2047                         req->r_sent_on_mseq = -1;
2048         }
2049         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2050              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2051
2052         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2053                 void *p;
2054                 /*
2055                  * Replay.  Do not regenerate message (and rebuild
2056                  * paths, etc.); just use the original message.
2057                  * Rebuilding paths will break for renames because
2058                  * d_move mangles the src name.
2059                  */
2060                 msg = req->r_request;
2061                 rhead = msg->front.iov_base;
2062
2063                 flags = le32_to_cpu(rhead->flags);
2064                 flags |= CEPH_MDS_FLAG_REPLAY;
2065                 rhead->flags = cpu_to_le32(flags);
2066
2067                 if (req->r_target_inode)
2068                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2069
2070                 rhead->num_retry = req->r_attempts - 1;
2071
2072                 /* remove cap/dentry releases from message */
2073                 rhead->num_releases = 0;
2074
2075                 /* time stamp */
2076                 p = msg->front.iov_base + req->r_request_release_offset;
2077                 {
2078                         struct ceph_timespec ts;
2079                         ceph_encode_timespec(&ts, &req->r_stamp);
2080                         ceph_encode_copy(&p, &ts, sizeof(ts));
2081                 }
2082
2083                 msg->front.iov_len = p - msg->front.iov_base;
2084                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2085                 return 0;
2086         }
2087
2088         if (req->r_request) {
2089                 ceph_msg_put(req->r_request);
2090                 req->r_request = NULL;
2091         }
2092         msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2093         if (IS_ERR(msg)) {
2094                 req->r_err = PTR_ERR(msg);
2095                 return PTR_ERR(msg);
2096         }
2097         req->r_request = msg;
2098
2099         rhead = msg->front.iov_base;
2100         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2101         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2102                 flags |= CEPH_MDS_FLAG_REPLAY;
2103         if (req->r_parent)
2104                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2105         rhead->flags = cpu_to_le32(flags);
2106         rhead->num_fwd = req->r_num_fwd;
2107         rhead->num_retry = req->r_attempts - 1;
2108         rhead->ino = 0;
2109
2110         dout(" r_parent = %p\n", req->r_parent);
2111         return 0;
2112 }
2113
2114 /*
2115  * send request, or put it on the appropriate wait list.
2116  */
2117 static int __do_request(struct ceph_mds_client *mdsc,
2118                         struct ceph_mds_request *req)
2119 {
2120         struct ceph_mds_session *session = NULL;
2121         int mds = -1;
2122         int err = 0;
2123
2124         if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2125                 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2126                         __unregister_request(mdsc, req);
2127                 goto out;
2128         }
2129
2130         if (req->r_timeout &&
2131             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2132                 dout("do_request timed out\n");
2133                 err = -EIO;
2134                 goto finish;
2135         }
2136         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2137                 dout("do_request forced umount\n");
2138                 err = -EIO;
2139                 goto finish;
2140         }
2141         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2142                 if (mdsc->mdsmap_err) {
2143                         err = mdsc->mdsmap_err;
2144                         dout("do_request mdsmap err %d\n", err);
2145                         goto finish;
2146                 }
2147                 if (mdsc->mdsmap->m_epoch == 0) {
2148                         dout("do_request no mdsmap, waiting for map\n");
2149                         list_add(&req->r_wait, &mdsc->waiting_for_map);
2150                         goto finish;
2151                 }
2152                 if (!(mdsc->fsc->mount_options->flags &
2153                       CEPH_MOUNT_OPT_MOUNTWAIT) &&
2154                     !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2155                         err = -ENOENT;
2156                         pr_info("probably no mds server is up\n");
2157                         goto finish;
2158                 }
2159         }
2160
2161         put_request_session(req);
2162
2163         mds = __choose_mds(mdsc, req);
2164         if (mds < 0 ||
2165             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2166                 dout("do_request no mds or not active, waiting for map\n");
2167                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2168                 goto out;
2169         }
2170
2171         /* get, open session */
2172         session = __ceph_lookup_mds_session(mdsc, mds);
2173         if (!session) {
2174                 session = register_session(mdsc, mds);
2175                 if (IS_ERR(session)) {
2176                         err = PTR_ERR(session);
2177                         goto finish;
2178                 }
2179         }
2180         req->r_session = get_session(session);
2181
2182         dout("do_request mds%d session %p state %s\n", mds, session,
2183              ceph_session_state_name(session->s_state));
2184         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2185             session->s_state != CEPH_MDS_SESSION_HUNG) {
2186                 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2187                         err = -EACCES;
2188                         goto out_session;
2189                 }
2190                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2191                     session->s_state == CEPH_MDS_SESSION_CLOSING)
2192                         __open_session(mdsc, session);
2193                 list_add(&req->r_wait, &session->s_waiting);
2194                 goto out_session;
2195         }
2196
2197         /* send request */
2198         req->r_resend_mds = -1;   /* forget any previous mds hint */
2199
2200         if (req->r_request_started == 0)   /* note request start time */
2201                 req->r_request_started = jiffies;
2202
2203         err = __prepare_send_request(mdsc, req, mds, false);
2204         if (!err) {
2205                 ceph_msg_get(req->r_request);
2206                 ceph_con_send(&session->s_con, req->r_request);
2207         }
2208
2209 out_session:
2210         ceph_put_mds_session(session);
2211 finish:
2212         if (err) {
2213                 dout("__do_request early error %d\n", err);
2214                 req->r_err = err;
2215                 complete_request(mdsc, req);
2216                 __unregister_request(mdsc, req);
2217         }
2218 out:
2219         return err;
2220 }
2221
2222 /*
2223  * called under mdsc->mutex
2224  */
2225 static void __wake_requests(struct ceph_mds_client *mdsc,
2226                             struct list_head *head)
2227 {
2228         struct ceph_mds_request *req;
2229         LIST_HEAD(tmp_list);
2230
2231         list_splice_init(head, &tmp_list);
2232
2233         while (!list_empty(&tmp_list)) {
2234                 req = list_entry(tmp_list.next,
2235                                  struct ceph_mds_request, r_wait);
2236                 list_del_init(&req->r_wait);
2237                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2238                 __do_request(mdsc, req);
2239         }
2240 }
2241
2242 /*
2243  * Wake up threads with requests pending for @mds, so that they can
2244  * resubmit their requests to a possibly different mds.
2245  */
2246 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2247 {
2248         struct ceph_mds_request *req;
2249         struct rb_node *p = rb_first(&mdsc->request_tree);
2250
2251         dout("kick_requests mds%d\n", mds);
2252         while (p) {
2253                 req = rb_entry(p, struct ceph_mds_request, r_node);
2254                 p = rb_next(p);
2255                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2256                         continue;
2257                 if (req->r_attempts > 0)
2258                         continue; /* only new requests */
2259                 if (req->r_session &&
2260                     req->r_session->s_mds == mds) {
2261                         dout(" kicking tid %llu\n", req->r_tid);
2262                         list_del_init(&req->r_wait);
2263                         __do_request(mdsc, req);
2264                 }
2265         }
2266 }
2267
2268 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
2269                               struct ceph_mds_request *req)
2270 {
2271         dout("submit_request on %p\n", req);
2272         mutex_lock(&mdsc->mutex);
2273         __register_request(mdsc, req, NULL);
2274         __do_request(mdsc, req);
2275         mutex_unlock(&mdsc->mutex);
2276 }
2277
2278 /*
2279  * Synchronously perform an mds request.  Take care of all of the
2280  * session setup, forwarding, and retry details.
2281  */
2282 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2283                          struct inode *dir,
2284                          struct ceph_mds_request *req)
2285 {
2286         int err;
2287
2288         dout("do_request on %p\n", req);
2289
2290         /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
2291         if (req->r_inode)
2292                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2293         if (req->r_parent)
2294                 ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
2295         if (req->r_old_dentry_dir)
2296                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2297                                   CEPH_CAP_PIN);
2298
2299         /* issue */
2300         mutex_lock(&mdsc->mutex);
2301         __register_request(mdsc, req, dir);
2302         __do_request(mdsc, req);
2303
2304         if (req->r_err) {
2305                 err = req->r_err;
2306                 goto out;
2307         }
2308
2309         /* wait */
2310         mutex_unlock(&mdsc->mutex);
2311         dout("do_request waiting\n");
2312         if (!req->r_timeout && req->r_wait_for_completion) {
2313                 err = req->r_wait_for_completion(mdsc, req);
2314         } else {
2315                 long timeleft = wait_for_completion_killable_timeout(
2316                                         &req->r_completion,
2317                                         ceph_timeout_jiffies(req->r_timeout));
2318                 if (timeleft > 0)
2319                         err = 0;
2320                 else if (!timeleft)
2321                         err = -EIO;  /* timed out */
2322                 else
2323                         err = timeleft;  /* killed */
2324         }
2325         dout("do_request waited, got %d\n", err);
2326         mutex_lock(&mdsc->mutex);
2327
2328         /* only abort if we didn't race with a real reply */
2329         if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2330                 err = le32_to_cpu(req->r_reply_info.head->result);
2331         } else if (err < 0) {
2332                 dout("aborted request %lld with %d\n", req->r_tid, err);
2333
2334                 /*
2335                  * ensure we aren't running concurrently with
2336                  * ceph_fill_trace or ceph_readdir_prepopulate, which
2337                  * rely on locks (dir mutex) held by our caller.
2338                  */
2339                 mutex_lock(&req->r_fill_mutex);
2340                 req->r_err = err;
2341                 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
2342                 mutex_unlock(&req->r_fill_mutex);
2343
2344                 if (req->r_parent &&
2345                     (req->r_op & CEPH_MDS_OP_WRITE))
2346                         ceph_invalidate_dir_request(req);
2347         } else {
2348                 err = req->r_err;
2349         }
2350
2351 out:
2352         mutex_unlock(&mdsc->mutex);
2353         dout("do_request %p done, result %d\n", req, err);
2354         return err;
2355 }
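/*
 * Added note on the abort path above (descriptive): if the wait is killed
 * or times out before a reply arrives, the request is marked
 * CEPH_MDS_R_ABORTED under r_fill_mutex so that a late reply in
 * handle_reply() cannot race with ceph_fill_trace() or
 * ceph_readdir_prepopulate(), and aborted namespace-modifying operations
 * additionally invalidate the parent directory's completeness and dentry
 * leases via ceph_invalidate_dir_request().
 */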
2356
2357 /*
2358  * Invalidate dir's completeness, dentry lease state on an aborted MDS
2359  * namespace request.
2360  */
2361 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2362 {
2363         struct inode *inode = req->r_parent;
2364
2365         dout("invalidate_dir_request %p (complete, lease(s))\n", inode);
2366
2367         ceph_dir_clear_complete(inode);
2368         if (req->r_dentry)
2369                 ceph_invalidate_dentry_lease(req->r_dentry);
2370         if (req->r_old_dentry)
2371                 ceph_invalidate_dentry_lease(req->r_old_dentry);
2372 }
2373
2374 /*
2375  * Handle mds reply.
2376  *
2377  * We take the session mutex and parse and process the reply immediately.
2378  * This preserves the logical ordering of replies, capabilities, etc., sent
2379  * by the MDS as they are applied to our local cache.
2380  */
2381 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2382 {
2383         struct ceph_mds_client *mdsc = session->s_mdsc;
2384         struct ceph_mds_request *req;
2385         struct ceph_mds_reply_head *head = msg->front.iov_base;
2386         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
2387         struct ceph_snap_realm *realm;
2388         u64 tid;
2389         int err, result;
2390         int mds = session->s_mds;
2391
2392         if (msg->front.iov_len < sizeof(*head)) {
2393                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2394                 ceph_msg_dump(msg);
2395                 return;
2396         }
2397
2398         /* get request, session */
2399         tid = le64_to_cpu(msg->hdr.tid);
2400         mutex_lock(&mdsc->mutex);
2401         req = lookup_get_request(mdsc, tid);
2402         if (!req) {
2403                 dout("handle_reply on unknown tid %llu\n", tid);
2404                 mutex_unlock(&mdsc->mutex);
2405                 return;
2406         }
2407         dout("handle_reply %p\n", req);
2408
2409         /* correct session? */
2410         if (req->r_session != session) {
2411                 pr_err("mdsc_handle_reply got %llu on session mds%d"
2412                        " not mds%d\n", tid, session->s_mds,
2413                        req->r_session ? req->r_session->s_mds : -1);
2414                 mutex_unlock(&mdsc->mutex);
2415                 goto out;
2416         }
2417
2418         /* dup? */
2419         if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
2420             (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
2421                 pr_warn("got a dup %s reply on %llu from mds%d\n",
2422                            head->safe ? "safe" : "unsafe", tid, mds);
2423                 mutex_unlock(&mdsc->mutex);
2424                 goto out;
2425         }
2426         if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
2427                 pr_warn("got unsafe after safe on %llu from mds%d\n",
2428                            tid, mds);
2429                 mutex_unlock(&mdsc->mutex);
2430                 goto out;
2431         }
2432
2433         result = le32_to_cpu(head->result);
2434
2435         /*
2436          * Handle an ESTALE:
2437          * if we're not talking to the authority, send to them;
2438          * if the authority has changed while we weren't looking,
2439          * send to the new authority.
2440          * Otherwise we just have to return an ESTALE.
2441          */
2442         if (result == -ESTALE) {
2443                 dout("got ESTALE on request %llu", req->r_tid);
2444                 req->r_resend_mds = -1;
2445                 if (req->r_direct_mode != USE_AUTH_MDS) {
2446                         dout("not using auth, setting for that now");
2447                         req->r_direct_mode = USE_AUTH_MDS;
2448                         __do_request(mdsc, req);
2449                         mutex_unlock(&mdsc->mutex);
2450                         goto out;
2451                 } else  {
2452                         int mds = __choose_mds(mdsc, req);
2453                         if (mds >= 0 && mds != req->r_session->s_mds) {
2454                                 dout("but auth changed, so resending");
2455                                 __do_request(mdsc, req);
2456                                 mutex_unlock(&mdsc->mutex);
2457                                 goto out;
2458                         }
2459                 }
2460                 dout("have to return ESTALE on request %llu", req->r_tid);
2461         }
2462
2463
2464         if (head->safe) {
2465                 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
2466                 __unregister_request(mdsc, req);
2467
2468                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2469                         /*
2470                          * We already handled the unsafe response, now do the
2471                          * cleanup.  No need to examine the response; the MDS
2472                          * doesn't include any result info in the safe
2473                          * response.  And even if it did, there is nothing
2474                          * useful we could do with a revised return value.
2475                          */
2476                         dout("got safe reply %llu, mds%d\n", tid, mds);
2477
2478                         /* last unsafe request during umount? */
2479                         if (mdsc->stopping && !__get_oldest_req(mdsc))
2480                                 complete_all(&mdsc->safe_umount_waiters);
2481                         mutex_unlock(&mdsc->mutex);
2482                         goto out;
2483                 }
2484         } else {
2485                 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
2486                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
2487                 if (req->r_unsafe_dir) {
2488                         struct ceph_inode_info *ci =
2489                                         ceph_inode(req->r_unsafe_dir);
2490                         spin_lock(&ci->i_unsafe_lock);
2491                         list_add_tail(&req->r_unsafe_dir_item,
2492                                       &ci->i_unsafe_dirops);
2493                         spin_unlock(&ci->i_unsafe_lock);
2494                 }
2495         }
2496
2497         dout("handle_reply tid %lld result %d\n", tid, result);
2498         rinfo = &req->r_reply_info;
2499         err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
2500         mutex_unlock(&mdsc->mutex);
2501
2502         mutex_lock(&session->s_mutex);
2503         if (err < 0) {
2504                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
2505                 ceph_msg_dump(msg);
2506                 goto out_err;
2507         }
2508
2509         /* snap trace */
2510         realm = NULL;
2511         if (rinfo->snapblob_len) {
2512                 down_write(&mdsc->snap_rwsem);
2513                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
2514                                 rinfo->snapblob + rinfo->snapblob_len,
2515                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
2516                                 &realm);
2517                 downgrade_write(&mdsc->snap_rwsem);
2518         } else {
2519                 down_read(&mdsc->snap_rwsem);
2520         }
2521
2522         /* insert trace into our cache */
2523         mutex_lock(&req->r_fill_mutex);
2524         current->journal_info = req;
2525         err = ceph_fill_trace(mdsc->fsc->sb, req);
2526         if (err == 0) {
2527                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
2528                                     req->r_op == CEPH_MDS_OP_LSSNAP))
2529                         ceph_readdir_prepopulate(req, req->r_session);
2530                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2531         }
2532         current->journal_info = NULL;
2533         mutex_unlock(&req->r_fill_mutex);
2534
2535         up_read(&mdsc->snap_rwsem);
2536         if (realm)
2537                 ceph_put_snap_realm(mdsc, realm);
2538
2539         if (err == 0 && req->r_target_inode &&
2540             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2541                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
2542                 spin_lock(&ci->i_unsafe_lock);
2543                 list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
2544                 spin_unlock(&ci->i_unsafe_lock);
2545         }
2546 out_err:
2547         mutex_lock(&mdsc->mutex);
2548         if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
2549                 if (err) {
2550                         req->r_err = err;
2551                 } else {
2552                         req->r_reply = ceph_msg_get(msg);
2553                         set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
2554                 }
2555         } else {
2556                 dout("reply arrived after request %lld was aborted\n", tid);
2557         }
2558         mutex_unlock(&mdsc->mutex);
2559
2560         mutex_unlock(&session->s_mutex);
2561
2562         /* kick calling process */
2563         complete_request(mdsc, req);
2564 out:
2565         ceph_mdsc_put_request(req);
2566         return;
2567 }
2568
2569
2570
2571 /*
2572  * handle mds notification that our request has been forwarded.
2573  */
2574 static void handle_forward(struct ceph_mds_client *mdsc,
2575                            struct ceph_mds_session *session,
2576                            struct ceph_msg *msg)
2577 {
2578         struct ceph_mds_request *req;
2579         u64 tid = le64_to_cpu(msg->hdr.tid);
2580         u32 next_mds;
2581         u32 fwd_seq;
2582         int err = -EINVAL;
2583         void *p = msg->front.iov_base;
2584         void *end = p + msg->front.iov_len;
2585
2586         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2587         next_mds = ceph_decode_32(&p);
2588         fwd_seq = ceph_decode_32(&p);
2589
2590         mutex_lock(&mdsc->mutex);
2591         req = lookup_get_request(mdsc, tid);
2592         if (!req) {
2593                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
2594                 goto out;  /* dup reply? */
2595         }
2596
2597         if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
2598                 dout("forward tid %llu aborted, unregistering\n", tid);
2599                 __unregister_request(mdsc, req);
2600         } else if (fwd_seq <= req->r_num_fwd) {
2601                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2602                      tid, next_mds, req->r_num_fwd, fwd_seq);
2603         } else {
2604                 /* resend. forward race not possible; mds would drop */
2605                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
2606                 BUG_ON(req->r_err);
2607                 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
2608                 req->r_attempts = 0;
2609                 req->r_num_fwd = fwd_seq;
2610                 req->r_resend_mds = next_mds;
2611                 put_request_session(req);
2612                 __do_request(mdsc, req);
2613         }
2614         ceph_mdsc_put_request(req);
2615 out:
2616         mutex_unlock(&mdsc->mutex);
2617         return;
2618
2619 bad:
2620         pr_err("mdsc_handle_forward decode error err=%d\n", err);
2621 }
2622
2623 /*
2624  * handle a mds session control message
2625  */
2626 static void handle_session(struct ceph_mds_session *session,
2627                            struct ceph_msg *msg)
2628 {
2629         struct ceph_mds_client *mdsc = session->s_mdsc;
2630         u32 op;
2631         u64 seq;
2632         int mds = session->s_mds;
2633         struct ceph_mds_session_head *h = msg->front.iov_base;
2634         int wake = 0;
2635
2636         /* decode */
2637         if (msg->front.iov_len != sizeof(*h))
2638                 goto bad;
2639         op = le32_to_cpu(h->op);
2640         seq = le64_to_cpu(h->seq);
2641
2642         mutex_lock(&mdsc->mutex);
2643         if (op == CEPH_SESSION_CLOSE)
2644                 __unregister_session(mdsc, session);
2645         /* FIXME: this ttl calculation is generous */
2646         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
2647         mutex_unlock(&mdsc->mutex);
2648
2649         mutex_lock(&session->s_mutex);
2650
2651         dout("handle_session mds%d %s %p state %s seq %llu\n",
2652              mds, ceph_session_op_name(op), session,
2653              ceph_session_state_name(session->s_state), seq);
2654
2655         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
2656                 session->s_state = CEPH_MDS_SESSION_OPEN;
2657                 pr_info("mds%d came back\n", session->s_mds);
2658         }
2659
2660         switch (op) {
2661         case CEPH_SESSION_OPEN:
2662                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2663                         pr_info("mds%d reconnect success\n", session->s_mds);
2664                 session->s_state = CEPH_MDS_SESSION_OPEN;
2665                 renewed_caps(mdsc, session, 0);
2666                 wake = 1;
2667                 if (mdsc->stopping)
2668                         __close_session(mdsc, session);
2669                 break;
2670
2671         case CEPH_SESSION_RENEWCAPS:
2672                 if (session->s_renew_seq == seq)
2673                         renewed_caps(mdsc, session, 1);
2674                 break;
2675
2676         case CEPH_SESSION_CLOSE:
2677                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2678                         pr_info("mds%d reconnect denied\n", session->s_mds);
2679                 cleanup_session_requests(mdsc, session);
2680                 remove_session_caps(session);
2681                 wake = 2; /* for good measure */
2682                 wake_up_all(&mdsc->session_close_wq);
2683                 break;
2684
2685         case CEPH_SESSION_STALE:
2686                 pr_info("mds%d caps went stale, renewing\n",
2687                         session->s_mds);
2688                 spin_lock(&session->s_gen_ttl_lock);
2689                 session->s_cap_gen++;
2690                 session->s_cap_ttl = jiffies - 1;
2691                 spin_unlock(&session->s_gen_ttl_lock);
2692                 send_renew_caps(mdsc, session);
2693                 break;
2694
2695         case CEPH_SESSION_RECALL_STATE:
2696                 trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
2697                 break;
2698
2699         case CEPH_SESSION_FLUSHMSG:
2700                 send_flushmsg_ack(mdsc, session, seq);
2701                 break;
2702
2703         case CEPH_SESSION_FORCE_RO:
2704                 dout("force_session_readonly %p\n", session);
2705                 spin_lock(&session->s_cap_lock);
2706                 session->s_readonly = true;
2707                 spin_unlock(&session->s_cap_lock);
2708                 wake_up_session_caps(session, 0);
2709                 break;
2710
2711         case CEPH_SESSION_REJECT:
2712                 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
2713                 pr_info("mds%d rejected session\n", session->s_mds);
2714                 session->s_state = CEPH_MDS_SESSION_REJECTED;
2715                 cleanup_session_requests(mdsc, session);
2716                 remove_session_caps(session);
2717                 wake = 2; /* for good measure */
2718                 break;
2719
2720         default:
2721                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
2722                 WARN_ON(1);
2723         }
2724
2725         mutex_unlock(&session->s_mutex);
2726         if (wake) {
2727                 mutex_lock(&mdsc->mutex);
2728                 __wake_requests(mdsc, &session->s_waiting);
2729                 if (wake == 2)
2730                         kick_requests(mdsc, mds);
2731                 mutex_unlock(&mdsc->mutex);
2732         }
2733         return;
2734
2735 bad:
2736         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
2737                (int)msg->front.iov_len);
2738         ceph_msg_dump(msg);
2739         return;
2740 }
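/*
 * Added note on the 'wake' values above (descriptive): wake == 1 only
 * re-queues requests parked on this session's s_waiting list, while
 * wake == 2 (session closed or rejected) also calls kick_requests() so
 * that new, not-yet-attempted requests targeted at this mds are pushed
 * back through __do_request() and can pick a different mds.
 */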
2741
2742
2743 /*
2744  * called under session->mutex.
2745  */
2746 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
2747                                    struct ceph_mds_session *session)
2748 {
2749         struct ceph_mds_request *req, *nreq;
2750         struct rb_node *p;
2751         int err;
2752
2753         dout("replay_unsafe_requests mds%d\n", session->s_mds);
2754
2755         mutex_lock(&mdsc->mutex);
2756         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
2757                 err = __prepare_send_request(mdsc, req, session->s_mds, true);
2758                 if (!err) {
2759                         ceph_msg_get(req->r_request);
2760                         ceph_con_send(&session->s_con, req->r_request);
2761                 }
2762         }
2763
2764         /*
2765          * Also re-send old requests when the MDS enters the reconnect stage,
2766          * so that the MDS can process completed requests in its clientreplay stage.
2767          */
2768         p = rb_first(&mdsc->request_tree);
2769         while (p) {
2770                 req = rb_entry(p, struct ceph_mds_request, r_node);
2771                 p = rb_next(p);
2772                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2773                         continue;
2774                 if (req->r_attempts == 0)
2775                         continue; /* only old requests */
2776                 if (req->r_session &&
2777                     req->r_session->s_mds == session->s_mds) {
2778                         err = __prepare_send_request(mdsc, req,
2779                                                      session->s_mds, true);
2780                         if (!err) {
2781                                 ceph_msg_get(req->r_request);
2782                                 ceph_con_send(&session->s_con, req->r_request);
2783                         }
2784                 }
2785         }
2786         mutex_unlock(&mdsc->mutex);
2787 }
2788
2789 /*
2790  * Encode information about a cap for a reconnect with the MDS.
2791  */
2792 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2793                           void *arg)
2794 {
2795         union {
2796                 struct ceph_mds_cap_reconnect v2;
2797                 struct ceph_mds_cap_reconnect_v1 v1;
2798         } rec;
2799         struct ceph_inode_info *ci;
2800         struct ceph_reconnect_state *recon_state = arg;
2801         struct ceph_pagelist *pagelist = recon_state->pagelist;
2802         char *path;
2803         int pathlen, err;
2804         u64 pathbase;
2805         u64 snap_follows;
2806         struct dentry *dentry;
2807
2808         ci = cap->ci;
2809
2810         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
2811              inode, ceph_vinop(inode), cap, cap->cap_id,
2812              ceph_cap_string(cap->issued));
2813         err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
2814         if (err)
2815                 return err;
2816
2817         dentry = d_find_alias(inode);
2818         if (dentry) {
2819                 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
2820                 if (IS_ERR(path)) {
2821                         err = PTR_ERR(path);
2822                         goto out_dput;
2823                 }
2824         } else {
2825                 path = NULL;
2826                 pathlen = 0;
2827                 pathbase = 0;
2828         }
2829
2830         spin_lock(&ci->i_ceph_lock);
2831         cap->seq = 0;        /* reset cap seq */
2832         cap->issue_seq = 0;  /* and issue_seq */
2833         cap->mseq = 0;       /* and migrate_seq */
2834         cap->cap_gen = cap->session->s_cap_gen;
2835
2836         if (recon_state->msg_version >= 2) {
2837                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
2838                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2839                 rec.v2.issued = cpu_to_le32(cap->issued);
2840                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2841                 rec.v2.pathbase = cpu_to_le64(pathbase);
2842                 rec.v2.flock_len = 0;
2843         } else {
2844                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
2845                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2846                 rec.v1.issued = cpu_to_le32(cap->issued);
2847                 rec.v1.size = cpu_to_le64(inode->i_size);
2848                 ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
2849                 ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
2850                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2851                 rec.v1.pathbase = cpu_to_le64(pathbase);
2852         }
2853
2854         if (list_empty(&ci->i_cap_snaps)) {
2855                 snap_follows = 0;
2856         } else {
2857                 struct ceph_cap_snap *capsnap =
2858                         list_first_entry(&ci->i_cap_snaps,
2859                                          struct ceph_cap_snap, ci_item);
2860                 snap_follows = capsnap->follows;
2861         }
2862         spin_unlock(&ci->i_ceph_lock);
2863
2864         if (recon_state->msg_version >= 2) {
2865                 int num_fcntl_locks, num_flock_locks;
2866                 struct ceph_filelock *flocks;
2867                 size_t struct_len, total_len = 0;
2868                 u8 struct_v = 0;
2869
2870 encode_again:
2871                 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
2872                 flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
2873                                  sizeof(struct ceph_filelock), GFP_NOFS);
2874                 if (!flocks) {
2875                         err = -ENOMEM;
2876                         goto out_free;
2877                 }
2878                 err = ceph_encode_locks_to_buffer(inode, flocks,
2879                                                   num_fcntl_locks,
2880                                                   num_flock_locks);
2881                 if (err) {
2882                         kfree(flocks);
2883                         if (err == -ENOSPC)
2884                                 goto encode_again;
2885                         goto out_free;
2886                 }
2887
2888                 if (recon_state->msg_version >= 3) {
2889                         /* version, compat_version and struct_len */
2890                         total_len = 2 * sizeof(u8) + sizeof(u32);
2891                         struct_v = 2;
2892                 }
2893                 /*
2894                  * number of encoded locks is stable, so copy to pagelist
2895                  */
2896                 struct_len = 2 * sizeof(u32) +
2897                             (num_fcntl_locks + num_flock_locks) *
2898                             sizeof(struct ceph_filelock);
2899                 rec.v2.flock_len = cpu_to_le32(struct_len);
2900
2901                 struct_len += sizeof(rec.v2);
2902                 struct_len += sizeof(u32) + pathlen;
2903
2904                 if (struct_v >= 2)
2905                         struct_len += sizeof(u64); /* snap_follows */
2906
2907                 total_len += struct_len;
2908                 err = ceph_pagelist_reserve(pagelist, total_len);
2909
2910                 if (!err) {
2911                         if (recon_state->msg_version >= 3) {
2912                                 ceph_pagelist_encode_8(pagelist, struct_v);
2913                                 ceph_pagelist_encode_8(pagelist, 1);
2914                                 ceph_pagelist_encode_32(pagelist, struct_len);
2915                         }
2916                         ceph_pagelist_encode_string(pagelist, path, pathlen);
2917                         ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
2918                         ceph_locks_to_pagelist(flocks, pagelist,
2919                                                num_fcntl_locks,
2920                                                num_flock_locks);
2921                         if (struct_v >= 2)
2922                                 ceph_pagelist_encode_64(pagelist, snap_follows);
2923                 }
2924                 kfree(flocks);
2925         } else {
2926                 size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
2927                 err = ceph_pagelist_reserve(pagelist, size);
2928                 if (!err) {
2929                         ceph_pagelist_encode_string(pagelist, path, pathlen);
2930                         ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
2931                 }
2932         }
2933
2934         recon_state->nr_caps++;
2935 out_free:
2936         kfree(path);
2937 out_dput:
2938         dput(dentry);
2939         return err;
2940 }
2941
2942
2943 /*
2944  * If an MDS fails and recovers, clients need to reconnect in order to
2945  * reestablish shared state.  This includes all caps issued through
2946  * this session _and_ the snap_realm hierarchy.  Because it's not
2947  * clear which snap realms the mds cares about, we send everything we
2948  * know about.  That ensures we'll then get any new info the
2949  * recovering MDS might have.
2950  *
2951  * This is a relatively heavyweight operation, but it's rare.
2952  *
2953  * called with mdsc->mutex held.
2954  */
2955 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2956                                struct ceph_mds_session *session)
2957 {
2958         struct ceph_msg *reply;
2959         struct rb_node *p;
2960         int mds = session->s_mds;
2961         int err = -ENOMEM;
2962         int s_nr_caps;
2963         struct ceph_pagelist *pagelist;
2964         struct ceph_reconnect_state recon_state;
2965
2966         pr_info("mds%d reconnect start\n", mds);
2967
2968         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
2969         if (!pagelist)
2970                 goto fail_nopagelist;
2971         ceph_pagelist_init(pagelist);
2972
2973         reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
2974         if (!reply)
2975                 goto fail_nomsg;
2976
2977         mutex_lock(&session->s_mutex);
2978         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
2979         session->s_seq = 0;
2980
2981         dout("session %p state %s\n", session,
2982              ceph_session_state_name(session->s_state));
2983
2984         spin_lock(&session->s_gen_ttl_lock);
2985         session->s_cap_gen++;
2986         spin_unlock(&session->s_gen_ttl_lock);
2987
2988         spin_lock(&session->s_cap_lock);
2989         /* don't know if session is readonly */
2990         session->s_readonly = 0;
2991         /*
2992          * notify __ceph_remove_cap() that we are composing cap reconnect.
2993          * If a cap gets released before being added to the cap reconnect,
2994          * __ceph_remove_cap() should skip queuing cap release.
2995          */
2996         session->s_cap_reconnect = 1;
2997         /* drop old cap expires; we're about to reestablish that state */
2998         cleanup_cap_releases(mdsc, session);
2999
3000         /* trim unused caps to reduce MDS's cache rejoin time */
3001         if (mdsc->fsc->sb->s_root)
3002                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
3003
3004         ceph_con_close(&session->s_con);
3005         ceph_con_open(&session->s_con,
3006                       CEPH_ENTITY_TYPE_MDS, mds,
3007                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
3008
3009         /* replay unsafe requests */
3010         replay_unsafe_requests(mdsc, session);
3011
3012         down_read(&mdsc->snap_rwsem);
3013
3014         /* traverse this session's caps */
3015         s_nr_caps = session->s_nr_caps;
3016         err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
3017         if (err)
3018                 goto fail;
3019
3020         recon_state.nr_caps = 0;
3021         recon_state.pagelist = pagelist;
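        /*
         * Pick the newest reconnect encoding the peer supports: v3 adds a
         * versioned cap record (including snap_follows), v2 adds file lock
         * state, and v1 is the legacy format.
         */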
3022         if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
3023                 recon_state.msg_version = 3;
3024         else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
3025                 recon_state.msg_version = 2;
3026         else
3027                 recon_state.msg_version = 1;
3028         err = iterate_session_caps(session, encode_caps_cb, &recon_state);
3029         if (err < 0)
3030                 goto fail;
3031
3032         spin_lock(&session->s_cap_lock);
3033         session->s_cap_reconnect = 0;
3034         spin_unlock(&session->s_cap_lock);
3035
3036         /*
3037          * snaprealms.  we provide mds with the ino, seq (version), and
3038          * parent for all of our realms.  If the mds has any newer info,
3039          * it will tell us.
3040          */
3041         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3042                 struct ceph_snap_realm *realm =
3043                         rb_entry(p, struct ceph_snap_realm, node);
3044                 struct ceph_mds_snaprealm_reconnect sr_rec;
3045
3046                 dout(" adding snap realm %llx seq %lld parent %llx\n",
3047                      realm->ino, realm->seq, realm->parent_ino);
3048                 sr_rec.ino = cpu_to_le64(realm->ino);
3049                 sr_rec.seq = cpu_to_le64(realm->seq);
3050                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
3051                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3052                 if (err)
3053                         goto fail;
3054         }
3055
3056         reply->hdr.version = cpu_to_le16(recon_state.msg_version);
3057
3058         /* raced with cap release? */
3059         if (s_nr_caps != recon_state.nr_caps) {
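                /*
                 * The cap count was the first thing encoded into the
                 * pagelist, so the first __le32 of the first page holds it;
                 * overwrite it with the number of caps actually encoded.
                 */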
3060                 struct page *page = list_first_entry(&pagelist->head,
3061                                                      struct page, lru);
3062                 __le32 *addr = kmap_atomic(page);
3063                 *addr = cpu_to_le32(recon_state.nr_caps);
3064                 kunmap_atomic(addr);
3065         }
3066
3067         reply->hdr.data_len = cpu_to_le32(pagelist->length);
3068         ceph_msg_data_add_pagelist(reply, pagelist);
3069
3070         ceph_early_kick_flushing_caps(mdsc, session);
3071
3072         ceph_con_send(&session->s_con, reply);
3073
3074         mutex_unlock(&session->s_mutex);
3075
3076         mutex_lock(&mdsc->mutex);
3077         __wake_requests(mdsc, &session->s_waiting);
3078         mutex_unlock(&mdsc->mutex);
3079
3080         up_read(&mdsc->snap_rwsem);
3081         return;
3082
3083 fail:
3084         ceph_msg_put(reply);
3085         up_read(&mdsc->snap_rwsem);
3086         mutex_unlock(&session->s_mutex);
3087 fail_nomsg:
3088         ceph_pagelist_release(pagelist);
3089 fail_nopagelist:
3090         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
3091         return;
3092 }
3093
3094
3095 /*
3096  * compare old and new mdsmaps, kicking requests
3097  * and closing out old connections as necessary
3098  *
3099  * called under mdsc->mutex.
3100  */
3101 static void check_new_map(struct ceph_mds_client *mdsc,
3102                           struct ceph_mdsmap *newmap,
3103                           struct ceph_mdsmap *oldmap)
3104 {
3105         int i;
3106         int oldstate, newstate;
3107         struct ceph_mds_session *s;
3108
3109         dout("check_new_map new %u old %u\n",
3110              newmap->m_epoch, oldmap->m_epoch);
3111
3112         for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
3113                 if (mdsc->sessions[i] == NULL)
3114                         continue;
3115                 s = mdsc->sessions[i];
3116                 oldstate = ceph_mdsmap_get_state(oldmap, i);
3117                 newstate = ceph_mdsmap_get_state(newmap, i);
3118
3119                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
3120                      i, ceph_mds_state_name(oldstate),
3121                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
3122                      ceph_mds_state_name(newstate),
3123                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
3124                      ceph_session_state_name(s->s_state));
3125
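                /*
                 * If this mds slot no longer exists or its address changed,
                 * our old session to it is no longer valid.
                 */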
3126                 if (i >= newmap->m_max_mds ||
3127                     memcmp(ceph_mdsmap_get_addr(oldmap, i),
3128                            ceph_mdsmap_get_addr(newmap, i),
3129                            sizeof(struct ceph_entity_addr))) {
3130                         if (s->s_state == CEPH_MDS_SESSION_OPENING) {
3131                                 /* the session never opened, just close it
3132                                  * out now */
3133                                 __wake_requests(mdsc, &s->s_waiting);
3134                                 __unregister_session(mdsc, s);
3135                         } else {
3136                                 /* just close it */
3137                                 mutex_unlock(&mdsc->mutex);
3138                                 mutex_lock(&s->s_mutex);
3139                                 mutex_lock(&mdsc->mutex);
3140                                 ceph_con_close(&s->s_con);
3141                                 mutex_unlock(&s->s_mutex);
3142                                 s->s_state = CEPH_MDS_SESSION_RESTARTING;
3143                         }
3144                 } else if (oldstate == newstate) {
3145                         continue;  /* nothing new with this mds */
3146                 }
3147
3148                 /*
3149                  * send reconnect?
3150                  */
3151                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
3152                     newstate >= CEPH_MDS_STATE_RECONNECT) {
3153                         mutex_unlock(&mdsc->mutex);
3154                         send_mds_reconnect(mdsc, s);
3155                         mutex_lock(&mdsc->mutex);
3156                 }
3157
3158                 /*
3159                  * kick requests on any mds that has gone active.
3160                  */
3161                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
3162                     newstate >= CEPH_MDS_STATE_ACTIVE) {
3163                         if (oldstate != CEPH_MDS_STATE_CREATING &&
3164                             oldstate != CEPH_MDS_STATE_STARTING)
3165                                 pr_info("mds%d recovery completed\n", s->s_mds);
3166                         kick_requests(mdsc, i);
3167                         ceph_kick_flushing_caps(mdsc, s);
3168                         wake_up_session_caps(s, 1);
3169                 }
3170         }
3171
3172         for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
3173                 s = mdsc->sessions[i];
3174                 if (!s)
3175                         continue;
3176                 if (!ceph_mdsmap_is_laggy(newmap, i))
3177                         continue;
3178                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3179                     s->s_state == CEPH_MDS_SESSION_HUNG ||
3180                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
3181                         dout(" connecting to export targets of laggy mds%d\n",
3182                              i);
3183                         __open_export_target_sessions(mdsc, s);
3184                 }
3185         }
3186 }
3187
3188
3189
3190 /*
3191  * leases
3192  */
3193
3194 /*
3195  * caller must hold session s_mutex, dentry->d_lock
3196  */
3197 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
3198 {
3199         struct ceph_dentry_info *di = ceph_dentry(dentry);
3200
3201         ceph_put_mds_session(di->lease_session);
3202         di->lease_session = NULL;
3203 }
3204
3205 static void handle_lease(struct ceph_mds_client *mdsc,
3206                          struct ceph_mds_session *session,
3207                          struct ceph_msg *msg)
3208 {
3209         struct super_block *sb = mdsc->fsc->sb;
3210         struct inode *inode;
3211         struct dentry *parent, *dentry;
3212         struct ceph_dentry_info *di;
3213         int mds = session->s_mds;
3214         struct ceph_mds_lease *h = msg->front.iov_base;
3215         u32 seq;
3216         struct ceph_vino vino;
3217         struct qstr dname;
3218         int release = 0;
3219
3220         dout("handle_lease from mds%d\n", mds);
3221
3222         /* decode */
3223         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
3224                 goto bad;
3225         vino.ino = le64_to_cpu(h->ino);
3226         vino.snap = CEPH_NOSNAP;
3227         seq = le32_to_cpu(h->seq);
3228         dname.name = (void *)h + sizeof(*h) + sizeof(u32);
3229         dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
3230         if (dname.len != get_unaligned_le32(h+1))
3231                 goto bad;
3232
3233         /* lookup inode */
3234         inode = ceph_find_inode(sb, vino);
3235         dout("handle_lease %s, ino %llx %p %.*s\n",
3236              ceph_lease_op_name(h->action), vino.ino, inode,
3237              dname.len, dname.name);
3238
3239         mutex_lock(&session->s_mutex);
3240         session->s_seq++;
3241
3242         if (inode == NULL) {
3243                 dout("handle_lease no inode %llx\n", vino.ino);
3244                 goto release;
3245         }
3246
3247         /* dentry */
3248         parent = d_find_alias(inode);
3249         if (!parent) {
3250                 dout("no parent dentry on inode %p\n", inode);
3251                 WARN_ON(1);
3252                 goto release;  /* hrm... */
3253         }
3254         dname.hash = full_name_hash(parent, dname.name, dname.len);
3255         dentry = d_lookup(parent, &dname);
3256         dput(parent);
3257         if (!dentry)
3258                 goto release;
3259
3260         spin_lock(&dentry->d_lock);
3261         di = ceph_dentry(dentry);
3262         switch (h->action) {
3263         case CEPH_MDS_LEASE_REVOKE:
3264                 if (di->lease_session == session) {
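                        /*
                         * If our lease seq is newer than the one in the
                         * revoke, report it back in the ack below.
                         */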
3265                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
3266                                 h->seq = cpu_to_le32(di->lease_seq);
3267                         __ceph_mdsc_drop_dentry_lease(dentry);
3268                 }
3269                 release = 1;
3270                 break;
3271
3272         case CEPH_MDS_LEASE_RENEW:
3273                 if (di->lease_session == session &&
3274                     di->lease_gen == session->s_cap_gen &&
3275                     di->lease_renew_from &&
3276                     di->lease_renew_after == 0) {
3277                         unsigned long duration =
3278                                 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
3279
3280                         di->lease_seq = seq;
3281                         di->time = di->lease_renew_from + duration;
3282                         di->lease_renew_after = di->lease_renew_from +
3283                                 (duration >> 1);
3284                         di->lease_renew_from = 0;
3285                 }
3286                 break;
3287         }
3288         spin_unlock(&dentry->d_lock);
3289         dput(dentry);
3290
3291         if (!release)
3292                 goto out;
3293
3294 release:
3295         /* let's just reuse the same message */
3296         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
3297         ceph_msg_get(msg);
3298         ceph_con_send(&session->s_con, msg);
3299
3300 out:
3301         iput(inode);
3302         mutex_unlock(&session->s_mutex);
3303         return;
3304
3305 bad:
3306         pr_err("corrupt lease message\n");
3307         ceph_msg_dump(msg);
3308 }
3309
3310 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
3311                               struct inode *inode,
3312                               struct dentry *dentry, char action,
3313                               u32 seq)
3314 {
3315         struct ceph_msg *msg;
3316         struct ceph_mds_lease *lease;
3317         int len = sizeof(*lease) + sizeof(u32);
3318         int dnamelen = 0;
3319
3320         dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
3321              inode, dentry, ceph_lease_op_name(action), session->s_mds);
3322         dnamelen = dentry->d_name.len;
3323         len += dnamelen;
3324
3325         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
3326         if (!msg)
3327                 return;
3328         lease = msg->front.iov_base;
3329         lease->action = action;
3330         lease->ino = cpu_to_le64(ceph_vino(inode).ino);
3331         lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
3332         lease->seq = cpu_to_le32(seq);
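        /* the dentry name follows the header: a 32-bit length, then bytes */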
3333         put_unaligned_le32(dnamelen, lease + 1);
3334         memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
3335
3336         /*
3337          * if this is a preemptive lease RELEASE, no need to
3338          * flush request stream, since the actual request will
3339          * soon follow.
3340          */
3341         msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
3342
3343         ceph_con_send(&session->s_con, msg);
3344 }
3345
3346 /*
3347  * drop all leases (and dentry refs) in preparation for umount
3348  */
3349 static void drop_leases(struct ceph_mds_client *mdsc)
3350 {
3351         int i;
3352
3353         dout("drop_leases\n");
3354         mutex_lock(&mdsc->mutex);
3355         for (i = 0; i < mdsc->max_sessions; i++) {
3356                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3357                 if (!s)
3358                         continue;
3359                 mutex_unlock(&mdsc->mutex);
3360                 mutex_lock(&s->s_mutex);
3361                 mutex_unlock(&s->s_mutex);
3362                 ceph_put_mds_session(s);
3363                 mutex_lock(&mdsc->mutex);
3364         }
3365         mutex_unlock(&mdsc->mutex);
3366 }
3367
3368
3369
3370 /*
3371  * delayed work -- periodically trim expired leases, renew caps with mds
3372  */
3373 static void schedule_delayed(struct ceph_mds_client *mdsc)
3374 {
3375         int delay = 5;
3376         unsigned hz = round_jiffies_relative(HZ * delay);
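        /* re-arm roughly every 5 seconds; rounding lets timer wakeups batch */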
3377         schedule_delayed_work(&mdsc->delayed_work, hz);
3378 }
3379
3380 static void delayed_work(struct work_struct *work)
3381 {
3382         int i;
3383         struct ceph_mds_client *mdsc =
3384                 container_of(work, struct ceph_mds_client, delayed_work.work);
3385         int renew_interval;
3386         int renew_caps;
3387
3388         dout("mdsc delayed_work\n");
3389         ceph_check_delayed_caps(mdsc);
3390
3391         mutex_lock(&mdsc->mutex);
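        /* renew caps once every quarter of the mds session timeout */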
3392         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
3393         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
3394                                    mdsc->last_renew_caps);
3395         if (renew_caps)
3396                 mdsc->last_renew_caps = jiffies;
3397
3398         for (i = 0; i < mdsc->max_sessions; i++) {
3399                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3400                 if (s == NULL)
3401                         continue;
3402                 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
3403                         dout("resending session close request for mds%d\n",
3404                              s->s_mds);
3405                         request_close_session(mdsc, s);
3406                         ceph_put_mds_session(s);
3407                         continue;
3408                 }
3409                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
3410                         if (s->s_state == CEPH_MDS_SESSION_OPEN) {
3411                                 s->s_state = CEPH_MDS_SESSION_HUNG;
3412                                 pr_info("mds%d hung\n", s->s_mds);
3413                         }
3414                 }
3415                 if (s->s_state < CEPH_MDS_SESSION_OPEN) {
3416                         /* this mds has failed or is recovering, just wait */
3417                         ceph_put_mds_session(s);
3418                         continue;
3419                 }
3420                 mutex_unlock(&mdsc->mutex);
3421
3422                 mutex_lock(&s->s_mutex);
3423                 if (renew_caps)
3424                         send_renew_caps(mdsc, s);
3425                 else
3426                         ceph_con_keepalive(&s->s_con);
3427                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3428                     s->s_state == CEPH_MDS_SESSION_HUNG)
3429                         ceph_send_cap_releases(mdsc, s);
3430                 mutex_unlock(&s->s_mutex);
3431                 ceph_put_mds_session(s);
3432
3433                 mutex_lock(&mdsc->mutex);
3434         }
3435         mutex_unlock(&mdsc->mutex);
3436
3437         schedule_delayed(mdsc);
3438 }
3439
3440 int ceph_mdsc_init(struct ceph_fs_client *fsc)
3442 {
3443         struct ceph_mds_client *mdsc;
3444
3445         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
3446         if (!mdsc)
3447                 return -ENOMEM;
3448         mdsc->fsc = fsc;
3449         fsc->mdsc = mdsc;
3450         mutex_init(&mdsc->mutex);
3451         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
3452         if (mdsc->mdsmap == NULL) {
3453                 kfree(mdsc);
3454                 return -ENOMEM;
3455         }
3456
3457         init_completion(&mdsc->safe_umount_waiters);
3458         init_waitqueue_head(&mdsc->session_close_wq);
3459         INIT_LIST_HEAD(&mdsc->waiting_for_map);
3460         mdsc->sessions = NULL;
3461         atomic_set(&mdsc->num_sessions, 0);
3462         mdsc->max_sessions = 0;
3463         mdsc->stopping = 0;
3464         mdsc->last_snap_seq = 0;
3465         init_rwsem(&mdsc->snap_rwsem);
3466         mdsc->snap_realms = RB_ROOT;
3467         INIT_LIST_HEAD(&mdsc->snap_empty);
3468         spin_lock_init(&mdsc->snap_empty_lock);
3469         mdsc->last_tid = 0;
3470         mdsc->oldest_tid = 0;
3471         mdsc->request_tree = RB_ROOT;
3472         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
3473         mdsc->last_renew_caps = jiffies;
3474         INIT_LIST_HEAD(&mdsc->cap_delay_list);
3475         spin_lock_init(&mdsc->cap_delay_lock);
3476         INIT_LIST_HEAD(&mdsc->snap_flush_list);
3477         spin_lock_init(&mdsc->snap_flush_lock);
3478         mdsc->last_cap_flush_tid = 1;
3479         INIT_LIST_HEAD(&mdsc->cap_flush_list);
3480         INIT_LIST_HEAD(&mdsc->cap_dirty);
3481         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
3482         mdsc->num_cap_flushing = 0;
3483         spin_lock_init(&mdsc->cap_dirty_lock);
3484         init_waitqueue_head(&mdsc->cap_flushing_wq);
3485         spin_lock_init(&mdsc->dentry_lru_lock);
3486         INIT_LIST_HEAD(&mdsc->dentry_lru);
3487
3488         ceph_caps_init(mdsc);
3489         ceph_adjust_min_caps(mdsc, fsc->min_caps);
3490
3491         init_rwsem(&mdsc->pool_perm_rwsem);
3492         mdsc->pool_perm_tree = RB_ROOT;
3493
3494         return 0;
3495 }
3496
3497 /*
3498  * Wait for safe replies on open mds requests.  If we time out, drop
3499  * all requests from the tree to avoid dangling dentry refs.
3500  */
3501 static void wait_requests(struct ceph_mds_client *mdsc)
3502 {
3503         struct ceph_options *opts = mdsc->fsc->client->options;
3504         struct ceph_mds_request *req;
3505
3506         mutex_lock(&mdsc->mutex);
3507         if (__get_oldest_req(mdsc)) {
3508                 mutex_unlock(&mdsc->mutex);
3509
3510                 dout("wait_requests waiting for requests\n");
3511                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
3512                                     ceph_timeout_jiffies(opts->mount_timeout));
3513
3514                 /* tear down remaining requests */
3515                 mutex_lock(&mdsc->mutex);
3516                 while ((req = __get_oldest_req(mdsc))) {
3517                         dout("wait_requests timed out on tid %llu\n",
3518                              req->r_tid);
3519                         __unregister_request(mdsc, req);
3520                 }
3521         }
3522         mutex_unlock(&mdsc->mutex);
3523         dout("wait_requests done\n");
3524 }
3525
3526 /*
3527  * called before mount is ro, and before dentries are torn down.
3528  * (hmm, does this still race with new lookups?)
3529  */
3530 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
3531 {
3532         dout("pre_umount\n");
3533         mdsc->stopping = 1;
3534
3535         drop_leases(mdsc);
3536         ceph_flush_dirty_caps(mdsc);
3537         wait_requests(mdsc);
3538
3539         /*
3540          * wait for reply handlers to drop their request refs and
3541          * their inode/dcache refs
3542          */
3543         ceph_msgr_flush();
3544 }
3545
3546 /*
3547  * wait for all write mds requests to flush.
3548  */
3549 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
3550 {
3551         struct ceph_mds_request *req = NULL, *nextreq;
3552         struct rb_node *n;
3553
3554         mutex_lock(&mdsc->mutex);
3555         dout("wait_unsafe_requests want %lld\n", want_tid);
3556 restart:
3557         req = __get_oldest_req(mdsc);
3558         while (req && req->r_tid <= want_tid) {
3559                 /* find next request */
3560                 n = rb_next(&req->r_node);
3561                 if (n)
3562                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
3563                 else
3564                         nextreq = NULL;
3565                 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
3566                     (req->r_op & CEPH_MDS_OP_WRITE)) {
3567                         /* write op */
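                        /*
                         * Pin this request and the next one so neither can
                         * be freed while we drop mdsc->mutex and wait for
                         * the safe reply.
                         */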
3568                         ceph_mdsc_get_request(req);
3569                         if (nextreq)
3570                                 ceph_mdsc_get_request(nextreq);
3571                         mutex_unlock(&mdsc->mutex);
3572                         dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
3573                              req->r_tid, want_tid);
3574                         wait_for_completion(&req->r_safe_completion);
3575                         mutex_lock(&mdsc->mutex);
3576                         ceph_mdsc_put_request(req);
3577                         if (!nextreq)
3578                                 break;  /* no next request existed, so we're done! */
3579                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
3580                                 /* next request was removed from tree */
3581                                 ceph_mdsc_put_request(nextreq);
3582                                 goto restart;
3583                         }
3584                         ceph_mdsc_put_request(nextreq);  /* won't go away */
3585                 }
3586                 req = nextreq;
3587         }
3588         mutex_unlock(&mdsc->mutex);
3589         dout("wait_unsafe_requests done\n");
3590 }
3591
3592 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3593 {
3594         u64 want_tid, want_flush;
3595
3596         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3597                 return;
3598
3599         dout("sync\n");
3600         mutex_lock(&mdsc->mutex);
3601         want_tid = mdsc->last_tid;
3602         mutex_unlock(&mdsc->mutex);
3603
3604         ceph_flush_dirty_caps(mdsc);
3605         spin_lock(&mdsc->cap_dirty_lock);
3606         want_flush = mdsc->last_cap_flush_tid;
3607         if (!list_empty(&mdsc->cap_flush_list)) {
3608                 struct ceph_cap_flush *cf =
3609                         list_last_entry(&mdsc->cap_flush_list,
3610                                         struct ceph_cap_flush, g_list);
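                /* mark the newest pending flush so its completion wakes us */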
3611                 cf->wake = true;
3612         }
3613         spin_unlock(&mdsc->cap_dirty_lock);
3614
3615         dout("sync want tid %lld flush_seq %lld\n",
3616              want_tid, want_flush);
3617
3618         wait_unsafe_requests(mdsc, want_tid);
3619         wait_caps_flush(mdsc, want_flush);
3620 }
3621
3622 /*
3623  * true if all sessions are closed, or we force unmount
3624  */
3625 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
3626 {
3627         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3628                 return true;
3629         return atomic_read(&mdsc->num_sessions) <= skipped;
3630 }
3631
3632 /*
3633  * called after sb is ro.
3634  */
3635 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3636 {
3637         struct ceph_options *opts = mdsc->fsc->client->options;
3638         struct ceph_mds_session *session;
3639         int i;
3640         int skipped = 0;
3641
3642         dout("close_sessions\n");
3643
3644         /* close sessions */
3645         mutex_lock(&mdsc->mutex);
3646         for (i = 0; i < mdsc->max_sessions; i++) {
3647                 session = __ceph_lookup_mds_session(mdsc, i);
3648                 if (!session)
3649                         continue;
3650                 mutex_unlock(&mdsc->mutex);
3651                 mutex_lock(&session->s_mutex);
3652                 if (__close_session(mdsc, session) <= 0)
3653                         skipped++;
3654                 mutex_unlock(&session->s_mutex);
3655                 ceph_put_mds_session(session);
3656                 mutex_lock(&mdsc->mutex);
3657         }
3658         mutex_unlock(&mdsc->mutex);
3659
3660         dout("waiting for sessions to close\n");
3661         wait_event_timeout(mdsc->session_close_wq,
3662                            done_closing_sessions(mdsc, skipped),
3663                            ceph_timeout_jiffies(opts->mount_timeout));
3664
3665         /* tear down remaining sessions */
3666         mutex_lock(&mdsc->mutex);
3667         for (i = 0; i < mdsc->max_sessions; i++) {
3668                 if (mdsc->sessions[i]) {
3669                         session = get_session(mdsc->sessions[i]);
3670                         __unregister_session(mdsc, session);
3671                         mutex_unlock(&mdsc->mutex);
3672                         mutex_lock(&session->s_mutex);
3673                         remove_session_caps(session);
3674                         mutex_unlock(&session->s_mutex);
3675                         ceph_put_mds_session(session);
3676                         mutex_lock(&mdsc->mutex);
3677                 }
3678         }
3679         WARN_ON(!list_empty(&mdsc->cap_delay_list));
3680         mutex_unlock(&mdsc->mutex);
3681
3682         ceph_cleanup_empty_realms(mdsc);
3683
3684         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3685
3686         dout("stopped\n");
3687 }
3688
3689 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
3690 {
3691         struct ceph_mds_session *session;
3692         int mds;
3693
3694         dout("force umount\n");
3695
3696         mutex_lock(&mdsc->mutex);
3697         for (mds = 0; mds < mdsc->max_sessions; mds++) {
3698                 session = __ceph_lookup_mds_session(mdsc, mds);
3699                 if (!session)
3700                         continue;
3701                 mutex_unlock(&mdsc->mutex);
3702                 mutex_lock(&session->s_mutex);
3703                 __close_session(mdsc, session);
3704                 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
3705                         cleanup_session_requests(mdsc, session);
3706                         remove_session_caps(session);
3707                 }
3708                 mutex_unlock(&session->s_mutex);
3709                 ceph_put_mds_session(session);
3710                 mutex_lock(&mdsc->mutex);
3711                 kick_requests(mdsc, mds);
3712         }
3713         __wake_requests(mdsc, &mdsc->waiting_for_map);
3714         mutex_unlock(&mdsc->mutex);
3715 }
3716
3717 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3718 {
3719         dout("stop\n");
3720         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3721         if (mdsc->mdsmap)
3722                 ceph_mdsmap_destroy(mdsc->mdsmap);
3723         kfree(mdsc->sessions);
3724         ceph_caps_finalize(mdsc);
3725         ceph_pool_perm_destroy(mdsc);
3726 }
3727
3728 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
3729 {
3730         struct ceph_mds_client *mdsc = fsc->mdsc;
3731
3732         dout("mdsc_destroy %p\n", mdsc);
3733         ceph_mdsc_stop(mdsc);
3734
3735         /* flush out any connection work with references to us */
3736         ceph_msgr_flush();
3737
3738         fsc->mdsc = NULL;
3739         kfree(mdsc);
3740         dout("mdsc_destroy %p done\n", mdsc);
3741 }
3742
3743 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3744 {
3745         struct ceph_fs_client *fsc = mdsc->fsc;
3746         const char *mds_namespace = fsc->mount_options->mds_namespace;
3747         void *p = msg->front.iov_base;
3748         void *end = p + msg->front.iov_len;
3749         u32 epoch;
3750         u32 map_len;
3751         u32 num_fs;
3752         u32 mount_fscid = (u32)-1;
3753         u8 struct_v, struct_cv;
3754         int err = -EINVAL;
3755
3756         ceph_decode_need(&p, end, sizeof(u32), bad);
3757         epoch = ceph_decode_32(&p);
3758
3759         dout("handle_fsmap epoch %u\n", epoch);
3760
3761         ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
3762         struct_v = ceph_decode_8(&p);
3763         struct_cv = ceph_decode_8(&p);
3764         map_len = ceph_decode_32(&p);
3765
3766         ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
3767         p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
3768
3769         num_fs = ceph_decode_32(&p);
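        /* walk the filesystem list looking for the configured mds_namespace */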
3770         while (num_fs-- > 0) {
3771                 void *info_p, *info_end;
3772                 u32 info_len;
3773                 u8 info_v, info_cv;
3774                 u32 fscid, namelen;
3775
3776                 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
3777                 info_v = ceph_decode_8(&p);
3778                 info_cv = ceph_decode_8(&p);
3779                 info_len = ceph_decode_32(&p);
3780                 ceph_decode_need(&p, end, info_len, bad);
3781                 info_p = p;
3782                 info_end = p + info_len;
3783                 p = info_end;
3784
3785                 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
3786                 fscid = ceph_decode_32(&info_p);
3787                 namelen = ceph_decode_32(&info_p);
3788                 ceph_decode_need(&info_p, info_end, namelen, bad);
3789
3790                 if (mds_namespace &&
3791                     strlen(mds_namespace) == namelen &&
3792                     !strncmp(mds_namespace, (char *)info_p, namelen)) {
3793                         mount_fscid = fscid;
3794                         break;
3795                 }
3796         }
3797
3798         ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
3799         if (mount_fscid != (u32)-1) {
3800                 fsc->client->monc.fs_cluster_id = mount_fscid;
3801                 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
3802                                    0, true);
3803                 ceph_monc_renew_subs(&fsc->client->monc);
3804         } else {
3805                 err = -ENOENT;
3806                 goto err_out;
3807         }
3808         return;
3809 bad:
3810         pr_err("error decoding fsmap\n");
3811 err_out:
3812         mutex_lock(&mdsc->mutex);
3813         mdsc->mdsmap_err = -ENOENT;
3814         __wake_requests(mdsc, &mdsc->waiting_for_map);
3815         mutex_unlock(&mdsc->mutex);
3816         return;
3817 }
3818
3819 /*
3820  * handle mds map update.
3821  */
3822 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3823 {
3824         u32 epoch;
3825         u32 maplen;
3826         void *p = msg->front.iov_base;
3827         void *end = p + msg->front.iov_len;
3828         struct ceph_mdsmap *newmap, *oldmap;
3829         struct ceph_fsid fsid;
3830         int err = -EINVAL;
3831
3832         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
3833         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3834         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
3835                 return;
3836         epoch = ceph_decode_32(&p);
3837         maplen = ceph_decode_32(&p);
3838         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
3839
3840         /* do we need it? */
3841         mutex_lock(&mdsc->mutex);
3842         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
3843                 dout("handle_map epoch %u <= our %u\n",
3844                      epoch, mdsc->mdsmap->m_epoch);
3845                 mutex_unlock(&mdsc->mutex);
3846                 return;
3847         }
3848
3849         newmap = ceph_mdsmap_decode(&p, end);
3850         if (IS_ERR(newmap)) {
3851                 err = PTR_ERR(newmap);
3852                 goto bad_unlock;
3853         }
3854
3855         /* swap into place */
3856         if (mdsc->mdsmap) {
3857                 oldmap = mdsc->mdsmap;
3858                 mdsc->mdsmap = newmap;
3859                 check_new_map(mdsc, newmap, oldmap);
3860                 ceph_mdsmap_destroy(oldmap);
3861         } else {
3862                 mdsc->mdsmap = newmap;  /* first mds map */
3863         }
3864         mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
3865
3866         __wake_requests(mdsc, &mdsc->waiting_for_map);
3867         ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
3868                           mdsc->mdsmap->m_epoch);
3869
3870         mutex_unlock(&mdsc->mutex);
3871         schedule_delayed(mdsc);
3872         return;
3873
3874 bad_unlock:
3875         mutex_unlock(&mdsc->mutex);
3876 bad:
3877         pr_err("error decoding mdsmap %d\n", err);
3878         return;
3879 }
3880
3881 static struct ceph_connection *con_get(struct ceph_connection *con)
3882 {
3883         struct ceph_mds_session *s = con->private;
3884
3885         if (get_session(s)) {
3886                 dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
3887                 return con;
3888         }
3889         dout("mdsc con_get %p FAIL\n", s);
3890         return NULL;
3891 }
3892
3893 static void con_put(struct ceph_connection *con)
3894 {
3895         struct ceph_mds_session *s = con->private;
3896
3897         dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
3898         ceph_put_mds_session(s);
3899 }
3900
3901 /*
3902  * if the client is unresponsive for long enough, the mds will kill
3903  * the session entirely.
3904  */
3905 static void peer_reset(struct ceph_connection *con)
3906 {
3907         struct ceph_mds_session *s = con->private;
3908         struct ceph_mds_client *mdsc = s->s_mdsc;
3909
3910         pr_warn("mds%d closed our session\n", s->s_mds);
3911         send_mds_reconnect(mdsc, s);
3912 }
3913
3914 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
3915 {
3916         struct ceph_mds_session *s = con->private;
3917         struct ceph_mds_client *mdsc = s->s_mdsc;
3918         int type = le16_to_cpu(msg->hdr.type);
3919
3920         mutex_lock(&mdsc->mutex);
3921         if (__verify_registered_session(mdsc, s) < 0) {
3922                 mutex_unlock(&mdsc->mutex);
3923                 goto out;
3924         }
3925         mutex_unlock(&mdsc->mutex);
3926
3927         switch (type) {
3928         case CEPH_MSG_MDS_MAP:
3929                 ceph_mdsc_handle_mdsmap(mdsc, msg);
3930                 break;
3931         case CEPH_MSG_FS_MAP_USER:
3932                 ceph_mdsc_handle_fsmap(mdsc, msg);
3933                 break;
3934         case CEPH_MSG_CLIENT_SESSION:
3935                 handle_session(s, msg);
3936                 break;
3937         case CEPH_MSG_CLIENT_REPLY:
3938                 handle_reply(s, msg);
3939                 break;
3940         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
3941                 handle_forward(mdsc, s, msg);
3942                 break;
3943         case CEPH_MSG_CLIENT_CAPS:
3944                 ceph_handle_caps(s, msg);
3945                 break;
3946         case CEPH_MSG_CLIENT_SNAP:
3947                 ceph_handle_snap(mdsc, s, msg);
3948                 break;
3949         case CEPH_MSG_CLIENT_LEASE:
3950                 handle_lease(mdsc, s, msg);
3951                 break;
3952
3953         default:
3954                 pr_err("received unknown message type %d %s\n", type,
3955                        ceph_msg_type_name(type));
3956         }
3957 out:
3958         ceph_msg_put(msg);
3959 }
3960
3961 /*
3962  * authentication
3963  */
3964
3965 /*
3966  * Note: returned pointer is the address of a structure that's
3967  * managed separately.  Caller must *not* attempt to free it.
3968  */
3969 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3970                                         int *proto, int force_new)
3971 {
3972         struct ceph_mds_session *s = con->private;
3973         struct ceph_mds_client *mdsc = s->s_mdsc;
3974         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3975         struct ceph_auth_handshake *auth = &s->s_auth;
3976
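        /* force_new: throw away any existing authorizer and build a new one */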
3977         if (force_new && auth->authorizer) {
3978                 ceph_auth_destroy_authorizer(auth->authorizer);
3979                 auth->authorizer = NULL;
3980         }
3981         if (!auth->authorizer) {
3982                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3983                                                       auth);
3984                 if (ret)
3985                         return ERR_PTR(ret);
3986         } else {
3987                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3988                                                       auth);
3989                 if (ret)
3990                         return ERR_PTR(ret);
3991         }
3992         *proto = ac->protocol;
3993
3994         return auth;
3995 }
3996
3997
3998 static int verify_authorizer_reply(struct ceph_connection *con)
3999 {
4000         struct ceph_mds_session *s = con->private;
4001         struct ceph_mds_client *mdsc = s->s_mdsc;
4002         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4003
4004         return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
4005 }
4006
4007 static int invalidate_authorizer(struct ceph_connection *con)
4008 {
4009         struct ceph_mds_session *s = con->private;
4010         struct ceph_mds_client *mdsc = s->s_mdsc;
4011         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4012
4013         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
4014
4015         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
4016 }
4017
4018 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
4019                                 struct ceph_msg_header *hdr, int *skip)
4020 {
4021         struct ceph_msg *msg;
4022         int type = (int) le16_to_cpu(hdr->type);
4023         int front_len = (int) le32_to_cpu(hdr->front_len);
4024
4025         if (con->in_msg)
4026                 return con->in_msg;
4027
4028         *skip = 0;
4029         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
4030         if (!msg) {
4031                 pr_err("unable to allocate msg type %d len %d\n",
4032                        type, front_len);
4033                 return NULL;
4034         }
4035
4036         return msg;
4037 }
4038
4039 static int mds_sign_message(struct ceph_msg *msg)
4040 {
4041        struct ceph_mds_session *s = msg->con->private;
4042        struct ceph_auth_handshake *auth = &s->s_auth;
4043
4044        return ceph_auth_sign_message(auth, msg);
4045 }
4046
4047 static int mds_check_message_signature(struct ceph_msg *msg)
4048 {
4049        struct ceph_mds_session *s = msg->con->private;
4050        struct ceph_auth_handshake *auth = &s->s_auth;
4051
4052        return ceph_auth_check_message_signature(auth, msg);
4053 }
4054
4055 static const struct ceph_connection_operations mds_con_ops = {
4056         .get = con_get,
4057         .put = con_put,
4058         .dispatch = dispatch,
4059         .get_authorizer = get_authorizer,
4060         .verify_authorizer_reply = verify_authorizer_reply,
4061         .invalidate_authorizer = invalidate_authorizer,
4062         .peer_reset = peer_reset,
4063         .alloc_msg = mds_alloc_msg,
4064         .sign_message = mds_sign_message,
4065         .check_message_signature = mds_check_message_signature,
4066 };
4067
4068 /* eof */