#include <linux/ceph/ceph_debug.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "mds_client.h"

#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>
/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * from at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */
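/*
 * A minimal usage sketch (illustrative only, not called from this file):
 * a caller that wants to trust cached inode metadata first checks that
 * the relevant *_SHARED cap is still issued.  ceph_caps_issued_mask()
 * is the locked wrapper from super.h; the two helpers in the branches
 * are hypothetical names for whatever the caller would do:
 *
 *	struct ceph_inode_info *ci = ceph_inode(inode);
 *
 *	if (ceph_caps_issued_mask(ci, CEPH_CAP_AUTH_SHARED, 1)) {
 *		use_cached_uid_gid_mode(inode);	// coherent, use the cache
 *	} else {
 *		fetch_attrs_from_mds(inode);	// must ask the MDS
 *	}
 */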
/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;
static char *gcap_string(char *s, int c)
{
        if (c & CEPH_CAP_GSHARED)
                *s++ = 's';
        if (c & CEPH_CAP_GEXCL)
                *s++ = 'x';
        if (c & CEPH_CAP_GCACHE)
                *s++ = 'c';
        if (c & CEPH_CAP_GRD)
                *s++ = 'r';
        if (c & CEPH_CAP_GWR)
                *s++ = 'w';
        if (c & CEPH_CAP_GBUFFER)
                *s++ = 'b';
        if (c & CEPH_CAP_GLAZYIO)
                *s++ = 'l';
        return s;
}
const char *ceph_cap_string(int caps)
{
        int i, c;
        char *s;

        spin_lock(&cap_str_lock);
        i = last_cap_str++;
        if (last_cap_str == MAX_CAP_STR)
                last_cap_str = 0;
        spin_unlock(&cap_str_lock);

        s = cap_str[i];

        if (caps & CEPH_CAP_PIN)
                *s++ = 'p';

        c = (caps >> CEPH_CAP_SAUTH) & 3;
        if (c) {
                *s++ = 'A';
                s = gcap_string(s, c);
        }

        c = (caps >> CEPH_CAP_SLINK) & 3;
        if (c) {
                *s++ = 'L';
                s = gcap_string(s, c);
        }

        c = (caps >> CEPH_CAP_SXATTR) & 3;
        if (c) {
                *s++ = 'X';
                s = gcap_string(s, c);
        }

        c = caps >> CEPH_CAP_SFILE;
        if (c) {
                *s++ = 'F';
                s = gcap_string(s, c);
        }

        if (s == cap_str[i])
                *s++ = '-';
        *s = 0;
        return cap_str[i];
}
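/*
 * For example, ceph_cap_string(CEPH_CAP_PIN | CEPH_CAP_FILE_RD |
 * CEPH_CAP_FILE_CACHE) yields a string like "pFcr": 'p' for the pin,
 * 'F' for the file section, then one lowercase letter per generic bit
 * in the order gcap_string() emits them (here cache before read).
 */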
void ceph_caps_init(struct ceph_mds_client *mdsc)
{
        INIT_LIST_HEAD(&mdsc->caps_list);
        spin_lock_init(&mdsc->caps_list_lock);
}
void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
        struct ceph_cap *cap;

        spin_lock(&mdsc->caps_list_lock);
        while (!list_empty(&mdsc->caps_list)) {
                cap = list_first_entry(&mdsc->caps_list,
                                       struct ceph_cap, caps_item);
                list_del(&cap->caps_item);
                kmem_cache_free(ceph_cap_cachep, cap);
        }
        mdsc->caps_total_count = 0;
        mdsc->caps_avail_count = 0;
        mdsc->caps_use_count = 0;
        mdsc->caps_reserve_count = 0;
        mdsc->caps_min_count = 0;
        spin_unlock(&mdsc->caps_list_lock);
}
void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
        spin_lock(&mdsc->caps_list_lock);
        mdsc->caps_min_count += delta;
        BUG_ON(mdsc->caps_min_count < 0);
        spin_unlock(&mdsc->caps_list_lock);
}
void ceph_reserve_caps(struct ceph_mds_client *mdsc,
                       struct ceph_cap_reservation *ctx, int need)
{
        int i;
        struct ceph_cap *cap;
        int have;
        int alloc = 0;
        LIST_HEAD(newcaps);

        dout("reserve caps ctx=%p need=%d\n", ctx, need);

        /* first reserve any caps that are already allocated */
        spin_lock(&mdsc->caps_list_lock);
        if (mdsc->caps_avail_count >= need)
                have = need;
        else
                have = mdsc->caps_avail_count;
        mdsc->caps_avail_count -= have;
        mdsc->caps_reserve_count += have;
        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                         mdsc->caps_reserve_count +
                                         mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);

        for (i = have; i < need; i++) {
                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
                if (!cap)
                        break;
                list_add(&cap->caps_item, &newcaps);
                alloc++;
        }
        /* we didn't manage to reserve as much as we needed */
        if (have + alloc != need)
                pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
                        ctx, need, have + alloc);

        spin_lock(&mdsc->caps_list_lock);
        mdsc->caps_total_count += alloc;
        mdsc->caps_reserve_count += alloc;
        list_splice(&newcaps, &mdsc->caps_list);

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                         mdsc->caps_reserve_count +
                                         mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);

        ctx->count = need;
        dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
             ctx, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
}
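/*
 * Typical reservation lifecycle, as a sketch (the ctx is normally
 * embedded in a struct ceph_mds_request and zeroed at allocation):
 *
 *	struct ceph_cap_reservation rsv = {};
 *
 *	ceph_reserve_caps(mdsc, &rsv, 2);	// preallocate two caps
 *	cap = ceph_get_cap(mdsc, &rsv);		// consume one of them
 *	...
 *	ceph_unreserve_caps(mdsc, &rsv);	// return whatever is left
 */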
int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
                        struct ceph_cap_reservation *ctx)
{
        dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);

        spin_lock(&mdsc->caps_list_lock);
        BUG_ON(mdsc->caps_reserve_count < ctx->count);
        mdsc->caps_reserve_count -= ctx->count;
        mdsc->caps_avail_count += ctx->count;
        ctx->count = 0;
        dout("unreserve caps %d = %d used + %d resv + %d avail\n",
             mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                         mdsc->caps_reserve_count +
                                         mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);
        return 0;
}
struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
                              struct ceph_cap_reservation *ctx)
{
        struct ceph_cap *cap = NULL;

        /* temporary, until we do something about cap import/export */
        if (!ctx) {
                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
                if (cap) {
                        spin_lock(&mdsc->caps_list_lock);
                        mdsc->caps_use_count++;
                        mdsc->caps_total_count++;
                        spin_unlock(&mdsc->caps_list_lock);
                }
                return cap;
        }

        spin_lock(&mdsc->caps_list_lock);
        dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
             ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        BUG_ON(!ctx->count);
        BUG_ON(ctx->count > mdsc->caps_reserve_count);
        BUG_ON(list_empty(&mdsc->caps_list));

        ctx->count--;
        mdsc->caps_reserve_count--;
        mdsc->caps_use_count++;

        cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
        list_del(&cap->caps_item);

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
               mdsc->caps_reserve_count + mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);
        return cap;
}
void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
        spin_lock(&mdsc->caps_list_lock);
        dout("put_cap %p %d = %d used + %d resv + %d avail\n",
             cap, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        mdsc->caps_use_count--;
        /*
         * Keep some preallocated caps around (ceph_min_count), to
         * avoid lots of free/alloc churn.
         */
        if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
                                      mdsc->caps_min_count) {
                mdsc->caps_total_count--;
                kmem_cache_free(ceph_cap_cachep, cap);
        } else {
                mdsc->caps_avail_count++;
                list_add(&cap->caps_item, &mdsc->caps_list);
        }

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
               mdsc->caps_reserve_count + mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);
}
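/*
 * The pool accounting above maintains a single invariant, re-asserted
 * under caps_list_lock by every path that moves a cap between pools:
 *
 *	caps_total_count == caps_use_count + caps_reserve_count +
 *			    caps_avail_count
 */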
void ceph_reservation_status(struct ceph_fs_client *fsc,
                             int *total, int *avail, int *used, int *reserved,
                             int *min)
{
        struct ceph_mds_client *mdsc = fsc->mdsc;

        if (total)
                *total = mdsc->caps_total_count;
        if (avail)
                *avail = mdsc->caps_avail_count;
        if (used)
                *used = mdsc->caps_use_count;
        if (reserved)
                *reserved = mdsc->caps_reserve_count;
        if (min)
                *min = mdsc->caps_min_count;
}
/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
        struct ceph_cap *cap;
        struct rb_node *n = ci->i_caps.rb_node;

        while (n) {
                cap = rb_entry(n, struct ceph_cap, ci_node);
                if (mds < cap->mds)
                        n = n->rb_left;
                else if (mds > cap->mds)
                        n = n->rb_right;
                else
                        return cap;
        }
        return NULL;
}

struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
        struct ceph_cap *cap;

        spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        spin_unlock(&ci->i_ceph_lock);
        return cap;
}
/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
        struct ceph_cap *cap;
        int mds = -1;
        struct rb_node *p;

        /* prefer mds with WR|BUFFER|EXCL caps */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                mds = cap->mds;
                if (cap->issued & (CEPH_CAP_FILE_WR |
                                   CEPH_CAP_FILE_BUFFER |
                                   CEPH_CAP_FILE_EXCL))
                        break;
        }
        return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds;

        spin_lock(&ci->i_ceph_lock);
        mds = __ceph_get_cap_mds(ceph_inode(inode));
        spin_unlock(&ci->i_ceph_lock);
        return mds;
}
/*
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
                              struct ceph_cap *new)
{
        struct rb_node **p = &ci->i_caps.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_cap *cap = NULL;

        while (*p) {
                parent = *p;
                cap = rb_entry(parent, struct ceph_cap, ci_node);
                if (new->mds < cap->mds)
                        p = &(*p)->rb_left;
                else if (new->mds > cap->mds)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->ci_node, parent, p);
        rb_insert_color(&new->ci_node, &ci->i_caps);
}
/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
{
        struct ceph_mount_options *ma = mdsc->fsc->mount_options;

        ci->i_hold_caps_min = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_min * HZ);
        ci->i_hold_caps_max = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_max * HZ);
        dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
             ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
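/*
 * (With the usual default mount options, caps_wanted_delay_min=5 and
 * caps_wanted_delay_max=60, a cap touched now becomes eligible for
 * release no sooner than ~5s and no later than ~60s from now.  The
 * exact defaults are defined with the mount option parsing, not here.)
 */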
/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
                                struct ceph_inode_info *ci)
{
        __cap_set_timeouts(mdsc, ci);
        dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
             ci->i_ceph_flags, ci->i_hold_caps_max);
        if (!mdsc->stopping) {
                spin_lock(&mdsc->cap_delay_lock);
                if (!list_empty(&ci->i_cap_delay_list)) {
                        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                                goto no_change;
                        list_del_init(&ci->i_cap_delay_list);
                }
                list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
                spin_unlock(&mdsc->cap_delay_lock);
        }
}
/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
                                      struct ceph_inode_info *ci)
{
        dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
        spin_lock(&mdsc->cap_delay_lock);
        ci->i_ceph_flags |= CEPH_I_FLUSH;
        if (!list_empty(&ci->i_cap_delay_list))
                list_del_init(&ci->i_cap_delay_list);
        list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
}
/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
{
        dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
        if (list_empty(&ci->i_cap_delay_list))
                return;
        spin_lock(&mdsc->cap_delay_lock);
        list_del_init(&ci->i_cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
}
/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
                              unsigned issued)
{
        unsigned had = __ceph_caps_issued(ci, NULL);

        /*
         * Each time we receive FILE_CACHE anew, we increment
         * i_rdcache_gen.
         */
        if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
            (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
                ci->i_rdcache_gen++;
        }

        /*
         * if we are newly issued FILE_SHARED, mark dir not complete; we
         * don't know what happened to this directory while we didn't
         * have the cap.
         */
        if ((issued & CEPH_CAP_FILE_SHARED) &&
            (had & CEPH_CAP_FILE_SHARED) == 0) {
                ci->i_shared_gen++;
                if (S_ISDIR(ci->vfs_inode.i_mode)) {
                        dout(" marking %p NOT complete\n", &ci->vfs_inode);
                        __ceph_dir_clear_complete(ci);
                }
        }
}
/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
void ceph_add_cap(struct inode *inode,
                  struct ceph_mds_session *session, u64 cap_id,
                  int fmode, unsigned issued, unsigned wanted,
                  unsigned seq, unsigned mseq, u64 realmino, int flags,
                  struct ceph_cap **new_cap)
{
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap *cap;
        int mds = session->s_mds;
        int actual_wanted;

        dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
             session->s_mds, cap_id, ceph_cap_string(issued), seq);

        /*
         * If we are opening the file, include file mode wanted bits
         * in wanted.
         */
        if (fmode >= 0)
                wanted |= ceph_caps_for_mode(fmode);

        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                cap = *new_cap;
                *new_cap = NULL;

                cap->issued = 0;
                cap->implemented = 0;
                cap->mds = mds;
                cap->mds_wanted = 0;
                cap->mseq = 0;

                cap->ci = ci;
                __insert_cap_node(ci, cap);

                /* add to session cap list */
                cap->session = session;
                spin_lock(&session->s_cap_lock);
                list_add_tail(&cap->session_caps, &session->s_caps);
                session->s_nr_caps++;
                spin_unlock(&session->s_cap_lock);
        } else {
                /*
                 * auth mds of the inode changed.  we received the cap export
                 * message, but still haven't received the cap import message.
                 * handle_cap_export() updated the new auth MDS' cap.
                 *
                 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing
                 * a message that was sent before the cap import message.  So
                 * don't remove caps.
                 */
                if (ceph_seq_cmp(seq, cap->seq) <= 0) {
                        WARN_ON(cap != ci->i_auth_cap);
                        WARN_ON(cap->cap_id != cap_id);
                        seq = cap->seq;
                        mseq = cap->mseq;
                        issued |= cap->issued;
                        flags |= CEPH_CAP_FLAG_AUTH;
                }
        }

        if (!ci->i_snap_realm) {
                /*
                 * add this inode to the appropriate snap realm
                 */
                struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
                                                                realmino);
                if (realm) {
                        spin_lock(&realm->inodes_with_caps_lock);
                        ci->i_snap_realm = realm;
                        list_add(&ci->i_snap_realm_item,
                                 &realm->inodes_with_caps);
                        spin_unlock(&realm->inodes_with_caps_lock);
                } else {
                        pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
                               realmino);
                        WARN_ON(!realm);
                }
        }

        __check_cap_issue(ci, cap, issued);

        /*
         * If we are issued caps we don't want, or the mds' wanted
         * value appears to be off, queue a check so we'll release
         * later and/or update the mds wanted value.
         */
        actual_wanted = __ceph_caps_wanted(ci);
        if ((wanted & ~actual_wanted) ||
            (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
                dout(" issued %s, mds wanted %s, actual %s, queueing\n",
                     ceph_cap_string(issued), ceph_cap_string(wanted),
                     ceph_cap_string(actual_wanted));
                __cap_delay_requeue(mdsc, ci);
        }

        if (flags & CEPH_CAP_FLAG_AUTH) {
                if (ci->i_auth_cap == NULL ||
                    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
                        ci->i_auth_cap = cap;
                        cap->mds_wanted = wanted;
                }
        } else {
                WARN_ON(ci->i_auth_cap == cap);
        }

        dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
             inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
             ceph_cap_string(issued|cap->issued), seq, mds);
        cap->cap_id = cap_id;
        cap->issued = issued;
        cap->implemented |= issued;
        if (ceph_seq_cmp(mseq, cap->mseq) > 0)
                cap->mds_wanted = wanted;
        else
                cap->mds_wanted |= wanted;
        cap->seq = seq;
        cap->issue_seq = seq;
        cap->mseq = mseq;
        cap->cap_gen = session->s_cap_gen;

        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
}
/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
        unsigned long ttl;
        u32 gen;

        spin_lock(&cap->session->s_gen_ttl_lock);
        gen = cap->session->s_cap_gen;
        ttl = cap->session->s_cap_ttl;
        spin_unlock(&cap->session->s_gen_ttl_lock);

        if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
                dout("__cap_is_valid %p cap %p issued %s "
                     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
                     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
                return 0;
        }

        return 1;
}
/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
        int have = ci->i_snap_caps;
        struct ceph_cap *cap;
        struct rb_node *p;

        if (implemented)
                *implemented = 0;
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                dout("__ceph_caps_issued %p cap %p issued %s\n",
                     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
                have |= cap->issued;
                if (implemented)
                        *implemented |= cap->implemented;
        }
        /*
         * exclude caps issued by a non-auth MDS that are being revoked
         * by the auth MDS.  The non-auth MDS should be revoking/exporting
         * these caps, but the message is delayed.
         */
        if (ci->i_auth_cap) {
                cap = ci->i_auth_cap;
                have &= ~cap->implemented | cap->issued;
        }
        return have;
}
/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
        int have = ci->i_snap_caps;
        struct ceph_cap *cap;
        struct rb_node *p;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (cap == ocap)
                        continue;
                if (!__cap_is_valid(cap))
                        continue;
                have |= cap->issued;
        }
        return have;
}
/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
        struct ceph_mds_session *s = cap->session;

        spin_lock(&s->s_cap_lock);
        if (s->s_cap_iterator == NULL) {
                dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
                     s->s_mds);
                list_move_tail(&cap->session_caps, &s->s_caps);
        } else {
                dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
                     &cap->ci->vfs_inode, cap, s->s_mds);
        }
        spin_unlock(&s->s_cap_lock);
}
/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * front of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
        struct ceph_cap *cap;
        struct rb_node *p;
        int have = ci->i_snap_caps;

        if ((have & mask) == mask) {
                dout("__ceph_caps_issued_mask %p snap issued %s"
                     " (mask %s)\n", &ci->vfs_inode,
                     ceph_cap_string(have),
                     ceph_cap_string(mask));
                return 1;
        }

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                if ((cap->issued & mask) == mask) {
                        dout("__ceph_caps_issued_mask %p cap %p issued %s"
                             " (mask %s)\n", &ci->vfs_inode, cap,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch)
                                __touch_cap(cap);
                        return 1;
                }

                /* does a combination of caps satisfy mask? */
                have |= cap->issued;
                if ((have & mask) == mask) {
                        dout("__ceph_caps_issued_mask %p combo issued %s"
                             " (mask %s)\n", &ci->vfs_inode,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch) {
                                struct rb_node *q;

                                /* touch this + preceding caps */
                                __touch_cap(cap);
                                for (q = rb_first(&ci->i_caps); q != p;
                                     q = rb_next(q)) {
                                        cap = rb_entry(q, struct ceph_cap,
                                                       ci_node);
                                        if (!__cap_is_valid(cap))
                                                continue;
                                        __touch_cap(cap);
                                }
                        }
                        return 1;
                }
        }

        return 0;
}
/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
                               struct ceph_cap *ocap, int mask)
{
        struct ceph_cap *cap;
        struct rb_node *p;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (cap != ocap &&
                    (cap->implemented & ~cap->issued & mask))
                        return 1;
        }
        return 0;
}

int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
        struct inode *inode = &ci->vfs_inode;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        ret = __ceph_caps_revoking_other(ci, NULL, mask);
        spin_unlock(&ci->i_ceph_lock);
        dout("ceph_caps_revoking %p %s = %d\n", inode,
             ceph_cap_string(mask), ret);
        return ret;
}
int __ceph_caps_used(struct ceph_inode_info *ci)
{
        int used = 0;

        if (ci->i_pin_ref)
                used |= CEPH_CAP_PIN;
        if (ci->i_rd_ref)
                used |= CEPH_CAP_FILE_RD;
        if (ci->i_rdcache_ref ||
            (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */
             ci->vfs_inode.i_data.nrpages))
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
        if (ci->i_wb_ref || ci->i_wrbuffer_ref)
                used |= CEPH_CAP_FILE_BUFFER;
        return used;
}
/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
        int want = 0;
        int mode;

        for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
                if (ci->i_nr_by_mode[mode])
                        want |= ceph_caps_for_mode(mode);
        return want;
}
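/*
 * For example (per ceph_caps_for_mode(), defined with the shared Ceph
 * fs code): an inode held open O_RDONLY wants roughly CEPH_CAP_PIN |
 * CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
 * holding it open for write as well adds the FILE_EXCL, FILE_WR and
 * FILE_BUFFER bits (plus AUTH/XATTR bits needed for writing).
 */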
/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
        struct ceph_cap *cap;
        struct rb_node *p;
        int mds_wanted = 0;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                if (cap == ci->i_auth_cap)
                        mds_wanted |= cap->mds_wanted;
                else
                        mds_wanted |= (cap->mds_wanted & ~CEPH_CAP_ANY_FILE_WR);
        }
        return mds_wanted;
}
/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
        return !RB_EMPTY_ROOT(&ci->i_caps);
}

int ceph_is_any_caps(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        spin_lock(&ci->i_ceph_lock);
        ret = __ceph_is_any_caps(ci);
        spin_unlock(&ci->i_ceph_lock);

        return ret;
}
static void drop_inode_snap_realm(struct ceph_inode_info *ci)
{
        struct ceph_snap_realm *realm = ci->i_snap_realm;

        spin_lock(&realm->inodes_with_caps_lock);
        list_del_init(&ci->i_snap_realm_item);
        ci->i_snap_realm_counter++;
        ci->i_snap_realm = NULL;
        spin_unlock(&realm->inodes_with_caps_lock);
        ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
                            realm);
}
/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
        struct ceph_mds_session *session = cap->session;
        struct ceph_inode_info *ci = cap->ci;
        struct ceph_mds_client *mdsc =
                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
        int removed = 0;

        dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

        /* remove from session list */
        spin_lock(&session->s_cap_lock);
        if (session->s_cap_iterator == cap) {
                /* not yet, we are iterating over this very cap */
                dout("__ceph_remove_cap delaying %p removal from session %p\n",
                     cap, cap->session);
        } else {
                list_del_init(&cap->session_caps);
                session->s_nr_caps--;
                cap->session = NULL;
                removed = 1;
        }
        /* protect backpointer with s_cap_lock: see iterate_session_caps */
        cap->ci = NULL;

        /*
         * s_cap_reconnect is protected by s_cap_lock.  no one changes
         * s_cap_gen while session is in the reconnect state.
         */
        if (queue_release &&
            (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
                cap->queue_release = 1;
                if (removed) {
                        list_add_tail(&cap->session_caps,
                                      &session->s_cap_releases);
                        session->s_num_cap_releases++;
                        removed = 0;
                }
        } else {
                cap->queue_release = 0;
        }
        cap->cap_ino = ci->i_vino.ino;

        spin_unlock(&session->s_cap_lock);

        /* remove from inode list */
        rb_erase(&cap->ci_node, &ci->i_caps);
        if (ci->i_auth_cap == cap)
                ci->i_auth_cap = NULL;

        if (removed)
                ceph_put_cap(mdsc, cap);

        /*
         * When reconnect is denied we remove session caps forcibly,
         * and i_wr_ref can be non-zero.  If there are ongoing writes,
         * keep i_snap_realm.
         */
        if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
                drop_inode_snap_realm(ci);

        if (!__ceph_is_any_real_caps(ci))
                __cap_delay_cancel(mdsc, ci);
}
/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
                        u64 ino, u64 cid, int op,
                        int caps, int wanted, int dirty,
                        u32 seq, u64 flush_tid, u64 oldest_flush_tid,
                        u32 issue_seq, u32 mseq, u64 size, u64 max_size,
                        struct timespec *mtime, struct timespec *atime,
                        u64 time_warp_seq,
                        kuid_t uid, kgid_t gid, umode_t mode,
                        u64 xattr_version,
                        struct ceph_buffer *xattrs_buf,
                        u64 follows, bool inline_data)
{
        struct ceph_mds_caps *fc;
        struct ceph_msg *msg;
        void *p;
        size_t extra_len;

        dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
             " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
             " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
             cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
             ceph_cap_string(dirty),
             seq, issue_seq, flush_tid, oldest_flush_tid,
             mseq, follows, size, max_size,
             xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

        /* flock buffer size + inline version + inline data size +
         * osd_epoch_barrier + oldest_flush_tid */
        extra_len = 4 + 8 + 4 + 4 + 8;
        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
                           GFP_NOFS, false);
        if (!msg)
                return -ENOMEM;

        msg->hdr.version = cpu_to_le16(6);
        msg->hdr.tid = cpu_to_le64(flush_tid);

        fc = msg->front.iov_base;
        memset(fc, 0, sizeof(*fc));

        fc->cap_id = cpu_to_le64(cid);
        fc->op = cpu_to_le32(op);
        fc->seq = cpu_to_le32(seq);
        fc->issue_seq = cpu_to_le32(issue_seq);
        fc->migrate_seq = cpu_to_le32(mseq);
        fc->caps = cpu_to_le32(caps);
        fc->wanted = cpu_to_le32(wanted);
        fc->dirty = cpu_to_le32(dirty);
        fc->ino = cpu_to_le64(ino);
        fc->snap_follows = cpu_to_le64(follows);

        fc->size = cpu_to_le64(size);
        fc->max_size = cpu_to_le64(max_size);
        if (mtime)
                ceph_encode_timespec(&fc->mtime, mtime);
        if (atime)
                ceph_encode_timespec(&fc->atime, atime);
        fc->time_warp_seq = cpu_to_le32(time_warp_seq);

        fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
        fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
        fc->mode = cpu_to_le32(mode);

        p = fc + 1;
        /* flock buffer size */
        ceph_encode_32(&p, 0);
        /* inline version */
        ceph_encode_64(&p, inline_data ? 0 : CEPH_INLINE_NONE);
        /* inline data size */
        ceph_encode_32(&p, 0);
        /* osd_epoch_barrier */
        ceph_encode_32(&p, 0);
        /* oldest_flush_tid */
        ceph_encode_64(&p, oldest_flush_tid);
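        /*
         * The five encodes above must stay in sync with extra_len:
         * flock blob length (u32), inline version (u64), inline data
         * length (u32), osd_epoch_barrier (u32), oldest_flush_tid (u64)
         * = 4 + 8 + 4 + 4 + 8 bytes.
         */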
        fc->xattr_version = cpu_to_le64(xattr_version);
        if (xattrs_buf) {
                msg->middle = ceph_buffer_get(xattrs_buf);
                fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
                msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
        }

        ceph_con_send(&session->s_con, msg);
        return 0;
}
/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * inode is about to be destroyed, there is no need for i_ceph_lock.
 */
void ceph_queue_caps_release(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct rb_node *p;

        p = rb_first(&ci->i_caps);
        while (p) {
                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
                p = rb_next(p);
                __ceph_remove_cap(cap, true);
        }
}
/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make a half-hearted attempt to invalidate page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                      int op, int used, int want, int retain, int flushing,
                      u64 flush_tid, u64 oldest_flush_tid)
        __releases(cap->ci->i_ceph_lock)
{
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->vfs_inode;
        u64 cap_id = cap->cap_id;
        int held, revoking, dropping, keep;
        u64 seq, issue_seq, mseq, time_warp_seq, follows;
        u64 size, max_size;
        struct timespec mtime, atime;
        int wake = 0;
        umode_t mode;
        kuid_t uid;
        kgid_t gid;
        struct ceph_mds_session *session;
        u64 xattr_version = 0;
        struct ceph_buffer *xattr_blob = NULL;
        int delayed = 0;
        int ret;
        bool inline_data;

        held = cap->issued | cap->implemented;
        revoking = cap->implemented & ~cap->issued;
        retain &= ~revoking;
        dropping = cap->issued & ~retain;

        dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
             inode, cap, cap->session,
             ceph_cap_string(held), ceph_cap_string(held & retain),
             ceph_cap_string(revoking));
        BUG_ON((retain & CEPH_CAP_PIN) == 0);

        session = cap->session;

        /* don't release wanted unless we've waited a bit. */
        if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
            time_before(jiffies, ci->i_hold_caps_min)) {
                dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
                     ceph_cap_string(cap->issued),
                     ceph_cap_string(cap->issued & retain),
                     ceph_cap_string(cap->mds_wanted),
                     ceph_cap_string(want));
                want |= cap->mds_wanted;
                retain |= cap->issued;
                delayed = 1;
        }
        ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

        cap->issued &= retain;  /* drop bits we don't want */
        if (cap->implemented & ~cap->issued) {
                /*
                 * Wake up any waiters on wanted -> needed transition.
                 * This is due to the weird transition from buffered
                 * to sync IO... we need to flush dirty pages _before_
                 * allowing sync writes to avoid reordering.
                 */
                wake = 1;
        }
        cap->implemented &= cap->issued | used;
        cap->mds_wanted = want;

        follows = flushing ? ci->i_head_snapc->seq : 0;

        keep = cap->implemented;
        seq = cap->seq;
        issue_seq = cap->issue_seq;
        mseq = cap->mseq;
        size = inode->i_size;
        ci->i_reported_size = size;
        max_size = ci->i_wanted_max_size;
        ci->i_requested_max_size = max_size;
        mtime = inode->i_mtime;
        atime = inode->i_atime;
        time_warp_seq = ci->i_time_warp_seq;
        uid = inode->i_uid;
        gid = inode->i_gid;
        mode = inode->i_mode;

        if (flushing & CEPH_CAP_XATTR_EXCL) {
                __ceph_build_xattrs_blob(ci);
                xattr_blob = ci->i_xattrs.blob;
                xattr_version = ci->i_xattrs.version;
        }

        inline_data = ci->i_inline_version != CEPH_INLINE_NONE;

        spin_unlock(&ci->i_ceph_lock);

        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
                           op, keep, want, flushing, seq,
                           flush_tid, oldest_flush_tid, issue_seq, mseq,
                           size, max_size, &mtime, &atime, time_warp_seq,
                           uid, gid, mode, xattr_version, xattr_blob,
                           follows, inline_data);
        if (ret < 0) {
                dout("error sending cap msg, must requeue %p\n", inode);
                delayed = 1;
        }

        if (wake)
                wake_up_all(&ci->i_cap_wq);

        return delayed;
}
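/*
 * (A nonzero return means "delayed": either we chose to keep wanted
 * caps because the hold timeout hasn't expired yet, or sending failed;
 * either way the caller is expected to requeue the inode.)
 */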
/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Unless @kick is true, skip cap_snaps that were already sent to
 * the MDS (i.e., during this session).
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
void __ceph_flush_snaps(struct ceph_inode_info *ci,
                        struct ceph_mds_session **psession,
                        int kick)
        __releases(ci->i_ceph_lock)
        __acquires(ci->i_ceph_lock)
{
        struct inode *inode = &ci->vfs_inode;
        int mds;
        struct ceph_cap_snap *capsnap;
        u32 mseq;
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
                                                    session->s_mutex */
        u64 next_follows = 0;  /* keep track of how far we've gotten through the
                                  i_cap_snaps list, and skip these entries next time
                                  around to avoid an infinite loop */

        if (psession)
                session = *psession;

        dout("__flush_snaps %p\n", inode);
retry:
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                /* avoid an infinite loop after retry */
                if (capsnap->follows < next_follows)
                        continue;
                /*
                 * we need to wait for sync writes to complete and for dirty
                 * pages to be written out.
                 */
                if (capsnap->dirty_pages || capsnap->writing)
                        break;

                /* should be removed by ceph_try_drop_cap_snap() */
                BUG_ON(!capsnap->need_flush);

                /* pick mds, take s_mutex */
                if (ci->i_auth_cap == NULL) {
                        dout("no auth cap (migrating?), doing nothing\n");
                        goto out;
                }

                /* only flush each capsnap once */
                if (!kick && !list_empty(&capsnap->flushing_item)) {
                        dout("already flushed %p, skipping\n", capsnap);
                        continue;
                }

                mds = ci->i_auth_cap->session->s_mds;
                mseq = ci->i_auth_cap->mseq;

                if (session && session->s_mds != mds) {
                        dout("oops, wrong session %p mutex\n", session);
                        if (kick)
                                goto out;

                        mutex_unlock(&session->s_mutex);
                        ceph_put_mds_session(session);
                        session = NULL;
                }
                if (!session) {
                        spin_unlock(&ci->i_ceph_lock);
                        mutex_lock(&mdsc->mutex);
                        session = __ceph_lookup_mds_session(mdsc, mds);
                        mutex_unlock(&mdsc->mutex);
                        if (session) {
                                dout("inverting session/ino locks on %p\n",
                                     session);
                                mutex_lock(&session->s_mutex);
                        }
                        /*
                         * if session == NULL, we raced against a cap
                         * deletion or migration.  retry, and we'll
                         * get a better @mds value next time.
                         */
                        spin_lock(&ci->i_ceph_lock);
                        goto retry;
                }

                spin_lock(&mdsc->cap_dirty_lock);
                capsnap->flush_tid = ++mdsc->last_cap_flush_tid;
                spin_unlock(&mdsc->cap_dirty_lock);

                atomic_inc(&capsnap->nref);
                if (list_empty(&capsnap->flushing_item))
                        list_add_tail(&capsnap->flushing_item,
                                      &session->s_cap_snaps_flushing);
                spin_unlock(&ci->i_ceph_lock);

                dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
                     inode, capsnap, capsnap->follows, capsnap->flush_tid);
                send_cap_msg(session, ceph_vino(inode).ino, 0,
                             CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
                             capsnap->dirty, 0, capsnap->flush_tid, 0,
                             0, mseq, capsnap->size, 0,
                             &capsnap->mtime, &capsnap->atime,
                             capsnap->time_warp_seq,
                             capsnap->uid, capsnap->gid, capsnap->mode,
                             capsnap->xattr_version, capsnap->xattr_blob,
                             capsnap->follows, capsnap->inline_data);

                next_follows = capsnap->follows + 1;
                ceph_put_cap_snap(capsnap);

                spin_lock(&ci->i_ceph_lock);
                goto retry;
        }

        /* we flushed them all; remove this inode from the queue */
        spin_lock(&mdsc->snap_flush_lock);
        list_del_init(&ci->i_snap_flush_item);
        spin_unlock(&mdsc->snap_flush_lock);

out:
        if (psession)
                *psession = session;
        else if (session) {
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
        }
}

static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
        spin_lock(&ci->i_ceph_lock);
        __ceph_flush_snaps(ci, NULL, 0);
        spin_unlock(&ci->i_ceph_lock);
}
/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
                           struct ceph_cap_flush **pcf)
{
        struct ceph_mds_client *mdsc =
                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
        struct inode *inode = &ci->vfs_inode;
        int was = ci->i_dirty_caps;
        int dirty = 0;

        if (!ci->i_auth_cap) {
                pr_warn("__mark_dirty_caps %p %llx mask %s, "
                        "but no auth cap (session was closed?)\n",
                        inode, ceph_ino(inode), ceph_cap_string(mask));
                return 0;
        }

        dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
             ceph_cap_string(mask), ceph_cap_string(was),
             ceph_cap_string(was | mask));
        ci->i_dirty_caps |= mask;
        if (was == 0) {
                WARN_ON_ONCE(ci->i_prealloc_cap_flush);
                swap(ci->i_prealloc_cap_flush, *pcf);

                if (!ci->i_head_snapc) {
                        WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
                        ci->i_head_snapc = ceph_get_snap_context(
                                ci->i_snap_realm->cached_context);
                }
                dout(" inode %p now dirty snapc %p auth cap %p\n",
                     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
                BUG_ON(!list_empty(&ci->i_dirty_item));
                spin_lock(&mdsc->cap_dirty_lock);
                list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
                spin_unlock(&mdsc->cap_dirty_lock);
                if (ci->i_flushing_caps == 0) {
                        ihold(inode);
                        dirty |= I_DIRTY_SYNC;
                }
        } else {
                WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
        }
        BUG_ON(list_empty(&ci->i_dirty_item));
        if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
            (mask & CEPH_CAP_FILE_BUFFER))
                dirty |= I_DIRTY_DATASYNC;
        __cap_delay_requeue(mdsc, ci);
        return dirty;
}
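/*
 * A sketch of the expected calling pattern (this mirrors what the
 * write paths elsewhere in fs/ceph do; prealloc_cf is allocated up
 * front with ceph_alloc_cap_flush()):
 *
 *	spin_lock(&ci->i_ceph_lock);
 *	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
 *	spin_unlock(&ci->i_ceph_lock);
 *	if (dirty)
 *		__mark_inode_dirty(inode, dirty);
 */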
static void __add_cap_flushing_to_inode(struct ceph_inode_info *ci,
                                        struct ceph_cap_flush *cf)
{
        struct rb_node **p = &ci->i_cap_flush_tree.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_cap_flush *other = NULL;

        while (*p) {
                parent = *p;
                other = rb_entry(parent, struct ceph_cap_flush, i_node);

                if (cf->tid < other->tid)
                        p = &(*p)->rb_left;
                else if (cf->tid > other->tid)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&cf->i_node, parent, p);
        rb_insert_color(&cf->i_node, &ci->i_cap_flush_tree);
}

static void __add_cap_flushing_to_mdsc(struct ceph_mds_client *mdsc,
                                       struct ceph_cap_flush *cf)
{
        struct rb_node **p = &mdsc->cap_flush_tree.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_cap_flush *other = NULL;

        while (*p) {
                parent = *p;
                other = rb_entry(parent, struct ceph_cap_flush, g_node);

                if (cf->tid < other->tid)
                        p = &(*p)->rb_left;
                else if (cf->tid > other->tid)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&cf->g_node, parent, p);
        rb_insert_color(&cf->g_node, &mdsc->cap_flush_tree);
}
struct ceph_cap_flush *ceph_alloc_cap_flush(void)
{
        return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
}

void ceph_free_cap_flush(struct ceph_cap_flush *cf)
{
        if (cf)
                kmem_cache_free(ceph_cap_flush_cachep, cf);
}

static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
{
        struct rb_node *n = rb_first(&mdsc->cap_flush_tree);

        if (n) {
                struct ceph_cap_flush *cf =
                        rb_entry(n, struct ceph_cap_flush, g_node);
                return cf->tid;
        }
        return 0;
}
/*
 * Add dirty inode to the flushing list.  Assigned a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_ceph_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
                                struct ceph_mds_session *session,
                                u64 *flush_tid, u64 *oldest_flush_tid)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap_flush *cf = NULL;
        int flushing;

        BUG_ON(ci->i_dirty_caps == 0);
        BUG_ON(list_empty(&ci->i_dirty_item));
        BUG_ON(!ci->i_prealloc_cap_flush);

        flushing = ci->i_dirty_caps;
        dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
             ceph_cap_string(flushing),
             ceph_cap_string(ci->i_flushing_caps),
             ceph_cap_string(ci->i_flushing_caps | flushing));
        ci->i_flushing_caps |= flushing;
        ci->i_dirty_caps = 0;
        dout(" inode %p now !dirty\n", inode);

        swap(cf, ci->i_prealloc_cap_flush);
        cf->caps = flushing;

        spin_lock(&mdsc->cap_dirty_lock);
        list_del_init(&ci->i_dirty_item);

        cf->tid = ++mdsc->last_cap_flush_tid;
        __add_cap_flushing_to_mdsc(mdsc, cf);
        *oldest_flush_tid = __get_oldest_flush_tid(mdsc);

        if (list_empty(&ci->i_flushing_item)) {
                list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
                mdsc->num_cap_flushing++;
                dout(" inode %p now flushing tid %llu\n", inode, cf->tid);
        } else {
                list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
                dout(" inode %p now flushing (more) tid %llu\n",
                     inode, cf->tid);
        }
        spin_unlock(&mdsc->cap_dirty_lock);

        __add_cap_flushing_to_inode(ci, cf);

        *flush_tid = cf->tid;
        return flushing;
}
/*
 * try to invalidate mapping pages without blocking.
 */
static int try_nonblocking_invalidate(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        u32 invalidating_gen = ci->i_rdcache_gen;

        spin_unlock(&ci->i_ceph_lock);
        invalidate_mapping_pages(&inode->i_data, 0, -1);
        spin_lock(&ci->i_ceph_lock);

        if (inode->i_data.nrpages == 0 &&
            invalidating_gen == ci->i_rdcache_gen) {
                /* success. */
                dout("try_nonblocking_invalidate %p success\n", inode);
                /* save any racing async invalidate some trouble */
                ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
                return 0;
        }
        dout("try_nonblocking_invalidate %p failed\n", inode);
        return -1;
}
/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
 *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                     struct ceph_mds_session *session)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap *cap;
        u64 flush_tid, oldest_flush_tid;
        int file_wanted, used, cap_used;
        int took_snap_rwsem = 0;     /* true if mdsc->snap_rwsem held */
        int issued, implemented, want, retain, revoking, flushing = 0;
        int mds = -1;   /* keep track of how far we've gone through i_caps list
                           to avoid an infinite loop on retry */
        struct rb_node *p;
        int tried_invalidate = 0;
        int delayed = 0, sent = 0, force_requeue = 0, num;
        int queue_invalidate = 0;
        int is_delayed = flags & CHECK_CAPS_NODELAY;

        /* if we are unmounting, flush any unused caps immediately. */
        if (mdsc->stopping)
                is_delayed = 1;

        spin_lock(&ci->i_ceph_lock);

        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                flags |= CHECK_CAPS_FLUSH;

        /* flush snaps first time around only */
        if (!list_empty(&ci->i_cap_snaps))
                __ceph_flush_snaps(ci, &session, 0);
        goto retry_locked;
retry:
        spin_lock(&ci->i_ceph_lock);
retry_locked:
        file_wanted = __ceph_caps_file_wanted(ci);
        used = __ceph_caps_used(ci);
        issued = __ceph_caps_issued(ci, &implemented);
        revoking = implemented & ~issued;

        want = file_wanted;
        retain = file_wanted | used | CEPH_CAP_PIN;
        if (!mdsc->stopping && inode->i_nlink > 0) {
                if (file_wanted) {
                        retain |= CEPH_CAP_ANY;       /* be greedy */
                } else if (S_ISDIR(inode->i_mode) &&
                           (issued & CEPH_CAP_FILE_SHARED) &&
                           __ceph_dir_is_complete(ci)) {
                        /*
                         * If a directory is complete, we want to keep
                         * the exclusive cap.  So that MDS does not end up
                         * revoking the shared cap on every create/unlink
                         * operation.
                         */
                        want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
                        retain |= want;
                } else {

                        retain |= CEPH_CAP_ANY_SHARED;
                        /*
                         * keep RD only if we didn't have the file open RW,
                         * because then the mds would revoke it anyway to
                         * journal max_size=0.
                         */
                        if (ci->i_max_size == 0)
                                retain |= CEPH_CAP_ANY_RD;
                }
        }

        dout("check_caps %p file_want %s used %s dirty %s flushing %s"
             " issued %s revoking %s retain %s %s%s%s\n", inode,
             ceph_cap_string(file_wanted),
             ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
             ceph_cap_string(ci->i_flushing_caps),
             ceph_cap_string(issued), ceph_cap_string(revoking),
             ceph_cap_string(retain),
             (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
             (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
             (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

        /*
         * If we no longer need to hold onto our old caps, and we may
         * have cached pages, but don't want them, then try to invalidate.
         * If we fail, it's because pages are locked.... try again later.
         */
        if ((!is_delayed || mdsc->stopping) &&
            !S_ISDIR(inode->i_mode) &&          /* ignore readdir cache */
            ci->i_wrbuffer_ref == 0 &&          /* no dirty pages... */
            inode->i_data.nrpages &&            /* have cached pages */
            (revoking & (CEPH_CAP_FILE_CACHE|
                         CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */
            !tried_invalidate) {
                dout("check_caps trying to invalidate on %p\n", inode);
                if (try_nonblocking_invalidate(inode) < 0) {
                        if (revoking & (CEPH_CAP_FILE_CACHE|
                                        CEPH_CAP_FILE_LAZYIO)) {
                                dout("check_caps queuing invalidate\n");
                                queue_invalidate = 1;
                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
                        } else {
                                dout("check_caps failed to invalidate pages\n");
                                /* we failed to invalidate pages.  check these
                                   caps again later. */
                                force_requeue = 1;
                                __cap_set_timeouts(mdsc, ci);
                        }
                }
                tried_invalidate = 1;
                goto retry_locked;
        }

        num = 0;
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                num++;

                /* avoid looping forever */
                if (mds >= cap->mds ||
                    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
                        continue;

                /* NOTE: no side-effects allowed, until we take s_mutex */

                cap_used = used;
                if (ci->i_auth_cap && cap != ci->i_auth_cap)
                        cap_used &= ~ci->i_auth_cap->issued;

                revoking = cap->implemented & ~cap->issued;
                dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
                     cap->mds, cap, ceph_cap_string(cap_used),
                     ceph_cap_string(cap->issued),
                     ceph_cap_string(cap->implemented),
                     ceph_cap_string(revoking));

                if (cap == ci->i_auth_cap &&
                    (cap->issued & CEPH_CAP_FILE_WR)) {
                        /* request larger max_size from MDS? */
                        if (ci->i_wanted_max_size > ci->i_max_size &&
                            ci->i_wanted_max_size > ci->i_requested_max_size) {
                                dout("requesting new max_size\n");
                                goto ack;
                        }

                        /* approaching file_max? */
                        if ((inode->i_size << 1) >= ci->i_max_size &&
                            (ci->i_reported_size << 1) < ci->i_max_size) {
                                dout("i_size approaching max_size\n");
                                goto ack;
                        }
                }
                /* flush anything dirty? */
                if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
                    ci->i_dirty_caps) {
                        dout("flushing dirty caps\n");
                        goto ack;
                }

                /* completed revocation? going down and there are no caps? */
                if (revoking && (revoking & cap_used) == 0) {
                        dout("completed revocation of %s\n",
                             ceph_cap_string(cap->implemented & ~cap->issued));
                        goto ack;
                }

                /* want more caps from mds? */
                if (want & ~(cap->mds_wanted | cap->issued))
                        goto ack;

                /* things we might delay */
                if ((cap->issued & ~retain) == 0 &&
                    cap->mds_wanted == want)
                        continue;     /* nope, all good */

                if (is_delayed)
                        goto ack;

                if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
                    time_before(jiffies, ci->i_hold_caps_max)) {
                        dout(" delaying issued %s -> %s, wanted %s -> %s\n",
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(cap->issued & retain),
                             ceph_cap_string(cap->mds_wanted),
                             ceph_cap_string(want));
                        delayed++;
                        continue;
                }

ack:
                if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
                        dout(" skipping %p I_NOFLUSH set\n", inode);
                        continue;
                }

                if (session && session != cap->session) {
                        dout("oops, wrong session %p mutex\n", session);
                        mutex_unlock(&session->s_mutex);
                        session = NULL;
                }
                if (!session) {
                        session = cap->session;
                        if (mutex_trylock(&session->s_mutex) == 0) {
                                dout("inverting session/ino locks on %p\n",
                                     session);
                                spin_unlock(&ci->i_ceph_lock);
                                if (took_snap_rwsem) {
                                        up_read(&mdsc->snap_rwsem);
                                        took_snap_rwsem = 0;
                                }
                                mutex_lock(&session->s_mutex);
                                goto retry;
                        }
                }
                /* take snap_rwsem after session mutex */
                if (!took_snap_rwsem) {
                        if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
                                dout("inverting snap/in locks on %p\n",
                                     inode);
                                spin_unlock(&ci->i_ceph_lock);
                                down_read(&mdsc->snap_rwsem);
                                took_snap_rwsem = 1;
                                goto retry;
                        }
                        took_snap_rwsem = 1;
                }

                if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
                        flushing = __mark_caps_flushing(inode, session,
                                                        &flush_tid,
                                                        &oldest_flush_tid);
                } else {
                        flushing = 0;
                        flush_tid = 0;
                        spin_lock(&mdsc->cap_dirty_lock);
                        oldest_flush_tid = __get_oldest_flush_tid(mdsc);
                        spin_unlock(&mdsc->cap_dirty_lock);
                }

                mds = cap->mds;  /* remember mds, so we don't repeat */
                sent++;

                /* __send_cap drops i_ceph_lock */
                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
                                      want, retain, flushing,
                                      flush_tid, oldest_flush_tid);
                goto retry; /* retake i_ceph_lock and restart our cap scan. */
        }

        /*
         * Reschedule delayed caps release if we delayed anything,
         * otherwise cancel.
         */
        if (delayed && is_delayed)
                force_requeue = 1;   /* __send_cap delayed release; requeue */
        if (!delayed && !is_delayed)
                __cap_delay_cancel(mdsc, ci);
        else if (!is_delayed || force_requeue)
                __cap_delay_requeue(mdsc, ci);

        spin_unlock(&ci->i_ceph_lock);

        if (queue_invalidate)
                ceph_queue_invalidate(inode);

        if (session)
                mutex_unlock(&session->s_mutex);
        if (took_snap_rwsem)
                up_read(&mdsc->snap_rwsem);
}
/*
 * Try to flush dirty caps back to the auth mds.
 */
static int try_flush_caps(struct inode *inode, u64 *ptid)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_session *session = NULL;
        int flushing = 0;
        u64 flush_tid = 0, oldest_flush_tid = 0;

retry:
        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
                dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
                goto out;
        }
        if (ci->i_dirty_caps && ci->i_auth_cap) {
                struct ceph_cap *cap = ci->i_auth_cap;
                int used = __ceph_caps_used(ci);
                int want = __ceph_caps_wanted(ci);
                int delayed;

                if (!session || session != cap->session) {
                        spin_unlock(&ci->i_ceph_lock);
                        if (session)
                                mutex_unlock(&session->s_mutex);
                        session = cap->session;
                        mutex_lock(&session->s_mutex);
                        goto retry;
                }
                if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
                        goto out;

                flushing = __mark_caps_flushing(inode, session, &flush_tid,
                                                &oldest_flush_tid);

                /* __send_cap drops i_ceph_lock */
                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
                                     (cap->issued | cap->implemented),
                                     flushing, flush_tid, oldest_flush_tid);

                if (delayed) {
                        spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
                        spin_unlock(&ci->i_ceph_lock);
                }
        } else {
                struct rb_node *n = rb_last(&ci->i_cap_flush_tree);
                if (n) {
                        struct ceph_cap_flush *cf =
                                rb_entry(n, struct ceph_cap_flush, i_node);
                        flush_tid = cf->tid;
                }
                flushing = ci->i_flushing_caps;
                spin_unlock(&ci->i_ceph_lock);
        }
out:
        if (session)
                mutex_unlock(&session->s_mutex);

        *ptid = flush_tid;
        return flushing;
}
/*
 * Return true if we've flushed caps through the given flush_tid.
 */
static int caps_are_flushed(struct inode *inode, u64 flush_tid)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap_flush *cf;
        struct rb_node *n;
        int ret = 1;

        spin_lock(&ci->i_ceph_lock);
        n = rb_first(&ci->i_cap_flush_tree);
        if (n) {
                cf = rb_entry(n, struct ceph_cap_flush, i_node);
                if (cf->tid <= flush_tid)
                        ret = 0;
        }
        spin_unlock(&ci->i_ceph_lock);
        return ret;
}
/*
 * Wait on any unsafe replies for the given inode.  First wait on the
 * newest request, and make that the upper bound.  Then, if there are
 * more requests, keep waiting on the oldest as long as it is still older
 * than the original request.
 */
static void sync_write_wait(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct list_head *head = &ci->i_unsafe_writes;
        struct ceph_osd_request *req;
        u64 last_tid;

        if (!S_ISREG(inode->i_mode))
                return;

        spin_lock(&ci->i_unsafe_lock);
        if (list_empty(head))
                goto out;

        /* set upper bound as _last_ entry in chain */
        req = list_last_entry(head, struct ceph_osd_request,
                              r_unsafe_item);
        last_tid = req->r_tid;

        do {
                ceph_osdc_get_request(req);
                spin_unlock(&ci->i_unsafe_lock);
                dout("sync_write_wait on tid %llu (until %llu)\n",
                     req->r_tid, last_tid);
                wait_for_completion(&req->r_safe_completion);
                spin_lock(&ci->i_unsafe_lock);
                ceph_osdc_put_request(req);

                /*
                 * from here on look at first entry in chain, since we
                 * only want to wait for anything older than last_tid
                 */
                if (list_empty(head))
                        break;
                req = list_first_entry(head, struct ceph_osd_request,
                                       r_unsafe_item);
        } while (req->r_tid < last_tid);
out:
        spin_unlock(&ci->i_unsafe_lock);
}
/*
 * wait for any uncommitted directory operations to commit.
 */
static int unsafe_dirop_wait(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct list_head *head = &ci->i_unsafe_dirops;
        struct ceph_mds_request *req;
        u64 last_tid;
        int ret = 0;

        if (!S_ISDIR(inode->i_mode))
                return 0;

        spin_lock(&ci->i_unsafe_lock);
        if (list_empty(head))
                goto out;

        req = list_last_entry(head, struct ceph_mds_request,
                              r_unsafe_dir_item);
        last_tid = req->r_tid;

        do {
                ceph_mdsc_get_request(req);
                spin_unlock(&ci->i_unsafe_lock);

                dout("unsafe_dirop_wait %p wait on tid %llu (until %llu)\n",
                     inode, req->r_tid, last_tid);
                ret = !wait_for_completion_timeout(&req->r_safe_completion,
                                        ceph_timeout_jiffies(req->r_timeout));
                if (ret)
                        ret = -EIO;  /* timed out */

                ceph_mdsc_put_request(req);

                spin_lock(&ci->i_unsafe_lock);
                if (ret || list_empty(head))
                        break;
                req = list_first_entry(head, struct ceph_mds_request,
                                       r_unsafe_dir_item);
        } while (req->r_tid < last_tid);
out:
        spin_unlock(&ci->i_unsafe_lock);
        return ret;
}
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 flush_tid;
        int ret;
        int dirty;

        dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
        sync_write_wait(inode);

        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (ret < 0)
                goto out;

        if (datasync)
                goto out;

        mutex_lock(&inode->i_mutex);

        dirty = try_flush_caps(inode, &flush_tid);
        dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));

        ret = unsafe_dirop_wait(inode);

        /*
         * only wait on non-file metadata writeback (the mds
         * can recover size and mtime, so we don't need to
         * wait for that)
         */
        if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
                ret = wait_event_interruptible(ci->i_cap_wq,
                                        caps_are_flushed(inode, flush_tid));
        }

        mutex_unlock(&inode->i_mutex);
out:
        dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
        return ret;
}
/*
 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
 * queue inode for flush but don't do so immediately, because we can
 * get by with fewer MDS messages if we wait for data writeback to
 * complete first.
 */
int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 flush_tid;
        int err = 0;
        int dirty;
        int wait = wbc->sync_mode == WB_SYNC_ALL;

        dout("write_inode %p wait=%d\n", inode, wait);
        if (wait) {
                dirty = try_flush_caps(inode, &flush_tid);
                if (dirty)
                        err = wait_event_interruptible(ci->i_cap_wq,
                                        caps_are_flushed(inode, flush_tid));
        } else {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;

                spin_lock(&ci->i_ceph_lock);
                if (__ceph_caps_dirty(ci))
                        __cap_delay_requeue_front(mdsc, ci);
                spin_unlock(&ci->i_ceph_lock);
        }
        return err;
}
/*
 * After a recovering MDS goes active, we need to resend any caps
 * we were flushing.
 *
 * Caller holds session->s_mutex.
 */
static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session)
{
        struct ceph_cap_snap *capsnap;

        dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
        list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
                            flushing_item) {
                struct ceph_inode_info *ci = capsnap->ci;
                struct inode *inode = &ci->vfs_inode;
                struct ceph_cap *cap;

                spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
                             cap, capsnap);
                        __ceph_flush_snaps(ci, &session, 1);
                } else {
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
                }
                spin_unlock(&ci->i_ceph_lock);
        }
}
static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
                                struct ceph_mds_session *session,
                                struct ceph_inode_info *ci)
{
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap *cap;
        struct ceph_cap_flush *cf;
        struct rb_node *n;
        int delayed = 0;
        u64 first_tid = 0;
        u64 oldest_flush_tid;

        spin_lock(&mdsc->cap_dirty_lock);
        oldest_flush_tid = __get_oldest_flush_tid(mdsc);
        spin_unlock(&mdsc->cap_dirty_lock);

        while (true) {
                spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (!(cap && cap->session == session)) {
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
                        spin_unlock(&ci->i_ceph_lock);
                        break;
                }

                for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
                        cf = rb_entry(n, struct ceph_cap_flush, i_node);
                        if (cf->tid >= first_tid)
                                break;
                }
                if (!n) {
                        spin_unlock(&ci->i_ceph_lock);
                        break;
                }

                cf = rb_entry(n, struct ceph_cap_flush, i_node);

                first_tid = cf->tid + 1;

                dout("kick_flushing_caps %p cap %p tid %llu %s\n", inode,
                     cap, cf->tid, ceph_cap_string(cf->caps));
                delayed |= __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
                                      __ceph_caps_used(ci),
                                      __ceph_caps_wanted(ci),
                                      cap->issued | cap->implemented,
                                      cf->caps, cf->tid, oldest_flush_tid);
        }
        return delayed;
}
void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session)
{
        struct ceph_inode_info *ci;
        struct ceph_cap *cap;

        dout("early_kick_flushing_caps mds%d\n", session->s_mds);
        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
                spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (!(cap && cap->session == session)) {
                        pr_err("%p auth cap %p not mds%d ???\n",
                               &ci->vfs_inode, cap, session->s_mds);
                        spin_unlock(&ci->i_ceph_lock);
                        continue;
                }

                /*
                 * if flushing caps were revoked, we re-send the cap flush
                 * in the client reconnect stage.  This guarantees the MDS
                 * processes the cap flush message before issuing the
                 * flushing caps to another client.
                 */
                if ((cap->issued & ci->i_flushing_caps) !=
                    ci->i_flushing_caps) {
                        spin_unlock(&ci->i_ceph_lock);
                        if (!__kick_flushing_caps(mdsc, session, ci))
                                continue;
                        spin_lock(&ci->i_ceph_lock);
                }

                spin_unlock(&ci->i_ceph_lock);
        }
}
void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                             struct ceph_mds_session *session)
{
        struct ceph_inode_info *ci;

        kick_flushing_capsnaps(mdsc, session);

        dout("kick_flushing_caps mds%d\n", session->s_mds);
        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
                int delayed = __kick_flushing_caps(mdsc, session, ci);
                if (delayed) {
                        spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
                        spin_unlock(&ci->i_ceph_lock);
                }
        }
}
static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
                                     struct ceph_mds_session *session,
                                     struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap *cap;

        spin_lock(&ci->i_ceph_lock);
        cap = ci->i_auth_cap;
        dout("kick_flushing_inode_caps %p flushing %s\n", inode,
             ceph_cap_string(ci->i_flushing_caps));

        __ceph_flush_snaps(ci, &session, 1);

        if (ci->i_flushing_caps) {
                int delayed;

                spin_lock(&mdsc->cap_dirty_lock);
                list_move_tail(&ci->i_flushing_item,
                               &cap->session->s_cap_flushing);
                spin_unlock(&mdsc->cap_dirty_lock);

                spin_unlock(&ci->i_ceph_lock);

                delayed = __kick_flushing_caps(mdsc, session, ci);
                if (delayed) {
                        spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
                        spin_unlock(&ci->i_ceph_lock);
                }
        } else {
                spin_unlock(&ci->i_ceph_lock);
        }
}
/*
 * Take references to capabilities we hold, so that we don't release
 * them to the MDS prematurely.
 *
 * Protected by i_ceph_lock.
 */
static void __take_cap_refs(struct ceph_inode_info *ci, int got,
			    bool snap_rwsem_locked)
{
	if (got & CEPH_CAP_PIN)
		ci->i_pin_ref++;
	if (got & CEPH_CAP_FILE_RD)
		ci->i_rd_ref++;
	if (got & CEPH_CAP_FILE_CACHE)
		ci->i_rdcache_ref++;
	if (got & CEPH_CAP_FILE_WR) {
		if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
			BUG_ON(!snap_rwsem_locked);
			ci->i_head_snapc = ceph_get_snap_context(
					ci->i_snap_realm->cached_context);
		}
		ci->i_wr_ref++;
	}
	if (got & CEPH_CAP_FILE_BUFFER) {
		if (ci->i_wb_ref == 0)
			ihold(&ci->vfs_inode);
		ci->i_wb_ref++;
		dout("__take_cap_refs %p wb %d -> %d (?)\n",
		     &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
	}
}
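/*
 * Illustrative note (not from the original source): each cap bit taken
 * above maps to its own reference counter, e.g.
 *
 *	__take_cap_refs(ci, CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE, false);
 *
 * bumps ci->i_rd_ref and ci->i_rdcache_ref by one each.  Only the first
 * FILE_BUFFER ref pins the inode (via ihold()), and only the first
 * FILE_WR ref may take a snap context, which is why snap_rwsem must be
 * held in that case.
 */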
/*
 * Try to grab cap references.  Specify those refs we @want, and the
 * minimal set we @need.  Also include the larger offset we are writing
 * to (when applicable), and check against max_size here as well.
 * Note that the caller is responsible for ensuring max_size increases
 * are requested from the MDS.
 */
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
			    loff_t endoff, bool nonblock, int *got, int *err)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	int ret = 0;
	int have, implemented;
	int file_wanted;
	bool snap_rwsem_locked = false;

	dout("get_cap_refs %p need %s want %s\n", inode,
	     ceph_cap_string(need), ceph_cap_string(want));

again:
	spin_lock(&ci->i_ceph_lock);

	/* make sure file is actually open */
	file_wanted = __ceph_caps_file_wanted(ci);
	if ((file_wanted & need) == 0) {
		dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
		     ceph_cap_string(need), ceph_cap_string(file_wanted));
		*err = -EBADF;
		ret = 1;
		goto out_unlock;
	}

	/* finish pending truncate */
	while (ci->i_truncate_pending) {
		spin_unlock(&ci->i_ceph_lock);
		if (snap_rwsem_locked) {
			up_read(&mdsc->snap_rwsem);
			snap_rwsem_locked = false;
		}
		__ceph_do_pending_vmtruncate(inode);
		spin_lock(&ci->i_ceph_lock);
	}

	have = __ceph_caps_issued(ci, &implemented);

	if (have & need & CEPH_CAP_FILE_WR) {
		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
			     inode, endoff, ci->i_max_size);
			if (endoff > ci->i_requested_max_size) {
				*err = -EAGAIN;
				ret = 1;
			}
			goto out_unlock;
		}
		/*
		 * If a sync write is in progress, we must wait, so that we
		 * can get a final snapshot value for size+mtime.
		 */
		if (__ceph_have_pending_cap_snap(ci)) {
			dout("get_cap_refs %p cap_snap_pending\n", inode);
			goto out_unlock;
		}
	}

	if ((have & need) == need) {
		/*
		 * Look at (implemented & ~have & not) so that we keep waiting
		 * on transition from wanted -> needed caps.  This is needed
		 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
		 * going before a prior buffered writeback happens.
		 */
		int not = want & ~(have & need);
		int revoking = implemented & ~have;
		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
		     inode, ceph_cap_string(have), ceph_cap_string(not),
		     ceph_cap_string(revoking));
		if ((revoking & not) == 0) {
			if (!snap_rwsem_locked &&
			    !ci->i_head_snapc &&
			    (need & CEPH_CAP_FILE_WR)) {
				if (!down_read_trylock(&mdsc->snap_rwsem)) {
					/*
					 * we cannot call down_read() when the
					 * task isn't in TASK_RUNNING state
					 */
					if (nonblock) {
						*err = -EAGAIN;
						ret = 1;
						goto out_unlock;
					}

					spin_unlock(&ci->i_ceph_lock);
					down_read(&mdsc->snap_rwsem);
					snap_rwsem_locked = true;
					goto again;
				}
				snap_rwsem_locked = true;
			}
			*got = need | (have & want);
			__take_cap_refs(ci, *got, true);
			ret = 1;
		}
	} else {
		int session_readonly = false;
		if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
			struct ceph_mds_session *s = ci->i_auth_cap->session;
			spin_lock(&s->s_cap_lock);
			session_readonly = s->s_readonly;
			spin_unlock(&s->s_cap_lock);
		}
		if (session_readonly) {
			dout("get_cap_refs %p needed %s but mds%d readonly\n",
			     inode, ceph_cap_string(need), ci->i_auth_cap->mds);
			*err = -EROFS;
			ret = 1;
			goto out_unlock;
		}

		if (!__ceph_is_any_caps(ci) &&
		    ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
			dout("get_cap_refs %p forced umount\n", inode);
			*err = -EIO;
			ret = 1;
			goto out_unlock;
		}

		dout("get_cap_refs %p have %s needed %s\n", inode,
		     ceph_cap_string(have), ceph_cap_string(need));
	}
out_unlock:
	spin_unlock(&ci->i_ceph_lock);
	if (snap_rwsem_locked)
		up_read(&mdsc->snap_rwsem);

	dout("get_cap_refs %p ret %d got %s\n", inode,
	     ret, ceph_cap_string(*got));
	return ret;
}
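/*
 * Usage sketch (illustrative, not from the original file): a write path
 * would need FILE_WR and opportunistically want FILE_BUFFER:
 *
 *	int got = 0, err = 0;
 *	if (try_get_cap_refs(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
 *			     endoff, false, &got, &err)) {
 *		... on nonzero return either err is set (-EBADF, -EROFS,
 *		... -EAGAIN, -EIO) or got holds FILE_WR plus any granted
 *		... want bits
 *	}
 *
 * A zero return means the needed caps are not currently usable; the
 * caller sleeps on i_cap_wq and retries (see ceph_get_caps() below).
 */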
/*
 * Check the offset we are writing up to against our current
 * max_size.  If necessary, tell the MDS we want to write to
 * a bigger file.
 */
static void check_max_size(struct inode *inode, loff_t endoff)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int check = 0;

	/* do we need to explicitly request a larger max_size? */
	spin_lock(&ci->i_ceph_lock);
	if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
		dout("write %p at large endoff %llu, req max_size\n",
		     inode, endoff);
		ci->i_wanted_max_size = endoff;
	}
	/* duplicate ceph_check_caps()'s logic */
	if (ci->i_auth_cap &&
	    (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) &&
	    ci->i_wanted_max_size > ci->i_max_size &&
	    ci->i_wanted_max_size > ci->i_requested_max_size)
		check = 1;
	spin_unlock(&ci->i_ceph_lock);
	if (check)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}
/*
 * Wait for caps, and take cap references.  If we can't get a WR cap
 * due to a small max_size, make sure we check_max_size (and possibly
 * ask the mds) so we don't get hung up indefinitely.
 */
int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
		  loff_t endoff, int *got, struct page **pinned_page)
{
	int _got, ret, err = 0;

	ret = ceph_pool_perm_check(ci, need);
	if (ret < 0)
		return ret;

	while (true) {
		if (endoff > 0)
			check_max_size(&ci->vfs_inode, endoff);

		err = 0;
		_got = 0;
		ret = try_get_cap_refs(ci, need, want, endoff,
				       false, &_got, &err);
		if (ret) {
			if (err == -EAGAIN)
				continue;
			if (err < 0)
				return err;
		} else {
			ret = wait_event_interruptible(ci->i_cap_wq,
					try_get_cap_refs(ci, need, want, endoff,
							 true, &_got, &err));
			if (err == -EAGAIN)
				continue;
			if (err < 0)
				ret = err;
			if (ret < 0)
				return ret;
		}

		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
		    i_size_read(&ci->vfs_inode) > 0) {
			struct page *page =
				find_get_page(ci->vfs_inode.i_mapping, 0);
			if (page) {
				if (PageUptodate(page)) {
					*pinned_page = page;
					break;
				}
				page_cache_release(page);
			}
			/*
			 * drop cap refs first because getattr while
			 * holding caps refs can cause deadlock.
			 */
			ceph_put_cap_refs(ci, _got);
			_got = 0;

			/*
			 * getattr request will bring inline data into
			 * page cache
			 */
			ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
						CEPH_STAT_CAP_INLINE_DATA,
						true);
			if (ret < 0)
				return ret;
			continue;
		}
		break;
	}

	*got = _got;
	return 0;
}
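/*
 * Illustrative caller pattern (a sketch, not part of the original
 * file): roughly what a read path does with the
 * ceph_get_caps()/ceph_put_cap_refs() pair.
 *
 *	int got = 0, err;
 *	struct page *pinned_page = NULL;
 *
 *	err = ceph_get_caps(ci, CEPH_CAP_FILE_RD,
 *			    CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO,
 *			    -1, &got, &pinned_page);
 *	if (err < 0)
 *		return err;
 *	... do the I/O while holding the cap refs ...
 *	if (pinned_page)
 *		page_cache_release(pinned_page);
 *	ceph_put_cap_refs(ci, got);
 */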
/*
 * Take cap refs.  Caller must already know we hold at least one ref
 * on the caps in question or we don't know this is safe.
 */
void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
{
	spin_lock(&ci->i_ceph_lock);
	__take_cap_refs(ci, caps, false);
	spin_unlock(&ci->i_ceph_lock);
}
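/*
 * Illustrative note (not from the original source): unlike
 * ceph_get_caps(), this never blocks or checks what is issued; it only
 * duplicates refs the caller already holds, e.g. keeping a pin across
 * an async operation:
 *
 *	ceph_get_cap_refs(ci, CEPH_CAP_PIN);
 *	...
 *	ceph_put_cap_refs(ci, CEPH_CAP_PIN);
 */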
/*
 * drop cap_snap that is not associated with any snapshot.
 * we don't need to send FLUSHSNAP message for it.
 */
static int ceph_try_drop_cap_snap(struct ceph_cap_snap *capsnap)
{
	if (!capsnap->need_flush &&
	    !capsnap->writing && !capsnap->dirty_pages) {

		dout("dropping cap_snap %p follows %llu\n",
		     capsnap, capsnap->follows);
		ceph_put_snap_context(capsnap->context);
		list_del(&capsnap->ci_item);
		list_del(&capsnap->flushing_item);
		ceph_put_cap_snap(capsnap);
		return 1;
	}
	return 0;
}
/*
 * Release cap refs.
 *
 * If we released the last ref on any given cap, call ceph_check_caps
 * to release (or schedule a release).
 *
 * If we are releasing a WR cap (from a sync write), finalize any affected
 * cap_snap, and wake up any waiters.
 */
void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0, put = 0, flushsnaps = 0, wake = 0;

	spin_lock(&ci->i_ceph_lock);
	if (had & CEPH_CAP_PIN)
		--ci->i_pin_ref;
	if (had & CEPH_CAP_FILE_RD)
		if (--ci->i_rd_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_CACHE)
		if (--ci->i_rdcache_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_BUFFER) {
		if (--ci->i_wb_ref == 0) {
			last++;
			put++;
		}
		dout("put_cap_refs %p wb %d -> %d (?)\n",
		     inode, ci->i_wb_ref+1, ci->i_wb_ref);
	}
	if (had & CEPH_CAP_FILE_WR)
		if (--ci->i_wr_ref == 0) {
			last++;
			if (__ceph_have_pending_cap_snap(ci)) {
				struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
				capsnap->writing = 0;
				if (ceph_try_drop_cap_snap(capsnap))
					put++;
				else if (__ceph_finish_cap_snap(ci, capsnap))
					flushsnaps = 1;
				wake = 1;
			}
			if (ci->i_wrbuffer_ref_head == 0 &&
			    ci->i_dirty_caps == 0 &&
			    ci->i_flushing_caps == 0) {
				BUG_ON(!ci->i_head_snapc);
				ceph_put_snap_context(ci->i_head_snapc);
				ci->i_head_snapc = NULL;
			}
			/* see comment in __ceph_remove_cap() */
			if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
				drop_inode_snap_realm(ci);
		}
	spin_unlock(&ci->i_ceph_lock);

	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
	     last ? " last" : "", put ? " put" : "");

	if (last && !flushsnaps)
		ceph_check_caps(ci, 0, NULL);
	else if (flushsnaps)
		ceph_flush_snaps(ci);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
	while (put-- > 0)
		iput(inode);
}
/*
 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
 * context.  Adjust per-snap dirty page accounting as appropriate.
 * Once all dirty data for a cap_snap is flushed, flush snapped file
 * metadata back to the MDS.  If we dropped the last ref, call
 * ceph_check_caps.
 */
void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
				struct ceph_snap_context *snapc)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0;
	int complete_capsnap = 0;
	int drop_capsnap = 0;
	int found = 0;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	ci->i_wrbuffer_ref -= nr;
	last = !ci->i_wrbuffer_ref;

	if (ci->i_head_snapc == snapc) {
		ci->i_wrbuffer_ref_head -= nr;
		if (ci->i_wrbuffer_ref_head == 0 &&
		    ci->i_wr_ref == 0 &&
		    ci->i_dirty_caps == 0 &&
		    ci->i_flushing_caps == 0) {
			BUG_ON(!ci->i_head_snapc);
			ceph_put_snap_context(ci->i_head_snapc);
			ci->i_head_snapc = NULL;
		}
		dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
		     inode,
		     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
		     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
		     last ? " LAST" : "");
	} else {
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				found = 1;
				break;
			}
		}
		BUG_ON(!found);
		capsnap->dirty_pages -= nr;
		if (capsnap->dirty_pages == 0) {
			complete_capsnap = 1;
			drop_capsnap = ceph_try_drop_cap_snap(capsnap);
		}
		dout("put_wrbuffer_cap_refs on %p cap_snap %p "
		     " snap %lld %d/%d -> %d/%d %s%s\n",
		     inode, capsnap, capsnap->context->seq,
		     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
		     ci->i_wrbuffer_ref, capsnap->dirty_pages,
		     last ? " (wrbuffer last)" : "",
		     complete_capsnap ? " (complete capsnap)" : "");
	}

	spin_unlock(&ci->i_ceph_lock);

	if (last) {
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		iput(inode);
	} else if (complete_capsnap) {
		ceph_flush_snaps(ci);
		wake_up_all(&ci->i_cap_wq);
	}
	if (drop_capsnap)
		iput(inode);
}
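/*
 * Illustrative note (assumptions, not from the original file): the
 * matching increments happen when pages are dirtied; the dirty-page
 * path bumps i_wrbuffer_ref and i_wrbuffer_ref_head under the page's
 * snap context, and writeback completion then undoes it per snapc:
 *
 *	ceph_put_wrbuffer_cap_refs(ci, nr_pages_written, snapc);
 *
 * so each cap_snap's dirty_pages count drains independently and its
 * FLUSHSNAP can be sent as soon as its own pages are clean.
 */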
/*
 * Invalidate unlinked inode's aliases, so we can drop the inode ASAP.
 */
static void invalidate_aliases(struct inode *inode)
{
	struct dentry *dn, *prev = NULL;

	dout("invalidate_aliases inode %p\n", inode);
	d_prune_aliases(inode);
	/*
	 * For a non-directory inode, d_find_alias() only returns a
	 * hashed dentry. After calling d_invalidate(), the dentry
	 * becomes unhashed.
	 *
	 * For a directory inode, d_find_alias() can return an
	 * unhashed dentry. But a directory inode should have
	 * one alias at most.
	 */
	while ((dn = d_find_alias(inode))) {
		if (dn == prev) {
			dput(dn);
			break;
		}
		d_invalidate(dn);
		if (prev)
			dput(prev);
		prev = dn;
	}
	if (prev)
		dput(prev);
}
/*
 * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
 * actually be a revocation if it specifies a smaller cap set.)
 *
 * caller holds s_mutex and i_ceph_lock, we drop both.
 */
static void handle_cap_grant(struct ceph_mds_client *mdsc,
			     struct inode *inode, struct ceph_mds_caps *grant,
			     u64 inline_version,
			     void *inline_data, int inline_len,
			     struct ceph_buffer *xattr_buf,
			     struct ceph_mds_session *session,
			     struct ceph_cap *cap, int issued)
	__releases(ci->i_ceph_lock)
	__releases(mdsc->snap_rwsem)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(grant->seq);
	int newcaps = le32_to_cpu(grant->caps);
	int used, wanted, dirty;
	u64 size = le64_to_cpu(grant->size);
	u64 max_size = le64_to_cpu(grant->max_size);
	struct timespec mtime, atime, ctime;
	int check_caps = 0;
	bool wake = false;
	bool writeback = false;
	bool queue_trunc = false;
	bool queue_invalidate = false;
	bool queue_revalidate = false;
	bool deleted_inode = false;
	bool fill_inline = false;

	dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
	     inode, cap, mds, seq, ceph_cap_string(newcaps));
	dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
	     inode->i_size);

	/*
	 * auth mds of the inode changed. we received the cap export message,
	 * but still haven't received the cap import message. handle_cap_export
	 * updated the new auth MDS' cap.
	 *
	 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message
	 * that was sent before the cap import message. So don't remove caps.
	 */
	if (ceph_seq_cmp(seq, cap->seq) <= 0) {
		WARN_ON(cap != ci->i_auth_cap);
		WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id));
		seq = cap->seq;
		newcaps |= cap->issued;
	}

	/*
	 * If CACHE is being revoked, and we have no dirty buffers,
	 * try to invalidate (once).  (If there are dirty buffers, we
	 * will invalidate _after_ writeback.)
	 */
	if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
	    ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
	    (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
	    !ci->i_wrbuffer_ref) {
		if (try_nonblocking_invalidate(inode)) {
			/* there were locked pages.. invalidate later
			   in a separate thread. */
			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
				queue_invalidate = true;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			}
		}

		ceph_fscache_invalidate(inode);
	}

	/* side effects now are allowed */
	cap->cap_gen = session->s_cap_gen;
	cap->seq = seq;

	__check_cap_issue(ci, cap, newcaps);

	if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(grant->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((newcaps & CEPH_CAP_LINK_SHARED) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0) {
		set_nlink(inode, le32_to_cpu(grant->nlink));
		if (inode->i_nlink == 0 &&
		    (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
			deleted_inode = true;
	}

	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
		int len = le32_to_cpu(grant->xattr_len);
		u64 version = le64_to_cpu(grant->xattr_version);

		if (version > ci->i_xattrs.version) {
			dout(" got new xattrs v%llu on %p len %d\n",
			     version, inode, len);
			if (ci->i_xattrs.blob)
				ceph_buffer_put(ci->i_xattrs.blob);
			ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
			ci->i_xattrs.version = version;
			ceph_forget_all_cached_acls(inode);
		}
	}

	/* Do we need to revalidate our fscache cookie?  Don't bother on the
	 * first cache cap as we already validate at cookie creation time. */
	if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
		queue_revalidate = true;

	if (newcaps & CEPH_CAP_ANY_RD) {
		/* ctime/mtime/atime? */
		ceph_decode_timespec(&mtime, &grant->mtime);
		ceph_decode_timespec(&atime, &grant->atime);
		ceph_decode_timespec(&ctime, &grant->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(grant->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
		/* file layout may have changed */
		ci->i_layout = grant->layout;
		/* size/truncate_seq? */
		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(grant->truncate_seq),
					le64_to_cpu(grant->truncate_size),
					size);
		/* max size increase? */
		if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
			dout("max_size %lld -> %llu\n",
			     ci->i_max_size, max_size);
			ci->i_max_size = max_size;
			if (max_size >= ci->i_wanted_max_size) {
				ci->i_wanted_max_size = 0;  /* reset */
				ci->i_requested_max_size = 0;
			}
			wake = true;
		}
	}

	/* check cap bits */
	wanted = __ceph_caps_wanted(ci);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);
	dout(" my wanted = %s, used = %s, dirty %s\n",
	     ceph_cap_string(wanted),
	     ceph_cap_string(used),
	     ceph_cap_string(dirty));
	if (wanted != le32_to_cpu(grant->wanted)) {
		dout("mds wanted %s -> %s\n",
		     ceph_cap_string(le32_to_cpu(grant->wanted)),
		     ceph_cap_string(wanted));
		/* imported cap may not have correct mds_wanted */
		if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
			check_caps = 1;
	}

	/* revocation, grant, or no-op? */
	if (cap->issued & ~newcaps) {
		int revoking = cap->issued & ~newcaps;

		dout("revocation: %s -> %s (revoking %s)\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps),
		     ceph_cap_string(revoking));
		if (revoking & used & CEPH_CAP_FILE_BUFFER)
			writeback = true;  /* initiate writeback; will delay ack */
		else if (revoking == CEPH_CAP_FILE_CACHE &&
			 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
			 queue_invalidate)
			;  /* do nothing yet, invalidation will be queued */
		else if (cap == ci->i_auth_cap)
			check_caps = 1;  /* check auth cap only */
		else
			check_caps = 2;  /* check all caps */
		cap->issued = newcaps;
		cap->implemented |= newcaps;
	} else if (cap->issued == newcaps) {
		dout("caps unchanged: %s -> %s\n",
		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
	} else {
		dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		/* non-auth MDS is revoking the newly granted caps? */
		if (cap == ci->i_auth_cap &&
		    __ceph_caps_revoking_other(ci, cap, newcaps))
			check_caps = 2;

		cap->issued = newcaps;
		cap->implemented |= newcaps; /* add bits only, to
					      * avoid stepping on a
					      * pending revocation */
		wake = true;
	}
	BUG_ON(cap->issued & ~cap->implemented);

	if (inline_version > 0 && inline_version >= ci->i_inline_version) {
		ci->i_inline_version = inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
		kick_flushing_inode_caps(mdsc, session, inode);
		up_read(&mdsc->snap_rwsem);
		if (newcaps & ~issued)
			wake = true;
	}

	if (fill_inline)
		ceph_fill_inline_data(inode, NULL, inline_data, inline_len);

	if (queue_trunc) {
		ceph_queue_vmtruncate(inode);
		ceph_queue_revalidate(inode);
	} else if (queue_revalidate)
		ceph_queue_revalidate(inode);

	if (writeback)
		/*
		 * queue inode for writeback: we can't actually call
		 * filemap_write_and_wait, etc. from message handler
		 * context.
		 */
		ceph_queue_writeback(inode);
	if (queue_invalidate)
		ceph_queue_invalidate(inode);
	if (deleted_inode)
		invalidate_aliases(inode);
	if (wake)
		wake_up_all(&ci->i_cap_wq);

	if (check_caps == 1)
		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
				session);
	else if (check_caps == 2)
		ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
	else
		mutex_unlock(&session->s_mutex);
}
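/*
 * Worked example (illustrative, not from the original file): with
 * cap->issued = FILE_CACHE|FILE_BUFFER and newcaps = FILE_CACHE,
 *
 *	revoking = cap->issued & ~newcaps == FILE_BUFFER
 *
 * If FILE_BUFFER is also in __ceph_caps_used() (dirty pages
 * outstanding), writeback is queued and the ack to the MDS is
 * effectively delayed until ceph_check_caps() runs after the flush.
 */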
/*
 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
 * MDS has been safely committed.
 */
static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
				 struct ceph_mds_caps *m,
				 struct ceph_mds_session *session,
				 struct ceph_cap *cap)
	__releases(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *cf;
	struct rb_node *n;
	LIST_HEAD(to_remove);
	unsigned seq = le32_to_cpu(m->seq);
	int dirty = le32_to_cpu(m->dirty);
	int cleaned = 0;
	int drop = 0;

	n = rb_first(&ci->i_cap_flush_tree);
	while (n) {
		cf = rb_entry(n, struct ceph_cap_flush, i_node);
		n = rb_next(&cf->i_node);
		if (cf->tid == flush_tid)
			cleaned = cf->caps;
		if (cf->tid <= flush_tid) {
			rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
			list_add_tail(&cf->list, &to_remove);
		} else {
			cleaned &= ~cf->caps;
			if (!cleaned)
				break;
		}
	}

	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
	     " flushing %s -> %s\n",
	     inode, session->s_mds, seq, ceph_cap_string(dirty),
	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));

	if (list_empty(&to_remove) && !cleaned)
		goto out;

	ci->i_flushing_caps &= ~cleaned;

	spin_lock(&mdsc->cap_dirty_lock);

	if (!list_empty(&to_remove)) {
		list_for_each_entry(cf, &to_remove, list)
			rb_erase(&cf->g_node, &mdsc->cap_flush_tree);

		n = rb_first(&mdsc->cap_flush_tree);
		cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
		if (!cf || cf->tid > flush_tid)
			wake_up_all(&mdsc->cap_flushing_wq);
	}

	if (ci->i_flushing_caps == 0) {
		list_del_init(&ci->i_flushing_item);
		if (!list_empty(&session->s_cap_flushing))
			dout(" mds%d still flushing cap on %p\n",
			     session->s_mds,
			     &list_entry(session->s_cap_flushing.next,
					 struct ceph_inode_info,
					 i_flushing_item)->vfs_inode);
		mdsc->num_cap_flushing--;
		dout(" inode %p now !flushing\n", inode);

		if (ci->i_dirty_caps == 0) {
			dout(" inode %p now clean\n", inode);
			BUG_ON(!list_empty(&ci->i_dirty_item));
			drop = 1;
			if (ci->i_wr_ref == 0 &&
			    ci->i_wrbuffer_ref_head == 0) {
				BUG_ON(!ci->i_head_snapc);
				ceph_put_snap_context(ci->i_head_snapc);
				ci->i_head_snapc = NULL;
			}
		} else {
			BUG_ON(list_empty(&ci->i_dirty_item));
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	wake_up_all(&ci->i_cap_wq);

out:
	spin_unlock(&ci->i_ceph_lock);

	while (!list_empty(&to_remove)) {
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, list);
		list_del(&cf->list);
		ceph_free_cap_flush(cf);
	}
	if (drop)
		iput(inode);
}
/*
 * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
 * throw away our cap_snap.
 *
 * Caller holds s_mutex.
 */
static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
				     struct ceph_mds_caps *m,
				     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	u64 follows = le64_to_cpu(m->snap_follows);
	struct ceph_cap_snap *capsnap;
	int drop = 0;

	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
	     inode, ci, session->s_mds, follows);

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		if (capsnap->follows == follows) {
			if (capsnap->flush_tid != flush_tid) {
				dout(" cap_snap %p follows %lld tid %lld !="
				     " %lld\n", capsnap, follows,
				     flush_tid, capsnap->flush_tid);
				break;
			}
			WARN_ON(capsnap->dirty_pages || capsnap->writing);
			dout(" removing %p cap_snap %p follows %lld\n",
			     inode, capsnap, follows);
			ceph_put_snap_context(capsnap->context);
			list_del(&capsnap->ci_item);
			list_del(&capsnap->flushing_item);
			ceph_put_cap_snap(capsnap);
			wake_up_all(&mdsc->cap_flushing_wq);
			drop = 1;
			break;
		} else {
			dout(" skipping cap_snap %p follows %lld\n",
			     capsnap, capsnap->follows);
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	if (drop)
		iput(inode);
}
/*
 * Handle TRUNC from MDS, indicating file truncation.
 *
 * caller holds s_mutex.
 */
static void handle_cap_trunc(struct inode *inode,
			     struct ceph_mds_caps *trunc,
			     struct ceph_mds_session *session)
	__releases(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(trunc->seq);
	u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
	u64 truncate_size = le64_to_cpu(trunc->truncate_size);
	u64 size = le64_to_cpu(trunc->size);
	int implemented = 0;
	int dirty = __ceph_caps_dirty(ci);
	int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
	int queue_trunc = 0;

	issued |= implemented | dirty;

	dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
	     inode, mds, seq, truncate_size, truncate_seq);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  truncate_seq, truncate_size, size);
	spin_unlock(&ci->i_ceph_lock);

	if (queue_trunc) {
		ceph_queue_vmtruncate(inode);
		ceph_fscache_invalidate(inode);
	}
}
/*
 * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
 * different one.  If we are the most recent migration we've seen (as
 * indicated by mseq), make note of the migrating cap bits for the
 * duration (until we see the corresponding IMPORT).
 *
 * caller holds s_mutex
 */
static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
			      struct ceph_mds_cap_peer *ph,
			      struct ceph_mds_session *session)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *tsession = NULL;
	struct ceph_cap *cap, *tcap, *new_cap = NULL;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 t_cap_id;
	unsigned mseq = le32_to_cpu(ex->migrate_seq);
	unsigned t_seq, t_mseq;
	int target, issued;
	int mds = session->s_mds;

	if (ph) {
		t_cap_id = le64_to_cpu(ph->cap_id);
		t_seq = le32_to_cpu(ph->seq);
		t_mseq = le32_to_cpu(ph->mseq);
		target = le32_to_cpu(ph->mds);
	} else {
		t_cap_id = t_seq = t_mseq = 0;
		target = -1;
	}

	dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
	     inode, ci, mds, mseq, target);
retry:
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
		goto out_unlock;

	if (target < 0) {
		__ceph_remove_cap(cap, false);
		goto out_unlock;
	}

	/*
	 * now we know we haven't received the cap import message yet
	 * because the exported cap still exists.
	 */

	issued = cap->issued;
	WARN_ON(issued != cap->implemented);

	tcap = __get_cap_for_mds(ci, target);
	if (tcap) {
		/* already have caps from the target */
		if (tcap->cap_id != t_cap_id ||
		    ceph_seq_cmp(tcap->seq, t_seq) < 0) {
			dout(" updating import cap %p mds%d\n", tcap, target);
			tcap->cap_id = t_cap_id;
			tcap->seq = t_seq - 1;
			tcap->issue_seq = t_seq - 1;
			tcap->mseq = t_mseq;
			tcap->issued |= issued;
			tcap->implemented |= issued;
			if (cap == ci->i_auth_cap)
				ci->i_auth_cap = tcap;
			if (ci->i_flushing_caps && ci->i_auth_cap == tcap) {
				spin_lock(&mdsc->cap_dirty_lock);
				list_move_tail(&ci->i_flushing_item,
					       &tcap->session->s_cap_flushing);
				spin_unlock(&mdsc->cap_dirty_lock);
			}
		}
		__ceph_remove_cap(cap, false);
		goto out_unlock;
	} else if (tsession) {
		/* add placeholder for the export target */
		int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
		ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0,
			     t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);

		__ceph_remove_cap(cap, false);
		goto out_unlock;
	}

	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&session->s_mutex);

	/* open target session */
	tsession = ceph_mdsc_open_export_target_session(mdsc, target);
	if (!IS_ERR(tsession)) {
		if (mds > target) {
			mutex_lock(&session->s_mutex);
			mutex_lock_nested(&tsession->s_mutex,
					  SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&tsession->s_mutex);
			mutex_lock_nested(&session->s_mutex,
					  SINGLE_DEPTH_NESTING);
		}
		new_cap = ceph_get_cap(mdsc, NULL);
	} else {
		WARN_ON(1);
		tsession = NULL;
		target = -1;
	}
	goto retry;

out_unlock:
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&session->s_mutex);
	if (tsession) {
		mutex_unlock(&tsession->s_mutex);
		ceph_put_mds_session(tsession);
	}
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
}
/*
 * Handle cap IMPORT.
 *
 * caller holds s_mutex. acquires i_ceph_lock
 */
static void handle_cap_import(struct ceph_mds_client *mdsc,
			      struct inode *inode, struct ceph_mds_caps *im,
			      struct ceph_mds_cap_peer *ph,
			      struct ceph_mds_session *session,
			      struct ceph_cap **target_cap, int *old_issued)
	__acquires(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap, *ocap, *new_cap = NULL;
	int mds = session->s_mds;
	int issued;
	unsigned caps = le32_to_cpu(im->caps);
	unsigned wanted = le32_to_cpu(im->wanted);
	unsigned seq = le32_to_cpu(im->seq);
	unsigned mseq = le32_to_cpu(im->migrate_seq);
	u64 realmino = le64_to_cpu(im->realm);
	u64 cap_id = le64_to_cpu(im->cap_id);
	u64 p_cap_id;
	int peer;

	if (ph) {
		p_cap_id = le64_to_cpu(ph->cap_id);
		peer = le32_to_cpu(ph->mds);
	} else {
		p_cap_id = 0;
		peer = -1;
	}

	dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
	     inode, ci, mds, mseq, peer);

retry:
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		if (!new_cap) {
			spin_unlock(&ci->i_ceph_lock);
			new_cap = ceph_get_cap(mdsc, NULL);
			goto retry;
		}
		cap = new_cap;
	} else {
		if (new_cap) {
			ceph_put_cap(mdsc, new_cap);
			new_cap = NULL;
		}
	}

	__ceph_caps_issued(ci, &issued);
	issued |= __ceph_caps_dirty(ci);

	ceph_add_cap(inode, session, cap_id, -1, caps, wanted, seq, mseq,
		     realmino, CEPH_CAP_FLAG_AUTH, &new_cap);

	ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
	if (ocap && ocap->cap_id == p_cap_id) {
		dout(" remove export cap %p mds%d flags %d\n",
		     ocap, peer, ph->flags);
		if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
		    (ocap->seq != le32_to_cpu(ph->seq) ||
		     ocap->mseq != le32_to_cpu(ph->mseq))) {
			pr_err("handle_cap_import: mismatched seq/mseq: "
			       "ino (%llx.%llx) mds%d seq %d mseq %d "
			       "importer mds%d has peer seq %d mseq %d\n",
			       ceph_vinop(inode), peer, ocap->seq,
			       ocap->mseq, mds, le32_to_cpu(ph->seq),
			       le32_to_cpu(ph->mseq));
		}
		__ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
	}

	/* make sure we re-request max_size, if necessary */
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	*old_issued = issued;
	*target_cap = cap;
}
/*
 * Handle a caps message from the MDS.
 *
 * Identify the appropriate session, inode, and call the right handler
 * based on the cap op.
 */
void ceph_handle_caps(struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	struct ceph_mds_caps *h;
	struct ceph_mds_cap_peer *peer = NULL;
	struct ceph_snap_realm *realm;
	int mds = session->s_mds;
	int op, issued;
	u32 seq, mseq;
	struct ceph_vino vino;
	u64 cap_id;
	u64 size, max_size;
	u64 tid;
	u64 inline_version = 0;
	void *inline_data = NULL;
	u32  inline_len = 0;
	void *snaptrace;
	size_t snaptrace_len;
	void *p, *end;

	dout("handle_caps from mds%d\n", mds);

	/* decode */
	end = msg->front.iov_base + msg->front.iov_len;
	tid = le64_to_cpu(msg->hdr.tid);
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = msg->front.iov_base;
	op = le32_to_cpu(h->op);
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	cap_id = le64_to_cpu(h->cap_id);
	seq = le32_to_cpu(h->seq);
	mseq = le32_to_cpu(h->migrate_seq);
	size = le64_to_cpu(h->size);
	max_size = le64_to_cpu(h->max_size);

	snaptrace = h + 1;
	snaptrace_len = le32_to_cpu(h->snap_trace_len);
	p = snaptrace + snaptrace_len;

	if (le16_to_cpu(msg->hdr.version) >= 2) {
		u32 flock_len;
		ceph_decode_32_safe(&p, end, flock_len, bad);
		if (p + flock_len > end)
			goto bad;
		p += flock_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 3) {
		if (op == CEPH_CAP_OP_IMPORT) {
			if (p + sizeof(*peer) > end)
				goto bad;
			peer = p;
			p += sizeof(*peer);
		} else if (op == CEPH_CAP_OP_EXPORT) {
			/* recorded in unused fields */
			peer = (void *)&h->size;
		}
	}

	if (le16_to_cpu(msg->hdr.version) >= 4) {
		ceph_decode_64_safe(&p, end, inline_version, bad);
		ceph_decode_32_safe(&p, end, inline_len, bad);
		if (p + inline_len > end)
			goto bad;
		inline_data = p;
		p += inline_len;
	}

	/* lookup ino */
	inode = ceph_find_inode(sb, vino);
	ci = ceph_inode(inode);
	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
	     vino.snap, inode);

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
	     (unsigned)seq);

	if (!inode) {
		dout(" i don't have ino %llx\n", vino.ino);

		if (op == CEPH_CAP_OP_IMPORT) {
			cap = ceph_get_cap(mdsc, NULL);
			cap->cap_ino = vino.ino;
			cap->queue_release = 1;
			cap->cap_id = cap_id;
			cap->mseq = mseq;
			cap->seq = seq;
			spin_lock(&session->s_cap_lock);
			list_add_tail(&cap->session_caps,
				      &session->s_cap_releases);
			session->s_num_cap_releases++;
			spin_unlock(&session->s_cap_lock);
		}
		goto flush_cap_releases;
	}

	/* these will work even if we don't have a cap yet */
	switch (op) {
	case CEPH_CAP_OP_FLUSHSNAP_ACK:
		handle_cap_flushsnap_ack(inode, tid, h, session);
		goto done;

	case CEPH_CAP_OP_EXPORT:
		handle_cap_export(inode, h, peer, session);
		goto done_unlocked;

	case CEPH_CAP_OP_IMPORT:
		realm = NULL;
		if (snaptrace_len) {
			down_write(&mdsc->snap_rwsem);
			ceph_update_snap_trace(mdsc, snaptrace,
					       snaptrace + snaptrace_len,
					       false, &realm);
			downgrade_write(&mdsc->snap_rwsem);
		} else {
			down_read(&mdsc->snap_rwsem);
		}
		handle_cap_import(mdsc, inode, h, peer, session,
				  &cap, &issued);
		handle_cap_grant(mdsc, inode, h,
				 inline_version, inline_data, inline_len,
				 msg->middle, session, cap, issued);
		if (realm)
			ceph_put_snap_realm(mdsc, realm);
		goto done_unlocked;
	}

	/* the rest require a cap */
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ceph_inode(inode), mds);
	if (!cap) {
		dout(" no cap on %p ino %llx.%llx from mds%d\n",
		     inode, ceph_ino(inode), ceph_snap(inode), mds);
		spin_unlock(&ci->i_ceph_lock);
		goto flush_cap_releases;
	}

	/* note that each of these drops i_ceph_lock for us */
	switch (op) {
	case CEPH_CAP_OP_REVOKE:
	case CEPH_CAP_OP_GRANT:
		__ceph_caps_issued(ci, &issued);
		issued |= __ceph_caps_dirty(ci);
		handle_cap_grant(mdsc, inode, h,
				 inline_version, inline_data, inline_len,
				 msg->middle, session, cap, issued);
		goto done_unlocked;

	case CEPH_CAP_OP_FLUSH_ACK:
		handle_cap_flush_ack(inode, tid, h, session, cap);
		break;

	case CEPH_CAP_OP_TRUNC:
		handle_cap_trunc(inode, h, session);
		break;

	default:
		spin_unlock(&ci->i_ceph_lock);
		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
		       ceph_cap_op_name(op));
	}

	goto done;

flush_cap_releases:
	/*
	 * send any cap release message to try to move things
	 * along for the mds (who clearly thinks we still have this
	 * cap).
	 */
	ceph_send_cap_releases(mdsc, session);

done:
	mutex_unlock(&session->s_mutex);
done_unlocked:
	iput(inode);
	return;

bad:
	pr_err("ceph_handle_caps: corrupt message\n");
	ceph_msg_dump(msg);
	return;
}
/*
 * Delayed work handler to process end of delayed cap release LRU list.
 */
void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	int flags = CHECK_CAPS_NODELAY;

	dout("check_delayed_caps\n");
	while (1) {
		spin_lock(&mdsc->cap_delay_lock);
		if (list_empty(&mdsc->cap_delay_list))
			break;
		ci = list_first_entry(&mdsc->cap_delay_list,
				      struct ceph_inode_info,
				      i_cap_delay_list);
		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max))
			break;
		list_del_init(&ci->i_cap_delay_list);
		spin_unlock(&mdsc->cap_delay_lock);
		dout("check_delayed_caps on %p\n", &ci->vfs_inode);
		ceph_check_caps(ci, flags, NULL);
	}
	spin_unlock(&mdsc->cap_delay_lock);
}
/*
 * Flush all dirty caps to the mds
 */
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;

	dout("flush_dirty_caps\n");
	spin_lock(&mdsc->cap_dirty_lock);
	while (!list_empty(&mdsc->cap_dirty)) {
		ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
				      i_dirty_item);
		inode = &ci->vfs_inode;
		ihold(inode);
		dout("flush_dirty_caps %p\n", inode);
		spin_unlock(&mdsc->cap_dirty_lock);
		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
		iput(inode);
		spin_lock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	dout("flush_dirty_caps done\n");
}
/*
 * Drop open file reference.  If we were the last open file,
 * we may need to release capabilities to the MDS (or schedule
 * their delayed release).
 */
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
	     ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
	BUG_ON(ci->i_nr_by_mode[fmode] == 0);
	if (--ci->i_nr_by_mode[fmode] == 0)
		last++;
	spin_unlock(&ci->i_ceph_lock);

	if (last && ci->i_vino.snap == CEPH_NOSNAP)
		ceph_check_caps(ci, 0, NULL);
}
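/*
 * Illustrative pairing (not from the original file): ceph_get_fmode()
 * (defined elsewhere in this file) increments i_nr_by_mode[fmode] at
 * open time, so a file open for read/write conceptually does:
 *
 *	ceph_get_fmode(ci, CEPH_FILE_MODE_RDWR);	at open()
 *	...
 *	ceph_put_fmode(ci, CEPH_FILE_MODE_RDWR);	at release()
 *
 * and the final put triggers ceph_check_caps() so caps that are no
 * longer wanted can be returned to the MDS.
 */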
/*
 * Helpers for embedding cap and dentry lease releases into mds
 * requests.
 *
 * @force is used by dentry_release (below) to force inclusion of a
 * record for the directory inode, even when there aren't any caps to
 * release.
 */
int ceph_encode_inode_release(void **p, struct inode *inode,
			      int mds, int drop, int unless, int force)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	struct ceph_mds_request_release *rel = *p;
	int used, dirty;
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);

	dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
	     inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
	     ceph_cap_string(unless));

	/* only drop unused, clean caps */
	drop &= ~(used | dirty);

	cap = __get_cap_for_mds(ci, mds);
	if (cap && __cap_is_valid(cap)) {
		if (force ||
		    ((cap->issued & drop) &&
		     (cap->issued & unless) == 0)) {
			if ((cap->issued & drop) &&
			    (cap->issued & unless) == 0) {
				int wanted = __ceph_caps_wanted(ci);
				if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
					wanted |= cap->mds_wanted;
				dout("encode_inode_release %p cap %p "
				     "%s -> %s, wanted %s -> %s\n", inode, cap,
				     ceph_cap_string(cap->issued),
				     ceph_cap_string(cap->issued & ~drop),
				     ceph_cap_string(cap->mds_wanted),
				     ceph_cap_string(wanted));

				cap->issued &= ~drop;
				cap->implemented &= ~drop;
				cap->mds_wanted = wanted;
			} else {
				dout("encode_inode_release %p cap %p %s"
				     " (force)\n", inode, cap,
				     ceph_cap_string(cap->issued));
			}

			rel->ino = cpu_to_le64(ceph_ino(inode));
			rel->cap_id = cpu_to_le64(cap->cap_id);
			rel->seq = cpu_to_le32(cap->seq);
			rel->issue_seq = cpu_to_le32(cap->issue_seq);
			rel->mseq = cpu_to_le32(cap->mseq);
			rel->caps = cpu_to_le32(cap->implemented);
			rel->wanted = cpu_to_le32(cap->mds_wanted);
			rel->dname_len = 0;
			rel->dname_seq = 0;
			*p += sizeof(*rel);
			ret = 1;
		} else {
			dout("encode_inode_release %p cap %p %s\n",
			     inode, cap, ceph_cap_string(cap->issued));
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
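/*
 * Usage sketch (illustrative, not from the original file): callers
 * building an MDS request append release records in place, e.g.
 *
 *	releases += ceph_encode_inode_release(&p, inode, mds,
 *					      req->r_inode_drop,
 *					      req->r_inode_unless, 0);
 *
 * where p points into the request buffer; the return value counts the
 * records actually written (0 or 1 here).
 */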
int ceph_encode_dentry_release(void **p, struct dentry *dentry,
			       int mds, int drop, int unless)
{
	struct inode *dir = d_inode(dentry->d_parent);
	struct ceph_mds_request_release *rel = *p;
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int force = 0;
	int ret;

	/*
	 * force a record for the directory caps if we have a dentry lease.
	 * this is racy (can't take i_ceph_lock and d_lock together), but it
	 * doesn't have to be perfect; the mds will revoke anything we don't
	 * drop.
	 */
	spin_lock(&dentry->d_lock);
	if (di->lease_session && di->lease_session->s_mds == mds)
		force = 1;
	spin_unlock(&dentry->d_lock);

	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);

	spin_lock(&dentry->d_lock);
	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
		dout("encode_dentry_release %p mds%d seq %d\n",
		     dentry, mds, (int)di->lease_seq);
		rel->dname_len = cpu_to_le32(dentry->d_name.len);
		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
		*p += dentry->d_name.len;
		rel->dname_seq = cpu_to_le32(di->lease_seq);
		__ceph_mdsc_drop_dentry_lease(dentry);
	}
	spin_unlock(&dentry->d_lock);
	return ret;
}