#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

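/*
 * For example (an illustrative sketch, not a call site in this file):
 * a caller that has decoded an ino/snap pair from an MDS reply builds
 * a ceph_vino and lets ceph_get_inode() find or create the VFS inode.
 * The ino value below is made up.
 *
 *	struct ceph_vino v = { .ino = 0x10000000123ULL,
 *			       .snap = CEPH_NOSNAP };
 *	struct inode *inode = ceph_get_inode(sb, v);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */
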
/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = generic_setxattr,
	.getxattr = generic_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = generic_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};

/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}

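/*
 * A rough worked example of the walk above (values invented): if the
 * root frag 0/0 is split by 2, its four children are 00/2, 01/2, 10/2
 * and 11/2 (value/bits).  A dentry whose hash has top bits 10 descends
 * into 10/2; if that frag has no further split record, the walk stops
 * and 10/2 is returned as the leaf:
 *
 *	u32 hash = ceph_frag_value(some_name_hash);
 *	u32 leaf = ceph_choose_frag(ci, hash, NULL, NULL);
 */
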
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	if (mds == -1) /* CDIR_AUTH_PARENT */
		mds = diri_auth;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

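/*
 * An illustrative sketch of the stored delegation state (values made
 * up): a dirfrag reply naming mds1 as authority with two replicas
 * leaves the frag in i_fragtree looking like
 *
 *	frag->mds = 1;
 *	frag->ndist = 2;	(capped at CEPH_MAX_DIRFRAG_REP)
 *	frag->dist[0] = 1;
 *	frag->dist[1] = 3;
 *
 * so later requests for that fragment can be directed accordingly.
 */
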
static int frag_tree_split_cmp(const void *l, const void *r)
{
	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
	/* frag values are little-endian on the wire; convert before
	 * comparing, rather than comparing raw __le32 values */
	return ceph_frag_compare(le32_to_cpu(ls->frag),
				 le32_to_cpu(rs->frag));
}

static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
{
	if (!frag)
		return f == ceph_frag_make(0, 0);
	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
		return false;
	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
}

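/*
 * Frag encoding refresher (a sketch; the real helpers live in
 * ceph_frag.h): a frag packs a bit count and a value, so with the
 * root split by 2, ceph_frag_make_child(ceph_frag_make(0, 0), 2, 3)
 * yields the frag with bits=2 and value 11b.  is_frag_child() accepts
 * a frag only if its bit count is exactly the parent's bits plus
 * split_by and its value falls inside the parent's range.
 */
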
static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag, *prev_frag = NULL;
	struct rb_node *rb_node;
	unsigned i, split_by, nsplits;
	u32 id;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits != ci->i_fragtree_nsplits) {
		update = true;
	} else if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	if (nsplits > 1) {
		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
		     frag_tree_split_cmp, NULL);
	}

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		split_by = le32_to_cpu(fragtree->splits[i].by);
		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
			       "frag %x split by %d\n", ceph_vinop(inode),
			       i, nsplits, id, split_by);
			continue;
		}
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			/* delete stale split/leaf node */
			if (frag->split_by > 0 ||
			    !is_frag_child(frag->frag, prev_frag)) {
				rb_erase(&frag->node, &ci->i_fragtree);
				if (frag->split_by > 0)
					ci->i_fragtree_nsplits--;
				kfree(frag);
			}
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		if (frag->split_by == 0)
			ci->i_fragtree_nsplits++;
		frag->split_by = split_by;
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
		prev_frag = frag;
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		/* delete stale split/leaf node */
		if (frag->split_by > 0 ||
		    !is_frag_child(frag->frag, prev_frag)) {
			rb_erase(&frag->node, &ci->i_fragtree);
			if (frag->split_by > 0)
				ci->i_fragtree_nsplits--;
			kfree(frag);
		}
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	ci->i_pool_ns_len = 0;

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	ci->i_cap_flush_tree = RB_ROOT;
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}
	ci->i_fragtree_nsplits = 0;

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * Positive dentry and corresponding inode are always accompanied
	 * in MDS reply. So no need to keep inode in the cache after
	 * dropping all its aliases.
	 */
	return 1;
}

static inline blkcnt_t calc_inode_blocks(u64 size)
{
	return (size + (1<<9) - 1) >> 9;
}

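/*
 * i_blocks is counted in 512-byte sectors, so this rounds the byte
 * size up to the next sector.  For example, a 1-byte file and a
 * 512-byte file both report 1 block, and a 4096-byte file reports 8.
 */
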
/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = calc_inode_blocks(size);
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}

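/*
 * A concrete case (numbers invented): we hold pages for a 100-byte
 * file and the MDS reports size 0 with a bumped truncate_seq because
 * another client truncated it.  We cannot drop the pagecache here in
 * the message path, so i_truncate_pending is bumped, the caller gets
 * queue_trunc != 0, and ceph_queue_vmtruncate() does the actual
 * truncate_pagecache() later from a worker thread.
 */
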
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(info->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
		ci->i_layout = info->layout;
		ci->i_pool_ns_len = iinfo->pool_ns_len;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			if (symlen != i_size_read(inode)) {
				pr_err("fill_inode %llx.%llx BAD symlink "
				       "size %lld\n", ceph_vinop(inode),
				       i_size_read(inode));
				i_size_write(inode, symlen);
				inode->i_blocks = calc_inode_blocks(symlen);
			}

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			unsigned caps = le32_to_cpu(info->cap.caps);
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode, caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page ||
		     (le32_to_cpu(info->cap.caps) & cache_caps)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = d_inode(dentry->d_parent);
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

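/*
 * Lease timing, worked through (a sketch; the numbers are invented):
 * with HZ=250 and duration_ms=30000, ttl is from_time + 7500 jiffies
 * and half_ttl is from_time + 3750 jiffies, so a renewal is attempted
 * halfway through the lease rather than letting it expire.
 */
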
/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *  and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(parent, dname.name, dname.len);
			vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (!dn) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = ceph_init_dentry(dn);
				if (err < 0) {
					dput(dn);
					dput(parent);
					goto done;
				}
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != vino.ino ||
				    ceph_snap(d_inode(dn)) != vino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (!req->r_aborted && rinfo->head->result == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				if (have_lease && d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_locked_dir;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, NULL, &rde->inode, NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
		}
		iput(in);
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
		ctl->index = -1;
	}
}

static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}

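/*
 * Capacity math for the cache above (assuming a 4K PAGE_SIZE and
 * 8-byte pointers): nsize = 4096 / 8 = 512 dentry pointers per page,
 * so readdir index 1000 lands at idx 1000 % 512 = 488 on page
 * 1000 / 512 = 1.
 */
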
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, skipped = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	u32 last_hash = 0;
	u32 fpos_offset;
	struct ceph_readdir_cache_control cache_ctl = {};

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->hash_order && req->r_path2) {
		last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
					  req->r_path2, strlen(req->r_path2));
		last_hash = ceph_frag_value(last_hash);
	}

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (!rinfo->hash_order)
			req->r_readdir_offset = 2;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(d_inode(parent));
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
	}

	if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
		req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
		req->r_readdir_cache_idx = 0;
	}

	cache_ctl.index = req->r_readdir_cache_idx;
	fpos_offset = req->r_readdir_offset;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;

		dname.name = rde->name;
		dname.len = rde->name_len;
		dname.hash = full_name_hash(parent, dname.name, dname.len);

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		if (rinfo->hash_order) {
			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						 rde->name, rde->name_len);
			hash = ceph_frag_value(hash);
			if (hash != last_hash)
				fpos_offset = 2;
			last_hash = hash;
			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
		} else {
			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
		}

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (!dn) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			ret = ceph_init_dentry(dn);
			if (ret < 0) {
				dput(dn);
				err = ret;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != vino.ino ||
			    ceph_snap(d_inode(dn)) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = fill_inode(in, NULL, &rde->inode, NULL, session,
				 req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn))
				iput(in);
			d_drop(dn);
			err = ret;
			goto next_item;
		}

		if (d_really_is_negative(dn)) {
			struct dentry *realdn;

			if (ceph_security_xattr_deadlock(in)) {
				dout(" skip splicing dn %p to inode %p"
				     " (security xattr deadlock)\n", dn, in);
				iput(in);
				skipped++;
				goto next_item;
			}

			realdn = splice_dentry(dn, in);
			if (IS_ERR(realdn)) {
				err = PTR_ERR(realdn);
				d_drop(dn);
				dn = NULL;
				goto next_item;
			}
			dn = realdn;
		}

		ceph_dentry(dn)->offset = rde->offset;

		update_dentry_lease(dn, rde->lease, req->r_session,
				    req->r_request_started);

		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		if (dn)
			dput(dn);
	}
out:
	if (err == 0 && skipped == 0) {
		req->r_did_prepopulate = true;
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	i_size_write(inode, size);
	inode->i_blocks = calc_inode_blocks(size);

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

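/*
 * The check above, with invented numbers: if the MDS granted
 * max_size = 1000, we ask for more once size reaches 500
 * (size << 1 >= max_size), provided we have not already reported a
 * size that large (reported_size << 1 < max_size), so the grant can
 * be extended before writers actually hit the limit.
 */
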
/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);

	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
				    inode, ceph_ino(inode));
		mapping_set_error(inode->i_mapping, -EIO);
		truncate_pagecache(inode, 0);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}

	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
		pr_err("invalidate_pages %p fails\n", inode);
	}

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
	iput(inode);
}

/*
 * called by trunc_wq; we also truncate in a separate thread.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}

/*
 * symlinks
 */
static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = generic_setxattr,
	.getxattr = generic_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = generic_removexattr,
};

int __ceph_setattr(struct inode *inode, struct iattr *attr)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks = calc_inode_blocks(attr->ia_size);
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = current_fs_time(inode->i_sb);
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, attr->ia_mode);
		if (err)
			goto out_put;
	}

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	ceph_free_cap_flush(prealloc_cf);
	return err;
out_put:
	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);
	return err;
}

int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	return __ceph_setattr(d_inode(dentry), attr);
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;
		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}
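
/*
 * Directory st_size semantics, for illustration: with the rbytes
 * mount option a stat() on a directory reports the recursive byte
 * count maintained by the MDS; without it, the entry count
 * (files + subdirectories).  Either way st_blocks stays 0.
 */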