#include "ceph_debug.h"

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "decode.h"
/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */
static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);
/*
 * find or create an inode, given the ceph ino number
 */
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}
/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	return inode;
}
const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};

/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
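/*
 * For illustration (a sketch based on the frag encoding helpers in
 * ceph_frag.h, not part of the logic below): a frag packs a bit count
 * and a value prefix into a u32, so ceph_frag_make(0, 0) covers the
 * whole hash range, and a split of frag t yields the children
 * ceph_frag_make_child(t, split_by, i) for i in [0, 1 << split_by),
 * each covering a disjoint slice of t's range:
 *
 *	u32 root = ceph_frag_make(0, 0);
 *	u32 left = ceph_frag_make_child(root, 1, 0);
 *	u32 right = ceph_frag_make_child(root, 1, 1);
 *	WARN_ON(!ceph_frag_contains_value(right, 0x800000));
 */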
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}
/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);

		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}
/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
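/*
 * Usage sketch (hypothetical tree state): with only the root frag
 * split two ways and neither child split further, the walk above
 * descends one level and returns whichever of
 * ceph_frag_make_child(root, 1, 0/1) contains @v; passing non-NULL
 * @pfrag and @found additionally reports any delegation info stored
 * on that leaf.
 */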
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
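/*
 * In other words: a leaf dirfrag with no replica list (ndist == 0)
 * carries no delegation info worth caching, so it is removed from (or
 * never added to) i_fragtree; only split points and replicated frags
 * are remembered.
 */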
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}
/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}
void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			&ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	kmem_cache_free(ceph_inode_cachep, ci);
}
/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
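/*
 * Example: at equal truncate_seq, a client that has locally grown the
 * file keeps its larger size even if the MDS still reports a smaller
 * one; but a newer truncate_seq from the MDS means an intervening
 * truncate() happened, and the MDS's (possibly smaller) size wins.
 */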
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL|
				       CEPH_CAP_FILE_LAZYIO)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
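	/*
	 * (So a blob of exactly 4 bytes is just the count field
	 * encoding zero xattrs, and needs no preallocation.)
	 */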
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&inode->i_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have a newer info
	 * (e.g., due to inode info racing from multiple MDSs), or if
	 * we are getting projected (unstable) inode info.
	 */
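	/*
	 * E.g.: with a stable cached i_version of 8, an incoming
	 * version of 6 (older) or 7 (older and projected) is skipped,
	 * while 10 is applied.
	 */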
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) > le64_to_cpu(info->version))
		goto no_change;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = le32_to_cpu(info->uid);
		inode->i_gid = le32_to_cpu(info->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(info->nlink);

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	ci->i_max_size = le64_to_cpu(info->max_size);
	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			int symlen = iinfo->symlink_len;
			char *sym;

			BUG_ON(symlen != inode->i_size);
			spin_unlock(&inode->i_lock);

			err = -ENOMEM;
			sym = kmalloc(symlen+1, GFP_NOFS);
			if (!sym)
				goto out;
			memcpy(sym, iinfo->symlink, symlen);
			sym[symlen] = 0;

			spin_lock(&inode->i_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);

		/* set dir completion flag? */
		if (ci->i_files == 0 && ci->i_subdirs == 0 &&
		    ceph_snap(inode) == CEPH_NOSNAP &&
		    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
		    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
			dout(" marking %p complete (empty)\n", inode);
			ci->i_ceph_flags |= CEPH_I_COMPLETE;
			ci->i_max_offset = 2;
		}

		/* it may be better to set st_size in getattr instead? */
		if (ceph_test_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
			inode->i_size = ci->i_rbytes;
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

no_change:
	spin_unlock(&inode->i_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     le32_to_cpu(info->cap.flags),
				     caps_reservation);
		} else {
			spin_lock(&inode->i_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&inode->i_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}
/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
	     dentry, le16_to_cpu(lease->mask), duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (lease->mask == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}
/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dn->d_parent->d_inode;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	di = ceph_dentry(dn);

	spin_lock(&inode->i_lock);
	if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		spin_unlock(&inode->i_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&inode->i_lock);

	spin_lock(&dcache_lock);
	spin_lock(&dn->d_lock);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dcache_lock);
}
/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, atomic_read(&dn->d_count),
		     realdn, atomic_read(&realdn->d_count),
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	ceph_set_dentry_offset(dn);
out:
	return dn;
}
/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_client *client = ceph_sb_to_client(sb);
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
				 session, req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (err < 0)
			return err;
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       client->mount_args->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			(le16_to_cpu(rinfo->dlease->mask) &
			 CEPH_LOCK_DN);

		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up d_subdirs order */
			ceph_i_clear(dir, CEPH_I_COMPLETE);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/* take overwritten dentry's readdir offset */
			dout("dn %p gets %p offset %lld (old offset %lld)\n",
			     req->r_old_dentry, dn, ceph_dentry(dn)->offset,
			     ceph_dentry(req->r_old_dentry)->offset);
			ceph_dentry(req->r_old_dentry)->offset =
				ceph_dentry(dn)->offset;

			dn = req->r_old_dentry;  /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		if (!dn->d_inode) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_delete(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
			igrab(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			igrab(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = dn->d_inode;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		   req->r_op == CEPH_MDS_OP_MKSNAP) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
		igrab(in);
		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

done:
	dout("fill_trace done err=%d\n", err);
	return err;
}
/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0) {
				dput(dn);
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&dcache_lock);
			spin_lock(&dn->d_lock);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&dcache_lock);
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (in == NULL) {
				dout("new_inode badness\n");
				d_delete(dn);
				dput(dn);
				err = -ENOMEM;
				goto out;
			}
			dn = splice_dentry(dn, in, NULL);
			if (IS_ERR(dn))
				dn = NULL;
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			goto next_item;
		}
		if (dn)
			update_dentry_lease(dn, rinfo->dir_dlease[i],
					    req->r_session,
					    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}
int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&inode->i_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&inode->i_lock);
	return ret;
}
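/*
 * E.g.: with an MDS-granted max_size of 4 MB, this starts returning 1
 * once i_size crosses 2 MB (while i_reported_size is still below that
 * half-way mark), so the caller can ask the MDS for more room before
 * writers actually hit the limit.
 */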
/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}
/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
	}
}
/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int skip_page =
				(PageDirty(page) || PageWriteback(page));

			if (!skip_page)
				skip_page = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;

			if (skip_page)
				continue;

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	spin_lock(&inode->i_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_gen == 0 ||
	    ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen);
		/* nevermind! */
		ci->i_rdcache_revoking = 0;
		spin_unlock(&inode->i_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&inode->i_lock);

	ceph_invalidate_nondirty_pages(inode->i_mapping);

	spin_lock(&inode->i_lock);
	if (orig_gen == ci->i_rdcache_gen) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_gen = 0;
		ci->i_rdcache_revoking = 0;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, gen now %d\n",
		     inode, orig_gen, ci->i_rdcache_gen);
	}
	spin_unlock(&inode->i_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}
/*
 * called by trunc_wq; take i_mutex ourselves
 *
 * We also truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
}
/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
	}
}
/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, wake = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&inode->i_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&inode->i_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&inode->i_lock);
	ci->i_truncate_pending--;
	if (ci->i_truncate_pending == 0)
		wake = 1;
	spin_unlock(&inode->i_lock);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
}
/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode = dentry->d_parent->d_inode;
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = &ceph_sb_to_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	__ceph_do_pending_vmtruncate(inode);

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&inode->i_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		__ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&inode->i_lock);

	if (mask) {
		req->r_inode = igrab(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&inode->i_lock);
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask));
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}
/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);

	if (!err)
		err = generic_permission(inode, mask, NULL);
	return err;
}
/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = inode->i_ino;
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			stat->size = ci->i_rbytes;
			stat->blocks = (stat->size + 511) >> 9;
			stat->blksize = 65536;