1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/module.h>
4 #include <linux/fs.h>
5 #include <linux/slab.h>
6 #include <linux/string.h>
7 #include <linux/uaccess.h>
8 #include <linux/kernel.h>
9 #include <linux/namei.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/posix_acl.h>
13
14 #include "super.h"
15 #include "mds_client.h"
16 #include "cache.h"
17 #include <linux/ceph/decode.h>
18
19 /*
20  * Ceph inode operations
21  *
22  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
23  * setattr, etc.), xattr helpers, and helpers for assimilating
24  * metadata returned by the MDS into our cache.
25  *
26  * Also define helpers for doing asynchronous writeback, invalidation,
27  * and truncation for the benefit of those who can't afford to block
28  * (typically because they are in the message handler path).
29  */
30
31 static const struct inode_operations ceph_symlink_iops;
32
33 static void ceph_invalidate_work(struct work_struct *work);
34 static void ceph_writeback_work(struct work_struct *work);
35 static void ceph_vmtruncate_work(struct work_struct *work);
36
37 /*
38  * find or create an inode, given the ceph ino number
39  */
40 static int ceph_set_ino_cb(struct inode *inode, void *data)
41 {
42         ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
43         inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
44         return 0;
45 }
46
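/*
 * Note that iget5_locked() hashes only on the collapsed ino_t, while
 * ceph_ino_compare() (super.h) matches the full (ino, snap) pair, so
 * distinct snapshots of the same inode get distinct in-core inodes.
 */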
47 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
48 {
49         struct inode *inode;
50         ino_t t = ceph_vino_to_ino(vino);
51
52         inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
53         if (inode == NULL)
54                 return ERR_PTR(-ENOMEM);
55         if (inode->i_state & I_NEW) {
56                 dout("get_inode created new inode %p %llx.%llx ino %llx\n",
57                      inode, ceph_vinop(inode), (u64)inode->i_ino);
58                 unlock_new_inode(inode);
59         }
60
61         dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
62              vino.snap, inode);
63         return inode;
64 }
65
66 /*
67  * get/construct snapdir inode for a given directory
68  */
69 struct inode *ceph_get_snapdir(struct inode *parent)
70 {
71         struct ceph_vino vino = {
72                 .ino = ceph_ino(parent),
73                 .snap = CEPH_SNAPDIR,
74         };
75         struct inode *inode = ceph_get_inode(parent->i_sb, vino);
76         struct ceph_inode_info *ci = ceph_inode(inode);
77
78         BUG_ON(!S_ISDIR(parent->i_mode));
79         if (IS_ERR(inode))
80                 return inode;
81         inode->i_mode = parent->i_mode;
82         inode->i_uid = parent->i_uid;
83         inode->i_gid = parent->i_gid;
84         inode->i_op = &ceph_dir_iops;
85         inode->i_fop = &ceph_dir_fops;
86         ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
87         ci->i_rbytes = 0;
88         return inode;
89 }
90
91 const struct inode_operations ceph_file_iops = {
92         .permission = ceph_permission,
93         .setattr = ceph_setattr,
94         .getattr = ceph_getattr,
95         .setxattr = ceph_setxattr,
96         .getxattr = ceph_getxattr,
97         .listxattr = ceph_listxattr,
98         .removexattr = ceph_removexattr,
99         .get_acl = ceph_get_acl,
100 };
101
102
103 /*
104  * We use a 'frag tree' to keep track of the MDS's directory fragments
105  * for a given inode (usually there is just a single fragment).  We
106  * need to know when a child frag is delegated to a new MDS, or when
107  * it is flagged as replicated, so we can direct our requests
108  * accordingly.
109  */
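/*
 * Illustrative note on the frag encoding (see
 * include/linux/ceph/ceph_frag.h): the top 8 bits of a frag hold the
 * number of significant prefix bits and the low 24 bits hold that
 * prefix.  ceph_frag_make(0, 0) is the root fragment covering the
 * whole hash range; splitting it one level yields the children
 * ceph_frag_make(1, 0x000000) and ceph_frag_make(1, 0x800000).
 */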
110
111 /*
112  * find/create a frag in the tree
113  */
114 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
115                                                     u32 f)
116 {
117         struct rb_node **p;
118         struct rb_node *parent = NULL;
119         struct ceph_inode_frag *frag;
120         int c;
121
122         p = &ci->i_fragtree.rb_node;
123         while (*p) {
124                 parent = *p;
125                 frag = rb_entry(parent, struct ceph_inode_frag, node);
126                 c = ceph_frag_compare(f, frag->frag);
127                 if (c < 0)
128                         p = &(*p)->rb_left;
129                 else if (c > 0)
130                         p = &(*p)->rb_right;
131                 else
132                         return frag;
133         }
134
135         frag = kmalloc(sizeof(*frag), GFP_NOFS);
136         if (!frag) {
137                 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
138                        "frag %x\n", &ci->vfs_inode,
139                        ceph_vinop(&ci->vfs_inode), f);
140                 return ERR_PTR(-ENOMEM);
141         }
142         frag->frag = f;
143         frag->split_by = 0;
144         frag->mds = -1;
145         frag->ndist = 0;
146
147         rb_link_node(&frag->node, parent, p);
148         rb_insert_color(&frag->node, &ci->i_fragtree);
149
150         dout("get_or_create_frag added %llx.%llx frag %x\n",
151              ceph_vinop(&ci->vfs_inode), f);
152         return frag;
153 }
154
155 /*
156  * find a specific frag @f
157  */
158 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
159 {
160         struct rb_node *n = ci->i_fragtree.rb_node;
161
162         while (n) {
163                 struct ceph_inode_frag *frag =
164                         rb_entry(n, struct ceph_inode_frag, node);
165                 int c = ceph_frag_compare(f, frag->frag);
166                 if (c < 0)
167                         n = n->rb_left;
168                 else if (c > 0)
169                         n = n->rb_right;
170                 else
171                         return frag;
172         }
173         return NULL;
174 }
175
176 /*
177  * Choose frag containing the given value @v.  If @pfrag is
178  * specified, copy the frag delegation info to the caller if
179  * it is present.
180  */
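/*
 * In short: walk down from the root fragment, descending at each stored
 * split into the child fragment whose prefix contains @v, and stop at a
 * fragment with no record (implicit leaf) or with split_by == 0.
 */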
181 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
182                      struct ceph_inode_frag *pfrag,
183                      int *found)
184 {
185         u32 t = ceph_frag_make(0, 0);
186         struct ceph_inode_frag *frag;
187         unsigned nway, i;
188         u32 n;
189
190         if (found)
191                 *found = 0;
192
193         mutex_lock(&ci->i_fragtree_mutex);
194         while (1) {
195                 WARN_ON(!ceph_frag_contains_value(t, v));
196                 frag = __ceph_find_frag(ci, t);
197                 if (!frag)
198                         break; /* t is a leaf */
199                 if (frag->split_by == 0) {
200                         if (pfrag)
201                                 memcpy(pfrag, frag, sizeof(*pfrag));
202                         if (found)
203                                 *found = 1;
204                         break;
205                 }
206
207                 /* choose child */
208                 nway = 1 << frag->split_by;
209                 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
210                      frag->split_by, nway);
211                 for (i = 0; i < nway; i++) {
212                         n = ceph_frag_make_child(t, frag->split_by, i);
213                         if (ceph_frag_contains_value(n, v)) {
214                                 t = n;
215                                 break;
216                         }
217                 }
218                 BUG_ON(i == nway);
219         }
220         dout("choose_frag(%x) = %x\n", v, t);
221
222         mutex_unlock(&ci->i_fragtree_mutex);
223         return t;
224 }
225
226 /*
227  * Process dirfrag (delegation) info from the mds.  Include leaf
228  * fragment in tree ONLY if ndist > 0.  Otherwise, only
229  * branches/splits are included in i_fragtree.
230  */
231 static int ceph_fill_dirfrag(struct inode *inode,
232                              struct ceph_mds_reply_dirfrag *dirinfo)
233 {
234         struct ceph_inode_info *ci = ceph_inode(inode);
235         struct ceph_inode_frag *frag;
236         u32 id = le32_to_cpu(dirinfo->frag);
237         int mds = le32_to_cpu(dirinfo->auth);
238         int ndist = le32_to_cpu(dirinfo->ndist);
239         int i;
240         int err = 0;
241
242         mutex_lock(&ci->i_fragtree_mutex);
243         if (ndist == 0) {
244                 /* no delegation info needed. */
245                 frag = __ceph_find_frag(ci, id);
246                 if (!frag)
247                         goto out;
248                 if (frag->split_by == 0) {
249                         /* tree leaf, remove */
250                         dout("fill_dirfrag removed %llx.%llx frag %x"
251                              " (no ref)\n", ceph_vinop(inode), id);
252                         rb_erase(&frag->node, &ci->i_fragtree);
253                         kfree(frag);
254                 } else {
255                         /* tree branch, keep and clear */
256                         dout("fill_dirfrag cleared %llx.%llx frag %x"
257                              " referral\n", ceph_vinop(inode), id);
258                         frag->mds = -1;
259                         frag->ndist = 0;
260                 }
261                 goto out;
262         }
263
264
265         /* find/add this frag to store mds delegation info */
266         frag = __get_or_create_frag(ci, id);
267         if (IS_ERR(frag)) {
268                 /* this is not the end of the world; we can continue
269                    with bad/inaccurate delegation info */
270                 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
271                        ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
272                 err = -ENOMEM;
273                 goto out;
274         }
275
276         frag->mds = mds;
277         frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
278         for (i = 0; i < frag->ndist; i++)
279                 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
280         dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
281              ceph_vinop(inode), frag->frag, frag->ndist);
282
283 out:
284         mutex_unlock(&ci->i_fragtree_mutex);
285         return err;
286 }
287
288
289 /*
290  * initialize a newly allocated inode.
291  */
292 struct inode *ceph_alloc_inode(struct super_block *sb)
293 {
294         struct ceph_inode_info *ci;
295         int i;
296
297         ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
298         if (!ci)
299                 return NULL;
300
301         dout("alloc_inode %p\n", &ci->vfs_inode);
302
303         spin_lock_init(&ci->i_ceph_lock);
304
305         ci->i_version = 0;
306         ci->i_time_warp_seq = 0;
307         ci->i_ceph_flags = 0;
308         atomic_set(&ci->i_release_count, 1);
309         atomic_set(&ci->i_complete_count, 0);
310         ci->i_symlink = NULL;
311
312         memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
313
314         ci->i_fragtree = RB_ROOT;
315         mutex_init(&ci->i_fragtree_mutex);
316
317         ci->i_xattrs.blob = NULL;
318         ci->i_xattrs.prealloc_blob = NULL;
319         ci->i_xattrs.dirty = false;
320         ci->i_xattrs.index = RB_ROOT;
321         ci->i_xattrs.count = 0;
322         ci->i_xattrs.names_size = 0;
323         ci->i_xattrs.vals_size = 0;
324         ci->i_xattrs.version = 0;
325         ci->i_xattrs.index_version = 0;
326
327         ci->i_caps = RB_ROOT;
328         ci->i_auth_cap = NULL;
329         ci->i_dirty_caps = 0;
330         ci->i_flushing_caps = 0;
331         INIT_LIST_HEAD(&ci->i_dirty_item);
332         INIT_LIST_HEAD(&ci->i_flushing_item);
333         ci->i_cap_flush_seq = 0;
334         ci->i_cap_flush_last_tid = 0;
335         memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
336         init_waitqueue_head(&ci->i_cap_wq);
337         ci->i_hold_caps_min = 0;
338         ci->i_hold_caps_max = 0;
339         INIT_LIST_HEAD(&ci->i_cap_delay_list);
340         INIT_LIST_HEAD(&ci->i_cap_snaps);
341         ci->i_head_snapc = NULL;
342         ci->i_snap_caps = 0;
343         ci->i_cap_exporting_issued = 0;
344
345         for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
346                 ci->i_nr_by_mode[i] = 0;
347
348         mutex_init(&ci->i_truncate_mutex);
349         ci->i_truncate_seq = 0;
350         ci->i_truncate_size = 0;
351         ci->i_truncate_pending = 0;
352
353         ci->i_max_size = 0;
354         ci->i_reported_size = 0;
355         ci->i_wanted_max_size = 0;
356         ci->i_requested_max_size = 0;
357
358         ci->i_pin_ref = 0;
359         ci->i_rd_ref = 0;
360         ci->i_rdcache_ref = 0;
361         ci->i_wr_ref = 0;
362         ci->i_wb_ref = 0;
363         ci->i_wrbuffer_ref = 0;
364         ci->i_wrbuffer_ref_head = 0;
365         ci->i_shared_gen = 0;
366         ci->i_rdcache_gen = 0;
367         ci->i_rdcache_revoking = 0;
368
369         INIT_LIST_HEAD(&ci->i_unsafe_writes);
370         INIT_LIST_HEAD(&ci->i_unsafe_dirops);
371         spin_lock_init(&ci->i_unsafe_lock);
372
373         ci->i_snap_realm = NULL;
374         INIT_LIST_HEAD(&ci->i_snap_realm_item);
375         INIT_LIST_HEAD(&ci->i_snap_flush_item);
376
377         INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
378         INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
379
380         INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
381
382         ceph_fscache_inode_init(ci);
383
384         return &ci->vfs_inode;
385 }
386
387 static void ceph_i_callback(struct rcu_head *head)
388 {
389         struct inode *inode = container_of(head, struct inode, i_rcu);
390         struct ceph_inode_info *ci = ceph_inode(inode);
391
392         kmem_cache_free(ceph_inode_cachep, ci);
393 }
394
395 void ceph_destroy_inode(struct inode *inode)
396 {
397         struct ceph_inode_info *ci = ceph_inode(inode);
398         struct ceph_inode_frag *frag;
399         struct rb_node *n;
400
401         dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
402
403         ceph_fscache_unregister_inode_cookie(ci);
404
405         ceph_queue_caps_release(inode);
406
407         /*
408          * we may still have a snap_realm reference if there are stray
409          * caps in i_cap_exporting_issued or i_snap_caps.
410          */
411         if (ci->i_snap_realm) {
412                 struct ceph_mds_client *mdsc =
413                         ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
414                 struct ceph_snap_realm *realm = ci->i_snap_realm;
415
416                 dout(" dropping residual ref to snap realm %p\n", realm);
417                 spin_lock(&realm->inodes_with_caps_lock);
418                 list_del_init(&ci->i_snap_realm_item);
419                 spin_unlock(&realm->inodes_with_caps_lock);
420                 ceph_put_snap_realm(mdsc, realm);
421         }
422
423         kfree(ci->i_symlink);
424         while ((n = rb_first(&ci->i_fragtree)) != NULL) {
425                 frag = rb_entry(n, struct ceph_inode_frag, node);
426                 rb_erase(n, &ci->i_fragtree);
427                 kfree(frag);
428         }
429
430         __ceph_destroy_xattrs(ci);
431         if (ci->i_xattrs.blob)
432                 ceph_buffer_put(ci->i_xattrs.blob);
433         if (ci->i_xattrs.prealloc_blob)
434                 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
435
436         call_rcu(&inode->i_rcu, ceph_i_callback);
437 }
438
439 int ceph_drop_inode(struct inode *inode)
440 {
441         /*
442          * Positive dentry and corresponding inode always come together
443          * in an MDS reply, so there is no need to keep the inode in the
444          * cache after dropping all its aliases.
445          */
446         return 1;
447 }
448
449 /*
450  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
451  * careful because either the client or MDS may have more up to date
452  * info, depending on which capabilities are held, and whether
453  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
454  * and size are monotonically increasing, except when utimes() or
455  * truncate() increments the corresponding _seq values.)
456  */
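/*
 * For example: if the MDS reports a newer truncate_seq, we adopt its
 * size even if it is smaller and may queue an async truncation; if the
 * seq is unchanged, we only ever let i_size grow.
 */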
457 int ceph_fill_file_size(struct inode *inode, int issued,
458                         u32 truncate_seq, u64 truncate_size, u64 size)
459 {
460         struct ceph_inode_info *ci = ceph_inode(inode);
461         int queue_trunc = 0;
462
463         if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
464             (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
465                 dout("size %lld -> %llu\n", inode->i_size, size);
466                 inode->i_size = size;
467                 inode->i_blocks = (size + (1<<9) - 1) >> 9;
468                 ci->i_reported_size = size;
469                 if (truncate_seq != ci->i_truncate_seq) {
470                         dout("truncate_seq %u -> %u\n",
471                              ci->i_truncate_seq, truncate_seq);
472                         ci->i_truncate_seq = truncate_seq;
473
474                         /* the MDS should have revoked these caps */
475                         WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
476                                                CEPH_CAP_FILE_RD |
477                                                CEPH_CAP_FILE_WR |
478                                                CEPH_CAP_FILE_LAZYIO));
479                         /*
480                          * If we hold relevant caps, or in the case where we're
481                          * not the only client referencing this file and we
482                          * don't hold those caps, then we need to check whether
483                          * the file is either opened or mmapped.
484                          */
485                         if ((issued & (CEPH_CAP_FILE_CACHE|
486                                        CEPH_CAP_FILE_BUFFER)) ||
487                             mapping_mapped(inode->i_mapping) ||
488                             __ceph_caps_file_wanted(ci)) {
489                                 ci->i_truncate_pending++;
490                                 queue_trunc = 1;
491                         }
492                 }
493         }
494         if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
495             ci->i_truncate_size != truncate_size) {
496                 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
497                      truncate_size);
498                 ci->i_truncate_size = truncate_size;
499         }
500
501         if (queue_trunc)
502                 ceph_fscache_invalidate(inode);
503
504         return queue_trunc;
505 }
506
507 void ceph_fill_file_time(struct inode *inode, int issued,
508                          u64 time_warp_seq, struct timespec *ctime,
509                          struct timespec *mtime, struct timespec *atime)
510 {
511         struct ceph_inode_info *ci = ceph_inode(inode);
512         int warn = 0;
513
514         if (issued & (CEPH_CAP_FILE_EXCL|
515                       CEPH_CAP_FILE_WR|
516                       CEPH_CAP_FILE_BUFFER|
517                       CEPH_CAP_AUTH_EXCL|
518                       CEPH_CAP_XATTR_EXCL)) {
519                 if (timespec_compare(ctime, &inode->i_ctime) > 0) {
520                         dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
521                              inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
522                              ctime->tv_sec, ctime->tv_nsec);
523                         inode->i_ctime = *ctime;
524                 }
525                 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
526                         /* the MDS did a utimes() */
527                         dout("mtime %ld.%09ld -> %ld.%09ld "
528                              "tw %d -> %d\n",
529                              inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
530                              mtime->tv_sec, mtime->tv_nsec,
531                              ci->i_time_warp_seq, (int)time_warp_seq);
532
533                         inode->i_mtime = *mtime;
534                         inode->i_atime = *atime;
535                         ci->i_time_warp_seq = time_warp_seq;
536                 } else if (time_warp_seq == ci->i_time_warp_seq) {
537                         /* nobody did utimes(); take the max */
538                         if (timespec_compare(mtime, &inode->i_mtime) > 0) {
539                                 dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
540                                      inode->i_mtime.tv_sec,
541                                      inode->i_mtime.tv_nsec,
542                                      mtime->tv_sec, mtime->tv_nsec);
543                                 inode->i_mtime = *mtime;
544                         }
545                         if (timespec_compare(atime, &inode->i_atime) > 0) {
546                                 dout("atime %ld.%09ld -> %ld.%09ld inc\n",
547                                      inode->i_atime.tv_sec,
548                                      inode->i_atime.tv_nsec,
549                                      atime->tv_sec, atime->tv_nsec);
550                                 inode->i_atime = *atime;
551                         }
552                 } else if (issued & CEPH_CAP_FILE_EXCL) {
553                         /* we did a utimes(); ignore mds values */
554                 } else {
555                         warn = 1;
556                 }
557         } else {
558                 /* we have no write|excl caps; whatever the MDS says is true */
559                 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
560                         inode->i_ctime = *ctime;
561                         inode->i_mtime = *mtime;
562                         inode->i_atime = *atime;
563                         ci->i_time_warp_seq = time_warp_seq;
564                 } else {
565                         warn = 1;
566                 }
567         }
568         if (warn) /* time_warp_seq shouldn't go backwards */
569                 dout("%p mds time_warp_seq %llu < %u\n",
570                      inode, time_warp_seq, ci->i_time_warp_seq);
571 }
572
573 /*
574  * Populate an inode based on info from mds.  May be called on new or
575  * existing inodes.
576  */
577 static int fill_inode(struct inode *inode,
578                       struct ceph_mds_reply_info_in *iinfo,
579                       struct ceph_mds_reply_dirfrag *dirinfo,
580                       struct ceph_mds_session *session,
581                       unsigned long ttl_from, int cap_fmode,
582                       struct ceph_cap_reservation *caps_reservation)
583 {
584         struct ceph_mds_reply_inode *info = iinfo->in;
585         struct ceph_inode_info *ci = ceph_inode(inode);
586         int i;
587         int issued = 0, implemented;
588         struct timespec mtime, atime, ctime;
589         u32 nsplits;
590         struct ceph_inode_frag *frag;
591         struct rb_node *rb_node;
592         struct ceph_buffer *xattr_blob = NULL;
593         int err = 0;
594         int queue_trunc = 0;
595
596         dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
597              inode, ceph_vinop(inode), le64_to_cpu(info->version),
598              ci->i_version);
599
600         /*
601          * prealloc xattr data, if it looks like we'll need it.  only
602          * if len > 4 (meaning there are actually xattrs; the first 4
603          * bytes are the xattr count).
604          */
605         if (iinfo->xattr_len > 4) {
606                 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
607                 if (!xattr_blob)
608                         pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
609                                iinfo->xattr_len);
610         }
611
612         spin_lock(&ci->i_ceph_lock);
613
614         /*
615          * provided version will be odd if inode value is projected,
616          * even if stable.  skip the update if we have newer stable
617          * info (ours>=theirs, e.g. due to racing mds replies), unless
618          * we are getting projected (unstable) info (in which case the
619          * version is odd, and we want ours>theirs).
620          *   us   them
621          *   2    2     skip
622          *   3    2     skip
623          *   3    3     update
624          */
625         if (le64_to_cpu(info->version) > 0 &&
626             (ci->i_version & ~1) >= le64_to_cpu(info->version))
627                 goto no_change;
628
629         issued = __ceph_caps_issued(ci, &implemented);
630         issued |= implemented | __ceph_caps_dirty(ci);
631
632         /* update inode */
633         ci->i_version = le64_to_cpu(info->version);
634         inode->i_version++;
635         inode->i_rdev = le32_to_cpu(info->rdev);
636
637         if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
638                 inode->i_mode = le32_to_cpu(info->mode);
639                 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
640                 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
641                 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
642                      from_kuid(&init_user_ns, inode->i_uid),
643                      from_kgid(&init_user_ns, inode->i_gid));
644         }
645
646         if ((issued & CEPH_CAP_LINK_EXCL) == 0)
647                 set_nlink(inode, le32_to_cpu(info->nlink));
648
649         /* be careful with mtime, atime, size */
650         ceph_decode_timespec(&atime, &info->atime);
651         ceph_decode_timespec(&mtime, &info->mtime);
652         ceph_decode_timespec(&ctime, &info->ctime);
653         queue_trunc = ceph_fill_file_size(inode, issued,
654                                           le32_to_cpu(info->truncate_seq),
655                                           le64_to_cpu(info->truncate_size),
656                                           le64_to_cpu(info->size));
657         ceph_fill_file_time(inode, issued,
658                             le32_to_cpu(info->time_warp_seq),
659                             &ctime, &mtime, &atime);
660
661         /* only update max_size on auth cap */
662         if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
663             ci->i_max_size != le64_to_cpu(info->max_size)) {
664                 dout("max_size %lld -> %llu\n", ci->i_max_size,
665                      le64_to_cpu(info->max_size));
666                 ci->i_max_size = le64_to_cpu(info->max_size);
667         }
668
669         ci->i_layout = info->layout;
670         inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
671
672         /* xattrs */
673         /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
674         if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
675             le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
676                 if (ci->i_xattrs.blob)
677                         ceph_buffer_put(ci->i_xattrs.blob);
678                 ci->i_xattrs.blob = xattr_blob;
679                 if (xattr_blob)
680                         memcpy(ci->i_xattrs.blob->vec.iov_base,
681                                iinfo->xattr_data, iinfo->xattr_len);
682                 ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
683                 ceph_forget_all_cached_acls(inode);
684                 xattr_blob = NULL;
685         }
686
687         inode->i_mapping->a_ops = &ceph_aops;
688         inode->i_mapping->backing_dev_info =
689                 &ceph_sb_to_client(inode->i_sb)->backing_dev_info;
690
691         switch (inode->i_mode & S_IFMT) {
692         case S_IFIFO:
693         case S_IFBLK:
694         case S_IFCHR:
695         case S_IFSOCK:
696                 init_special_inode(inode, inode->i_mode, inode->i_rdev);
697                 inode->i_op = &ceph_file_iops;
698                 break;
699         case S_IFREG:
700                 inode->i_op = &ceph_file_iops;
701                 inode->i_fop = &ceph_file_fops;
702                 break;
703         case S_IFLNK:
704                 inode->i_op = &ceph_symlink_iops;
705                 if (!ci->i_symlink) {
706                         u32 symlen = iinfo->symlink_len;
707                         char *sym;
708
709                         spin_unlock(&ci->i_ceph_lock);
710
711                         err = -EINVAL;
712                         if (WARN_ON(symlen != inode->i_size))
713                                 goto out;
714
715                         err = -ENOMEM;
716                         sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
717                         if (!sym)
718                                 goto out;
719
720                         spin_lock(&ci->i_ceph_lock);
721                         if (!ci->i_symlink)
722                                 ci->i_symlink = sym;
723                         else
724                                 kfree(sym); /* lost a race */
725                 }
726                 break;
727         case S_IFDIR:
728                 inode->i_op = &ceph_dir_iops;
729                 inode->i_fop = &ceph_dir_fops;
730
731                 ci->i_dir_layout = iinfo->dir_layout;
732
733                 ci->i_files = le64_to_cpu(info->files);
734                 ci->i_subdirs = le64_to_cpu(info->subdirs);
735                 ci->i_rbytes = le64_to_cpu(info->rbytes);
736                 ci->i_rfiles = le64_to_cpu(info->rfiles);
737                 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
738                 ceph_decode_timespec(&ci->i_rctime, &info->rctime);
739                 break;
740         default:
741                 pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
742                        ceph_vinop(inode), inode->i_mode);
743         }
744
745         /* set dir completion flag? */
746         if (S_ISDIR(inode->i_mode) &&
747             ci->i_files == 0 && ci->i_subdirs == 0 &&
748             ceph_snap(inode) == CEPH_NOSNAP &&
749             (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
750             (issued & CEPH_CAP_FILE_EXCL) == 0 &&
751             !__ceph_dir_is_complete(ci)) {
752                 dout(" marking %p complete (empty)\n", inode);
753                 __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
754                 ci->i_max_offset = 2;
755         }
756 no_change:
757         spin_unlock(&ci->i_ceph_lock);
758
759         /* queue truncate if we saw i_size decrease */
760         if (queue_trunc)
761                 ceph_queue_vmtruncate(inode);
762
763         /* populate frag tree */
764         /* FIXME: move me up, if/when version reflects fragtree changes */
765         nsplits = le32_to_cpu(info->fragtree.nsplits);
766         mutex_lock(&ci->i_fragtree_mutex);
767         rb_node = rb_first(&ci->i_fragtree);
768         for (i = 0; i < nsplits; i++) {
769                 u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
770                 frag = NULL;
771                 while (rb_node) {
772                         frag = rb_entry(rb_node, struct ceph_inode_frag, node);
773                         if (ceph_frag_compare(frag->frag, id) >= 0) {
774                                 if (frag->frag != id)
775                                         frag = NULL;
776                                 else
777                                         rb_node = rb_next(rb_node);
778                                 break;
779                         }
780                         rb_node = rb_next(rb_node);
781                         rb_erase(&frag->node, &ci->i_fragtree);
782                         kfree(frag);
783                         frag = NULL;
784                 }
785                 if (!frag) {
786                         frag = __get_or_create_frag(ci, id);
787                         if (IS_ERR(frag))
788                                 continue;
789                 }
790                 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
791                 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
792         }
793         while (rb_node) {
794                 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
795                 rb_node = rb_next(rb_node);
796                 rb_erase(&frag->node, &ci->i_fragtree);
797                 kfree(frag);
798         }
799         mutex_unlock(&ci->i_fragtree_mutex);
800
801         /* were we issued a capability? */
802         if (info->cap.caps) {
803                 if (ceph_snap(inode) == CEPH_NOSNAP) {
804                         ceph_add_cap(inode, session,
805                                      le64_to_cpu(info->cap.cap_id),
806                                      cap_fmode,
807                                      le32_to_cpu(info->cap.caps),
808                                      le32_to_cpu(info->cap.wanted),
809                                      le32_to_cpu(info->cap.seq),
810                                      le32_to_cpu(info->cap.mseq),
811                                      le64_to_cpu(info->cap.realm),
812                                      info->cap.flags,
813                                      caps_reservation);
814                 } else {
815                         spin_lock(&ci->i_ceph_lock);
816                         dout(" %p got snap_caps %s\n", inode,
817                              ceph_cap_string(le32_to_cpu(info->cap.caps)));
818                         ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
819                         if (cap_fmode >= 0)
820                                 __ceph_get_fmode(ci, cap_fmode);
821                         spin_unlock(&ci->i_ceph_lock);
822                 }
823         } else if (cap_fmode >= 0) {
824                 pr_warning("mds issued no caps on %llx.%llx\n",
825                            ceph_vinop(inode));
826                 __ceph_get_fmode(ci, cap_fmode);
827         }
828
829         /* update delegation info? */
830         if (dirinfo)
831                 ceph_fill_dirfrag(inode, dirinfo);
832
833         err = 0;
834
835 out:
836         if (xattr_blob)
837                 ceph_buffer_put(xattr_blob);
838         return err;
839 }
840
841 /*
842  * caller should hold session s_mutex.
843  */
844 static void update_dentry_lease(struct dentry *dentry,
845                                 struct ceph_mds_reply_lease *lease,
846                                 struct ceph_mds_session *session,
847                                 unsigned long from_time)
848 {
849         struct ceph_dentry_info *di = ceph_dentry(dentry);
850         long unsigned duration = le32_to_cpu(lease->duration_ms);
851         long unsigned ttl = from_time + (duration * HZ) / 1000;
852         long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
853         struct inode *dir;
854
855         /* only track leases on regular dentries */
856         if (dentry->d_op != &ceph_dentry_ops)
857                 return;
858
859         spin_lock(&dentry->d_lock);
860         dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
861              dentry, duration, ttl);
862
863         /* make lease_rdcache_gen match directory */
864         dir = dentry->d_parent->d_inode;
865         di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
866
867         if (duration == 0)
868                 goto out_unlock;
869
870         if (di->lease_gen == session->s_cap_gen &&
871             time_before(ttl, dentry->d_time))
872                 goto out_unlock;  /* we already have a newer lease. */
873
874         if (di->lease_session && di->lease_session != session)
875                 goto out_unlock;
876
877         ceph_dentry_lru_touch(dentry);
878
879         if (!di->lease_session)
880                 di->lease_session = ceph_get_mds_session(session);
881         di->lease_gen = session->s_cap_gen;
882         di->lease_seq = le32_to_cpu(lease->seq);
883         di->lease_renew_after = half_ttl;
884         di->lease_renew_from = 0;
885         dentry->d_time = ttl;
886 out_unlock:
887         spin_unlock(&dentry->d_lock);
888         return;
889 }
890
891 /*
892  * Set dentry's directory position based on the current dir's max, and
893  * order it in d_subdirs, so that dcache_readdir behaves.
894  *
895  * Always called under directory's i_mutex.
896  */
897 static void ceph_set_dentry_offset(struct dentry *dn)
898 {
899         struct dentry *dir = dn->d_parent;
900         struct inode *inode = dir->d_inode;
901         struct ceph_inode_info *ci;
902         struct ceph_dentry_info *di;
903
904         BUG_ON(!inode);
905
906         ci = ceph_inode(inode);
907         di = ceph_dentry(dn);
908
909         spin_lock(&ci->i_ceph_lock);
910         if (!__ceph_dir_is_complete(ci)) {
911                 spin_unlock(&ci->i_ceph_lock);
912                 return;
913         }
914         di->offset = ceph_inode(inode)->i_max_offset++;
915         spin_unlock(&ci->i_ceph_lock);
916
917         spin_lock(&dir->d_lock);
918         spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
919         list_move(&dn->d_u.d_child, &dir->d_subdirs);
920         dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
921              dn->d_u.d_child.prev, dn->d_u.d_child.next);
922         spin_unlock(&dn->d_lock);
923         spin_unlock(&dir->d_lock);
924 }
925
926 /*
927  * splice a dentry to an inode.
928  * caller must hold directory i_mutex for this to be safe.
929  *
930  * we will only rehash the resulting dentry if @prehash is
931  * true; @prehash will be set to false (for the benefit of
932  * the caller) if we fail.
933  */
934 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
935                                     bool *prehash, bool set_offset)
936 {
937         struct dentry *realdn;
938
939         BUG_ON(dn->d_inode);
940
941         /* dn must be unhashed */
942         if (!d_unhashed(dn))
943                 d_drop(dn);
944         realdn = d_materialise_unique(dn, in);
945         if (IS_ERR(realdn)) {
946                 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
947                        PTR_ERR(realdn), dn, in, ceph_vinop(in));
948                 if (prehash)
949                         *prehash = false; /* don't rehash on error */
950                 dn = realdn; /* note realdn contains the error */
951                 goto out;
952         } else if (realdn) {
953                 dout("dn %p (%d) spliced with %p (%d) "
954                      "inode %p ino %llx.%llx\n",
955                      dn, d_count(dn),
956                      realdn, d_count(realdn),
957                      realdn->d_inode, ceph_vinop(realdn->d_inode));
958                 dput(dn);
959                 dn = realdn;
960         } else {
961                 BUG_ON(!ceph_dentry(dn));
962                 dout("dn %p attached to %p ino %llx.%llx\n",
963                      dn, dn->d_inode, ceph_vinop(dn->d_inode));
964         }
965         if ((!prehash || *prehash) && d_unhashed(dn))
966                 d_rehash(dn);
967         if (set_offset)
968                 ceph_set_dentry_offset(dn);
969 out:
970         return dn;
971 }
972
973 /*
974  * Incorporate results into the local cache.  This is either just
975  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
976  * after a lookup).
977  *
978  * A reply may contain
979  *         a directory inode along with a dentry.
980  *  and/or a target inode
981  *
982  * Called with snap_rwsem (read).
983  */
984 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
985                     struct ceph_mds_session *session)
986 {
987         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
988         struct inode *in = NULL;
989         struct ceph_mds_reply_inode *ininfo;
990         struct ceph_vino vino;
991         struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
992         int err = 0;
993
994         dout("fill_trace %p is_dentry %d is_target %d\n", req,
995              rinfo->head->is_dentry, rinfo->head->is_target);
996
997 #if 0
998         /*
999          * Debugging hook:
1000          *
1001          * If we resend completed ops to a recovering mds, we get no
1002          * trace.  Since that is very rare, pretend this is the case
1003          * to ensure the 'no trace' handlers in the callers behave.
1004          *
1005          * Fill in inodes unconditionally to avoid breaking cap
1006          * invariants.
1007          */
1008         if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
1009                 pr_info("fill_trace faking empty trace on %lld %s\n",
1010                         req->r_tid, ceph_mds_op_name(rinfo->head->op));
1011                 if (rinfo->head->is_dentry) {
1012                         rinfo->head->is_dentry = 0;
1013                         err = fill_inode(req->r_locked_dir,
1014                                          &rinfo->diri, rinfo->dirfrag,
1015                                          session, req->r_request_started, -1);
1016                 }
1017                 if (rinfo->head->is_target) {
1018                         rinfo->head->is_target = 0;
1019                         ininfo = rinfo->targeti.in;
1020                         vino.ino = le64_to_cpu(ininfo->ino);
1021                         vino.snap = le64_to_cpu(ininfo->snapid);
1022                         in = ceph_get_inode(sb, vino);
1023                         err = fill_inode(in, &rinfo->targeti, NULL,
1024                                          session, req->r_request_started,
1025                                          req->r_fmode);
1026                         iput(in);
1027                 }
1028         }
1029 #endif
1030
1031         if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1032                 dout("fill_trace reply is empty!\n");
1033                 if (rinfo->head->result == 0 && req->r_locked_dir)
1034                         ceph_invalidate_dir_request(req);
1035                 return 0;
1036         }
1037
1038         if (rinfo->head->is_dentry) {
1039                 struct inode *dir = req->r_locked_dir;
1040
1041                 if (dir) {
1042                         err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
1043                                          session, req->r_request_started, -1,
1044                                          &req->r_caps_reservation);
1045                         if (err < 0)
1046                                 return err;
1047                 } else {
1048                         WARN_ON_ONCE(1);
1049                 }
1050         }
1051
1052         if (rinfo->head->is_target) {
1053                 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1054                 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1055
1056                 in = ceph_get_inode(sb, vino);
1057                 if (IS_ERR(in)) {
1058                         err = PTR_ERR(in);
1059                         goto done;
1060                 }
1061                 req->r_target_inode = in;
1062
1063                 err = fill_inode(in, &rinfo->targeti, NULL,
1064                                 session, req->r_request_started,
1065                                 (le32_to_cpu(rinfo->head->result) == 0) ?
1066                                 req->r_fmode : -1,
1067                                 &req->r_caps_reservation);
1068                 if (err < 0) {
1069                         pr_err("fill_inode badness %p %llx.%llx\n",
1070                                 in, ceph_vinop(in));
1071                         goto done;
1072                 }
1073         }
1074
1075         /*
1076          * ignore null lease/binding on snapdir ENOENT, or else we
1077          * will have trouble splicing in the virtual snapdir later
1078          */
1079         if (rinfo->head->is_dentry && !req->r_aborted &&
1080             req->r_locked_dir &&
1081             (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1082                                                fsc->mount_options->snapdir_name,
1083                                                req->r_dentry->d_name.len))) {
1084                 /*
1085                  * lookup link rename   : null -> possibly existing inode
1086                  * mknod symlink mkdir  : null -> new inode
1087                  * unlink               : linked -> null
1088                  */
1089                 struct inode *dir = req->r_locked_dir;
1090                 struct dentry *dn = req->r_dentry;
1091                 bool have_dir_cap, have_lease;
1092
1093                 BUG_ON(!dn);
1094                 BUG_ON(!dir);
1095                 BUG_ON(dn->d_parent->d_inode != dir);
1096                 BUG_ON(ceph_ino(dir) !=
1097                        le64_to_cpu(rinfo->diri.in->ino));
1098                 BUG_ON(ceph_snap(dir) !=
1099                        le64_to_cpu(rinfo->diri.in->snapid));
1100
1101                 /* do we have a lease on the whole dir? */
1102                 have_dir_cap =
1103                         (le32_to_cpu(rinfo->diri.in->cap.caps) &
1104                          CEPH_CAP_FILE_SHARED);
1105
1106                 /* do we have a dn lease? */
1107                 have_lease = have_dir_cap ||
1108                         le32_to_cpu(rinfo->dlease->duration_ms);
1109                 if (!have_lease)
1110                         dout("fill_trace  no dentry lease or dir cap\n");
1111
1112                 /* rename? */
1113                 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1114                         dout(" src %p '%.*s' dst %p '%.*s'\n",
1115                              req->r_old_dentry,
1116                              req->r_old_dentry->d_name.len,
1117                              req->r_old_dentry->d_name.name,
1118                              dn, dn->d_name.len, dn->d_name.name);
1119                         dout("fill_trace doing d_move %p -> %p\n",
1120                              req->r_old_dentry, dn);
1121
1122                         d_move(req->r_old_dentry, dn);
1123                         dout(" src %p '%.*s' dst %p '%.*s'\n",
1124                              req->r_old_dentry,
1125                              req->r_old_dentry->d_name.len,
1126                              req->r_old_dentry->d_name.name,
1127                              dn, dn->d_name.len, dn->d_name.name);
1128
1129                         /* ensure target dentry is invalidated, despite
1130                            rehashing bug in vfs_rename_dir */
1131                         ceph_invalidate_dentry_lease(dn);
1132
1133                         /*
1134                          * d_move() puts the renamed dentry at the end of
1135                          * d_subdirs.  We need to assign it an appropriate
1136                          * directory offset so we can behave when dir is
1137                          * complete.
1138                          */
1139                         ceph_set_dentry_offset(req->r_old_dentry);
1140                         dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1141                              ceph_dentry(req->r_old_dentry)->offset);
1142
1143                         dn = req->r_old_dentry;  /* use old_dentry */
1144                 }
1145
1146                 /* null dentry? */
1147                 if (!rinfo->head->is_target) {
1148                         dout("fill_trace null dentry\n");
1149                         if (dn->d_inode) {
1150                                 dout("d_delete %p\n", dn);
1151                                 d_delete(dn);
1152                         } else {
1153                                 dout("d_instantiate %p NULL\n", dn);
1154                                 d_instantiate(dn, NULL);
1155                                 if (have_lease && d_unhashed(dn))
1156                                         d_rehash(dn);
1157                                 update_dentry_lease(dn, rinfo->dlease,
1158                                                     session,
1159                                                     req->r_request_started);
1160                         }
1161                         goto done;
1162                 }
1163
1164                 /* attach proper inode */
1165                 if (!dn->d_inode) {
1166                         ihold(in);
1167                         dn = splice_dentry(dn, in, &have_lease, true);
1168                         if (IS_ERR(dn)) {
1169                                 err = PTR_ERR(dn);
1170                                 goto done;
1171                         }
1172                         req->r_dentry = dn;  /* may have spliced */
1173                 } else if (dn->d_inode && dn->d_inode != in) {
1174                         dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1175                              dn, dn->d_inode, ceph_vinop(dn->d_inode),
1176                              ceph_vinop(in));
1177                         have_lease = false;
1178                 }
1179
1180                 if (have_lease)
1181                         update_dentry_lease(dn, rinfo->dlease, session,
1182                                             req->r_request_started);
1183                 dout(" final dn %p\n", dn);
1184         } else if (!req->r_aborted &&
1185                    (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1186                     req->r_op == CEPH_MDS_OP_MKSNAP)) {
1187                 struct dentry *dn = req->r_dentry;
1188
1189                 /* fill out a snapdir LOOKUPSNAP dentry */
1190                 BUG_ON(!dn);
1191                 BUG_ON(!req->r_locked_dir);
1192                 BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
1193                 ininfo = rinfo->targeti.in;
1194                 vino.ino = le64_to_cpu(ininfo->ino);
1195                 vino.snap = le64_to_cpu(ininfo->snapid);
1196                 dout(" linking snapped dir %p to dn %p\n", in, dn);
1197                 ihold(in);
1198                 dn = splice_dentry(dn, in, NULL, true);
1199                 if (IS_ERR(dn)) {
1200                         err = PTR_ERR(dn);
1201                         goto done;
1202                 }
1203                 req->r_dentry = dn;  /* may have spliced */
1204         }
1205 done:
1206         dout("fill_trace done err=%d\n", err);
1207         return err;
1208 }
1209
1210 /*
1211  * Prepopulate our cache with readdir results, leases, etc.
1212  */
1213 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1214                                            struct ceph_mds_session *session)
1215 {
1216         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1217         int i, err = 0;
1218
1219         for (i = 0; i < rinfo->dir_nr; i++) {
1220                 struct ceph_vino vino;
1221                 struct inode *in;
1222                 int rc;
1223
1224                 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1225                 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1226
1227                 in = ceph_get_inode(req->r_dentry->d_sb, vino);
1228                 if (IS_ERR(in)) {
1229                         err = PTR_ERR(in);
1230                         dout("new_inode badness got %d\n", err);
1231                         continue;
1232                 }
1233                 rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
1234                                 req->r_request_started, -1,
1235                                 &req->r_caps_reservation);
1236                 if (rc < 0) {
1237                         pr_err("fill_inode badness on %p got %d\n", in, rc);
1238                         err = rc;
1239                         continue;
1240                 }
1241         }
1242
1243         return err;
1244 }
1245
1246 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1247                              struct ceph_mds_session *session)
1248 {
1249         struct dentry *parent = req->r_dentry;
1250         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1251         struct qstr dname;
1252         struct dentry *dn;
1253         struct inode *in;
1254         int err = 0, ret, i;
1255         struct inode *snapdir = NULL;
1256         struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1257         struct ceph_dentry_info *di;
1258         u64 r_readdir_offset = req->r_readdir_offset;
1259         u32 frag = le32_to_cpu(rhead->args.readdir.frag);
1260
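        /*
         * Offsets 0 and 1 in the leftmost fragment are used by the "."
         * and ".." entries, so real entries there start at offset 2;
         * any other fragment starts at offset 0.
         */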
1261         if (rinfo->dir_dir &&
1262             le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1263                 dout("readdir_prepopulate got new frag %x -> %x\n",
1264                      frag, le32_to_cpu(rinfo->dir_dir->frag));
1265                 frag = le32_to_cpu(rinfo->dir_dir->frag);
1266                 if (ceph_frag_is_leftmost(frag))
1267                         r_readdir_offset = 2;
1268                 else
1269                         r_readdir_offset = 0;
1270         }
1271
1272         if (req->r_aborted)
1273                 return readdir_prepopulate_inodes_only(req, session);
1274
1275         if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1276                 snapdir = ceph_get_snapdir(parent->d_inode);
1277                 parent = d_find_alias(snapdir);
1278                 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1279                      rinfo->dir_nr, parent);
1280         } else {
1281                 dout("readdir_prepopulate %d items under dn %p\n",
1282                      rinfo->dir_nr, parent);
1283                 if (rinfo->dir_dir)
1284                         ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
1285         }
1286
1287         /* FIXME: release caps/leases if error occurs */
1288         for (i = 0; i < rinfo->dir_nr; i++) {
1289                 struct ceph_vino vino;
1290
1291                 dname.name = rinfo->dir_dname[i];
1292                 dname.len = rinfo->dir_dname_len[i];
1293                 dname.hash = full_name_hash(dname.name, dname.len);
1294
1295                 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1296                 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1297
1298 retry_lookup:
1299                 dn = d_lookup(parent, &dname);
1300                 dout("d_lookup on parent=%p name=%.*s got %p\n",
1301                      parent, dname.len, dname.name, dn);
1302
1303                 if (!dn) {
1304                         dn = d_alloc(parent, &dname);
1305                         dout("d_alloc %p '%.*s' = %p\n", parent,
1306                              dname.len, dname.name, dn);
1307                         if (dn == NULL) {
1308                                 dout("d_alloc badness\n");
1309                                 err = -ENOMEM;
1310                                 goto out;
1311                         }
1312                         ret = ceph_init_dentry(dn);
1313                         if (ret < 0) {
1314                                 dput(dn);
1315                                 err = ret;
1316                                 goto out;
1317                         }
1318                 } else if (dn->d_inode &&
1319                            (ceph_ino(dn->d_inode) != vino.ino ||
1320                             ceph_snap(dn->d_inode) != vino.snap)) {
1321                         dout(" dn %p points to wrong inode %p\n",
1322                              dn, dn->d_inode);
1323                         d_delete(dn);
1324                         dput(dn);
1325                         goto retry_lookup;
1326                 } else {
1327                         /* reorder parent's d_subdirs */
1328                         spin_lock(&parent->d_lock);
1329                         spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
1330                         list_move(&dn->d_u.d_child, &parent->d_subdirs);
1331                         spin_unlock(&dn->d_lock);
1332                         spin_unlock(&parent->d_lock);
1333                 }
1334
1335                 /* inode */
1336                 if (dn->d_inode) {
1337                         in = dn->d_inode;
1338                 } else {
1339                         in = ceph_get_inode(parent->d_sb, vino);
1340                         if (IS_ERR(in)) {
1341                                 dout("new_inode badness\n");
1342                                 d_drop(dn);
1343                                 dput(dn);
1344                                 err = PTR_ERR(in);
1345                                 goto out;
1346                         }
1347                 }
1348
1349                 if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
1350                                req->r_request_started, -1,
1351                                &req->r_caps_reservation) < 0) {
1352                         pr_err("fill_inode badness on %p\n", in);
1353                         if (!dn->d_inode)
1354                                 iput(in);
1355                         d_drop(dn);
1356                         goto next_item;
1357                 }
1358
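                     /* splice the new inode into the still-negative dentry */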
1359                 if (!dn->d_inode) {
1360                         dn = splice_dentry(dn, in, NULL, false);
1361                         if (IS_ERR(dn)) {
1362                                 err = PTR_ERR(dn);
1363                                 dn = NULL;
1364                                 goto next_item;
1365                         }
1366                 }
1367
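                     /* remember this entry's readdir position (frag + index) */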
1368                 di = dn->d_fsdata;
1369                 di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
1370
1371                 update_dentry_lease(dn, rinfo->dir_dlease[i],
1372                                     req->r_session,
1373                                     req->r_request_started);
1374 next_item:
1375                 if (dn)
1376                         dput(dn);
1377         }
1378         if (err == 0)
1379                 req->r_did_prepopulate = true;
1380
1381 out:
1382         if (snapdir) {
1383                 iput(snapdir);
1384                 dput(parent);
1385         }
1386         dout("readdir_prepopulate done\n");
1387         return err;
1388 }
1389
1390 int ceph_inode_set_size(struct inode *inode, loff_t size)
1391 {
1392         struct ceph_inode_info *ci = ceph_inode(inode);
1393         int ret = 0;
1394
1395         spin_lock(&ci->i_ceph_lock);
1396         dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1397         inode->i_size = size;
1398         inode->i_blocks = (size + (1 << 9) - 1) >> 9;
1399
1400         /* tell the MDS if we are approaching max_size */
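             /*
              * i.e. the new size has passed half of the max_size grant while
              * the last size we reported to the MDS was still below that mark.
              */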
1401         if ((size << 1) >= ci->i_max_size &&
1402             (ci->i_reported_size << 1) < ci->i_max_size)
1403                 ret = 1;
1404
1405         spin_unlock(&ci->i_ceph_lock);
1406         return ret;
1407 }
1408
1409 /*
1410  * Write back inode data in a worker thread.  (This can't be done
1411  * in the message handler context.)
1412  */
1413 void ceph_queue_writeback(struct inode *inode)
1414 {
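             /* take a ref for the work item; drop it if the work was already queued */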
1415         ihold(inode);
1416         if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1417                        &ceph_inode(inode)->i_wb_work)) {
1418                 dout("ceph_queue_writeback %p\n", inode);
1419         } else {
1420                 dout("ceph_queue_writeback %p failed\n", inode);
1421                 iput(inode);
1422         }
1423 }
1424
1425 static void ceph_writeback_work(struct work_struct *work)
1426 {
1427         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1428                                                   i_wb_work);
1429         struct inode *inode = &ci->vfs_inode;
1430
1431         dout("writeback %p\n", inode);
1432         filemap_fdatawrite(&inode->i_data);
1433         iput(inode);
1434 }
1435
1436 /*
1437  * queue an async invalidation
1438  */
1439 void ceph_queue_invalidate(struct inode *inode)
1440 {
1441         ihold(inode);
1442         if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1443                        &ceph_inode(inode)->i_pg_inv_work)) {
1444                 dout("ceph_queue_invalidate %p\n", inode);
1445         } else {
1446                 dout("ceph_queue_invalidate %p failed\n", inode);
1447                 iput(inode);
1448         }
1449 }
1450
1451 /*
1452  * Invalidate inode pages in a worker thread.  (This can't be done
1453  * in the message handler context.)
1454  */
1455 static void ceph_invalidate_work(struct work_struct *work)
1456 {
1457         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1458                                                   i_pg_inv_work);
1459         struct inode *inode = &ci->vfs_inode;
1460         u32 orig_gen;
1461         int check = 0;
1462
1463         mutex_lock(&ci->i_truncate_mutex);
1464         spin_lock(&ci->i_ceph_lock);
1465         dout("invalidate_pages %p gen %d revoking %d\n", inode,
1466              ci->i_rdcache_gen, ci->i_rdcache_revoking);
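             /*
              * If i_rdcache_gen has moved on since this invalidation was
              * queued (it no longer matches i_rdcache_revoking), the work is
              * stale: skip the page invalidation and only check caps.
              */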
1467         if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1468                 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1469                         check = 1;
1470                 spin_unlock(&ci->i_ceph_lock);
1471                 mutex_unlock(&ci->i_truncate_mutex);
1472                 goto out;
1473         }
1474         orig_gen = ci->i_rdcache_gen;
1475         spin_unlock(&ci->i_ceph_lock);
1476
1477         truncate_inode_pages(inode->i_mapping, 0);
1478
1479         spin_lock(&ci->i_ceph_lock);
1480         if (orig_gen == ci->i_rdcache_gen &&
1481             orig_gen == ci->i_rdcache_revoking) {
1482                 dout("invalidate_pages %p gen %d successful\n", inode,
1483                      ci->i_rdcache_gen);
1484                 ci->i_rdcache_revoking--;
1485                 check = 1;
1486         } else {
1487                 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1488                      inode, orig_gen, ci->i_rdcache_gen,
1489                      ci->i_rdcache_revoking);
1490                 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1491                         check = 1;
1492         }
1493         spin_unlock(&ci->i_ceph_lock);
1494         mutex_unlock(&ci->i_truncate_mutex);
1495 out:
1496         if (check)
1497                 ceph_check_caps(ci, 0, NULL);
1498         iput(inode);
1499 }
1500
1501
1502 /*
1503  * called by trunc_wq;
1504  *
1505  * We do the actual truncation in a separate worker thread as well.
1506  */
1507 static void ceph_vmtruncate_work(struct work_struct *work)
1508 {
1509         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1510                                                   i_vmtruncate_work);
1511         struct inode *inode = &ci->vfs_inode;
1512
1513         dout("vmtruncate_work %p\n", inode);
1514         __ceph_do_pending_vmtruncate(inode);
1515         iput(inode);
1516 }
1517
1518 /*
1519  * Queue an async vmtruncate.  If we fail to queue work, we will handle
1520  * the truncation the next time we call __ceph_do_pending_vmtruncate.
1521  */
1522 void ceph_queue_vmtruncate(struct inode *inode)
1523 {
1524         struct ceph_inode_info *ci = ceph_inode(inode);
1525
1526         ihold(inode);
1527
1528         if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1529                        &ci->i_vmtruncate_work)) {
1530                 dout("ceph_queue_vmtruncate %p\n", inode);
1531         } else {
1532                 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1533                      inode, ci->i_truncate_pending);
1534                 iput(inode);
1535         }
1536 }
1537
1538 /*
1539  * Make sure any pending truncation is applied before doing anything
1540  * that may depend on it.
1541  */
1542 void __ceph_do_pending_vmtruncate(struct inode *inode)
1543 {
1544         struct ceph_inode_info *ci = ceph_inode(inode);
1545         u64 to;
1546         int wrbuffer_refs, finish = 0;
1547
1548         mutex_lock(&ci->i_truncate_mutex);
1549 retry:
1550         spin_lock(&ci->i_ceph_lock);
1551         if (ci->i_truncate_pending == 0) {
1552                 dout("__do_pending_vmtruncate %p none pending\n", inode);
1553                 spin_unlock(&ci->i_ceph_lock);
1554                 mutex_unlock(&ci->i_truncate_mutex);
1555                 return;
1556         }
1557
1558         /*
1559          * make sure any dirty snapped pages are flushed before we
1560          * possibly truncate them.. so write AND block!
1561          */
1562         if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1563                 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1564                      inode);
1565                 spin_unlock(&ci->i_ceph_lock);
1566                 filemap_write_and_wait_range(&inode->i_data, 0,
1567                                              inode->i_sb->s_maxbytes);
1568                 goto retry;
1569         }
1570
1571         /* there should be no reader or writer */
1572         WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1573
1574         to = ci->i_truncate_size;
1575         wrbuffer_refs = ci->i_wrbuffer_ref;
1576         dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1577              ci->i_truncate_pending, to);
1578         spin_unlock(&ci->i_ceph_lock);
1579
1580         truncate_inode_pages(inode->i_mapping, to);
1581
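             /*
              * If i_truncate_size changed while the pages were being
              * truncated (a newer truncate arrived), go around again.
              */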
1582         spin_lock(&ci->i_ceph_lock);
1583         if (to == ci->i_truncate_size) {
1584                 ci->i_truncate_pending = 0;
1585                 finish = 1;
1586         }
1587         spin_unlock(&ci->i_ceph_lock);
1588         if (!finish)
1589                 goto retry;
1590
1591         mutex_unlock(&ci->i_truncate_mutex);
1592
1593         if (wrbuffer_refs == 0)
1594                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1595
1596         wake_up_all(&ci->i_cap_wq);
1597 }
1598
1599 /*
1600  * symlinks
1601  */
1602 static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
1603 {
1604         struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
1605         nd_set_link(nd, ci->i_symlink);
1606         return NULL;
1607 }
1608
1609 static const struct inode_operations ceph_symlink_iops = {
1610         .readlink = generic_readlink,
1611         .follow_link = ceph_sym_follow_link,
1612         .setattr = ceph_setattr,
1613         .getattr = ceph_getattr,
1614         .setxattr = ceph_setxattr,
1615         .getxattr = ceph_getxattr,
1616         .listxattr = ceph_listxattr,
1617         .removexattr = ceph_removexattr,
1618         .get_acl = ceph_get_acl,
1619 };
1620
1621 /*
1622  * setattr
1623  */
1624 int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1625 {
1626         struct inode *inode = dentry->d_inode;
1627         struct ceph_inode_info *ci = ceph_inode(inode);
1628         struct inode *parent_inode;
1629         const unsigned int ia_valid = attr->ia_valid;
1630         struct ceph_mds_request *req;
1631         struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
1632         int issued;
1633         int release = 0, dirtied = 0;
1634         int mask = 0;
1635         int err = 0;
1636         int inode_dirty_flags = 0;
1637
1638         if (ceph_snap(inode) != CEPH_NOSNAP)
1639                 return -EROFS;
1640
1641         err = inode_change_ok(inode, attr);
1642         if (err != 0)
1643                 return err;
1644
1645         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
1646                                        USE_AUTH_MDS);
1647         if (IS_ERR(req))
1648                 return PTR_ERR(req);
1649
1650         spin_lock(&ci->i_ceph_lock);
1651         issued = __ceph_caps_issued(ci, NULL);
1652         dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
1653
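             /*
              * For each attribute: if we hold the relevant EXCL cap, apply
              * the change locally and mark that cap dirty; otherwise include
              * the change in the setattr request to the MDS and note which
              * caps to release.
              */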
1654         if (ia_valid & ATTR_UID) {
1655                 dout("setattr %p uid %d -> %d\n", inode,
1656                      from_kuid(&init_user_ns, inode->i_uid),
1657                      from_kuid(&init_user_ns, attr->ia_uid));
1658                 if (issued & CEPH_CAP_AUTH_EXCL) {
1659                         inode->i_uid = attr->ia_uid;
1660                         dirtied |= CEPH_CAP_AUTH_EXCL;
1661                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1662                            !uid_eq(attr->ia_uid, inode->i_uid)) {
1663                         req->r_args.setattr.uid = cpu_to_le32(
1664                                 from_kuid(&init_user_ns, attr->ia_uid));
1665                         mask |= CEPH_SETATTR_UID;
1666                         release |= CEPH_CAP_AUTH_SHARED;
1667                 }
1668         }
1669         if (ia_valid & ATTR_GID) {
1670                 dout("setattr %p gid %d -> %d\n", inode,
1671                      from_kgid(&init_user_ns, inode->i_gid),
1672                      from_kgid(&init_user_ns, attr->ia_gid));
1673                 if (issued & CEPH_CAP_AUTH_EXCL) {
1674                         inode->i_gid = attr->ia_gid;
1675                         dirtied |= CEPH_CAP_AUTH_EXCL;
1676                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1677                            !gid_eq(attr->ia_gid, inode->i_gid)) {
1678                         req->r_args.setattr.gid = cpu_to_le32(
1679                                 from_kgid(&init_user_ns, attr->ia_gid));
1680                         mask |= CEPH_SETATTR_GID;
1681                         release |= CEPH_CAP_AUTH_SHARED;
1682                 }
1683         }
1684         if (ia_valid & ATTR_MODE) {
1685                 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
1686                      attr->ia_mode);
1687                 if (issued & CEPH_CAP_AUTH_EXCL) {
1688                         inode->i_mode = attr->ia_mode;
1689                         dirtied |= CEPH_CAP_AUTH_EXCL;
1690                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1691                            attr->ia_mode != inode->i_mode) {
1692                         inode->i_mode = attr->ia_mode;
1693                         req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
1694                         mask |= CEPH_SETATTR_MODE;
1695                         release |= CEPH_CAP_AUTH_SHARED;
1696                 }
1697         }
1698
1699         if (ia_valid & ATTR_ATIME) {
1700                 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
1701                      inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
1702                      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
1703                 if (issued & CEPH_CAP_FILE_EXCL) {
1704                         ci->i_time_warp_seq++;
1705                         inode->i_atime = attr->ia_atime;
1706                         dirtied |= CEPH_CAP_FILE_EXCL;
1707                 } else if ((issued & CEPH_CAP_FILE_WR) &&
1708                            timespec_compare(&inode->i_atime,
1709                                             &attr->ia_atime) < 0) {
1710                         inode->i_atime = attr->ia_atime;
1711                         dirtied |= CEPH_CAP_FILE_WR;
1712                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1713                            !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
1714                         ceph_encode_timespec(&req->r_args.setattr.atime,
1715                                              &attr->ia_atime);
1716                         mask |= CEPH_SETATTR_ATIME;
1717                         release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
1718                                 CEPH_CAP_FILE_WR;
1719                 }
1720         }
1721         if (ia_valid & ATTR_MTIME) {
1722                 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
1723                      inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
1724                      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
1725                 if (issued & CEPH_CAP_FILE_EXCL) {
1726                         ci->i_time_warp_seq++;
1727                         inode->i_mtime = attr->ia_mtime;
1728                         dirtied |= CEPH_CAP_FILE_EXCL;
1729                 } else if ((issued & CEPH_CAP_FILE_WR) &&
1730                            timespec_compare(&inode->i_mtime,
1731                                             &attr->ia_mtime) < 0) {
1732                         inode->i_mtime = attr->ia_mtime;
1733                         dirtied |= CEPH_CAP_FILE_WR;
1734                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1735                            !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
1736                         ceph_encode_timespec(&req->r_args.setattr.mtime,
1737                                              &attr->ia_mtime);
1738                         mask |= CEPH_SETATTR_MTIME;
1739                         release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1740                                 CEPH_CAP_FILE_WR;
1741                 }
1742         }
1743         if (ia_valid & ATTR_SIZE) {
1744                 dout("setattr %p size %lld -> %lld\n", inode,
1745                      inode->i_size, attr->ia_size);
1746                 if (attr->ia_size > inode->i_sb->s_maxbytes) {
1747                         err = -EINVAL;
1748                         goto out;
1749                 }
1750                 if ((issued & CEPH_CAP_FILE_EXCL) &&
1751                     attr->ia_size > inode->i_size) {
1752                         inode->i_size = attr->ia_size;
1753                         inode->i_blocks =
1754                                 (attr->ia_size + (1 << 9) - 1) >> 9;
1755                         inode->i_ctime = attr->ia_ctime;
1756                         ci->i_reported_size = attr->ia_size;
1757                         dirtied |= CEPH_CAP_FILE_EXCL;
1758                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1759                            attr->ia_size != inode->i_size) {
1760                         req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
1761                         req->r_args.setattr.old_size =
1762                                 cpu_to_le64(inode->i_size);
1763                         mask |= CEPH_SETATTR_SIZE;
1764                         release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1765                                 CEPH_CAP_FILE_WR;
1766                 }
1767         }
1768
1769         /* these do nothing */
1770         if (ia_valid & ATTR_CTIME) {
1771                 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
1772                                          ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
1773                 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
1774                      inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
1775                      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
1776                      only ? "ctime only" : "ignored");
1777                 inode->i_ctime = attr->ia_ctime;
1778                 if (only) {
1779                         /*
1780                          * if the kernel wants to dirty ctime but nothing else,
1781                          * we need to choose a cap to dirty under, or do
1782                          * an almost-no-op setattr
1783                          */
1784                         if (issued & CEPH_CAP_AUTH_EXCL)
1785                                 dirtied |= CEPH_CAP_AUTH_EXCL;
1786                         else if (issued & CEPH_CAP_FILE_EXCL)
1787                                 dirtied |= CEPH_CAP_FILE_EXCL;
1788                         else if (issued & CEPH_CAP_XATTR_EXCL)
1789                                 dirtied |= CEPH_CAP_XATTR_EXCL;
1790                         else
1791                                 mask |= CEPH_SETATTR_CTIME;
1792                 }
1793         }
1794         if (ia_valid & ATTR_FILE)
1795                 dout("setattr %p ATTR_FILE ... hrm!\n", inode);
1796
1797         if (dirtied) {
1798                 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
1799                 inode->i_ctime = CURRENT_TIME;
1800         }
1801
1802         release &= issued;
1803         spin_unlock(&ci->i_ceph_lock);
1804
1805         if (inode_dirty_flags)
1806                 __mark_inode_dirty(inode, inode_dirty_flags);
1807
1808         if (ia_valid & ATTR_MODE) {
1809                 err = posix_acl_chmod(inode, attr->ia_mode);
1810                 if (err)
1811                         goto out_put;
1812         }
1813
1814         if (mask) {
1815                 req->r_inode = inode;
1816                 ihold(inode);
1817                 req->r_inode_drop = release;
1818                 req->r_args.setattr.mask = cpu_to_le32(mask);
1819                 req->r_num_caps = 1;
1820                 parent_inode = ceph_get_dentry_parent_inode(dentry);
1821                 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
1822                 iput(parent_inode);
1823         }
1824         dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
1825              ceph_cap_string(dirtied), mask);
1826
1827         ceph_mdsc_put_request(req);
1828         if (mask & CEPH_SETATTR_SIZE)
1829                 __ceph_do_pending_vmtruncate(inode);
1830         return err;
1831 out:
1832         spin_unlock(&ci->i_ceph_lock);
1833 out_put:
1834         ceph_mdsc_put_request(req);
1835         return err;
1836 }
1837
1838 /*
1839  * Verify that we have a lease on the given mask.  If not,
1840  * do a getattr against an mds.
1841  */
1842 int ceph_do_getattr(struct inode *inode, int mask)
1843 {
1844         struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
1845         struct ceph_mds_client *mdsc = fsc->mdsc;
1846         struct ceph_mds_request *req;
1847         int err;
1848
1849         if (ceph_snap(inode) == CEPH_SNAPDIR) {
1850                 dout("do_getattr inode %p SNAPDIR\n", inode);
1851                 return 0;
1852         }
1853
1854         dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode);
1855         if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
1856                 return 0;
1857
1858         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
1859         if (IS_ERR(req))
1860                 return PTR_ERR(req);
1861         req->r_inode = inode;
1862         ihold(inode);
1863         req->r_num_caps = 1;
1864         req->r_args.getattr.mask = cpu_to_le32(mask);
1865         err = ceph_mdsc_do_request(mdsc, NULL, req);
1866         ceph_mdsc_put_request(req);
1867         dout("do_getattr result=%d\n", err);
1868         return err;
1869 }
1870
1871
1872 /*
1873  * Check inode permissions.  We verify we have a valid value for
1874  * the AUTH cap, then call the generic handler.
1875  */
1876 int ceph_permission(struct inode *inode, int mask)
1877 {
1878         int err;
1879
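             /* a getattr may block on the MDS; punt if we are in RCU-walk mode */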
1880         if (mask & MAY_NOT_BLOCK)
1881                 return -ECHILD;
1882
1883         err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
1884
1885         if (!err)
1886                 err = generic_permission(inode, mask);
1887         return err;
1888 }
1889
1890 /*
1891  * Get all attributes.  Hopefully someday we'll have a statlite()
1892  * and can limit the fields we require to be accurate.
1893  */
1894 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
1895                  struct kstat *stat)
1896 {
1897         struct inode *inode = dentry->d_inode;
1898         struct ceph_inode_info *ci = ceph_inode(inode);
1899         int err;
1900
1901         err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
1902         if (!err) {
1903                 generic_fillattr(inode, stat);
1904                 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
1905                 if (ceph_snap(inode) != CEPH_NOSNAP)
1906                         stat->dev = ceph_snap(inode);
1907                 else
1908                         stat->dev = 0;
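                     /* for directories, report rbytes or the entry count as the size */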
1909                 if (S_ISDIR(inode->i_mode)) {
1910                         if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
1911                                                 RBYTES))
1912                                 stat->size = ci->i_rbytes;
1913                         else
1914                                 stat->size = ci->i_files + ci->i_subdirs;
1915                         stat->blocks = 0;
1916                         stat->blksize = 65536;
1917                 }
1918         }
1919         return err;
1920 }