1 #include <linux/ceph/ceph_debug.h>
2 #include <linux/ceph/pagelist.h>
5 #include "mds_client.h"
7 #include <linux/ceph/decode.h>
9 #include <linux/xattr.h>
10 #include <linux/posix_acl_xattr.h>
11 #include <linux/slab.h>
13 #define XATTR_CEPH_PREFIX "ceph."
14 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
16 static int __remove_xattr(struct ceph_inode_info *ci,
17 struct ceph_inode_xattr *xattr);
19 const struct xattr_handler ceph_other_xattr_handler;
/*
 * List of handlers for synthetic system.* attributes. Other
 * attributes are handled directly.
 */
/*
 * Handler table registered on the ceph superblock.  The POSIX ACL
 * handlers are present only under CONFIG_CEPH_FS_POSIX_ACL;
 * ceph_other_xattr_handler uses an empty prefix and therefore matches
 * every remaining name (see its definition below).
 * NOTE(review): the matching #endif and the NULL table terminator
 * appear elided in this excerpt — confirm against the full file.
 */
const struct xattr_handler *ceph_xattr_handlers[] = {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
	&ceph_other_xattr_handler,
34 static bool ceph_is_valid_xattr(const char *name)
36 return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
37 !strncmp(name, XATTR_SECURITY_PREFIX,
38 XATTR_SECURITY_PREFIX_LEN) ||
39 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
40 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 * NOTE(review): the "struct ceph_vxattr {" header and the name field
 * appear elided in this excerpt.
 */
	size_t name_size;	/* strlen(name) + 1 (for '\0') */
	/* formats the value into val; returns the snprintf-style length */
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
	/* readonly: setxattr is rejected; hidden: omitted from listxattr */
	bool readonly, hidden;
	/* NULL exists_cb means the vxattr always exists for this inode */
	bool (*exists_cb)(struct ceph_inode_info *ci);
58 static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
61 char *p = (char *)&ci->i_layout;
63 for (s = 0; s < sizeof(ci->i_layout); s++, p++)
/*
 * Format the full layout string
 * ("stripe_unit=... stripe_count=... object_size=... pool=...") into val.
 * The pool name is looked up in the osdmap under osdc->lock; when the id
 * has no name, the numeric pool id is emitted instead.
 * NOTE(review): local declarations (ret, buf, len), the branch structure
 * and closing braces are elided in this excerpt.
 */
static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	s64 pool = ci->i_layout.pool_id;
	const char *pool_name;

	dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
	/* osdc->lock protects the osdmap while we resolve the pool name */
	down_read(&osdc->lock);
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
	size_t len = strlen(pool_name);

	/* named pool: format the prefix, then append the name separately */
	ret = snprintf(buf, sizeof(buf),
		       "stripe_unit=%u stripe_count=%u object_size=%u pool=",
		       ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
		       ci->i_layout.object_size);
	} else if (ret + len > size) {
	memcpy(val, buf, ret);
	memcpy(val + ret, pool_name, len);
	/* unnamed pool: fall back to the numeric pool id */
	ret = snprintf(buf, sizeof(buf),
		       "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
		       ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
		       ci->i_layout.object_size, (unsigned long long)pool);
	memcpy(val, buf, ret);
	up_read(&osdc->lock);
113 static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
114 char *val, size_t size)
116 return snprintf(val, size, "%u", ci->i_layout.stripe_unit);
119 static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
120 char *val, size_t size)
122 return snprintf(val, size, "%u", ci->i_layout.stripe_count);
125 static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
126 char *val, size_t size)
128 return snprintf(val, size, "%u", ci->i_layout.object_size);
131 static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
132 char *val, size_t size)
135 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
136 struct ceph_osd_client *osdc = &fsc->client->osdc;
137 s64 pool = ci->i_layout.pool_id;
138 const char *pool_name;
140 down_read(&osdc->lock);
141 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
143 ret = snprintf(val, size, "%s", pool_name);
145 ret = snprintf(val, size, "%lld", (unsigned long long)pool);
146 up_read(&osdc->lock);
152 static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
155 return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
158 static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
161 return snprintf(val, size, "%lld", ci->i_files);
164 static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
167 return snprintf(val, size, "%lld", ci->i_subdirs);
170 static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
173 return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
176 static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
179 return snprintf(val, size, "%lld", ci->i_rfiles);
182 static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
185 return snprintf(val, size, "%lld", ci->i_rsubdirs);
188 static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
191 return snprintf(val, size, "%lld", ci->i_rbytes);
194 static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
197 return snprintf(val, size, "%ld.09%ld", (long)ci->i_rctime.tv_sec,
198 (long)ci->i_rctime.tv_nsec);
/* Build a vxattr name literal: "ceph." <type> "." <name> */
#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
/* Same with a third component: "ceph." <type> "." <name> "." <name2> */
#define CEPH_XATTR_NAME2(_type, _name, _name2) \
	XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
/*
 * Table entry for a plain vxattr served by ceph_vxattrcb_<type>_<name>().
 * NOTE(review): the surrounding initializer braces and remaining fields
 * appear elided in this excerpt.
 */
#define XATTR_NAME_CEPH(_type, _name) \
	.name = CEPH_XATTR_NAME(_type, _name), \
	.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
	.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
/*
 * Table entry for one layout field; it exists only when
 * ceph_vxattrcb_layout_exists() reports a layout is set.
 */
#define XATTR_LAYOUT_FIELD(_type, _name, _field) \
	.name = CEPH_XATTR_NAME2(_type, _name, _field), \
	.name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
	.getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \
	.exists_cb = ceph_vxattrcb_layout_exists, \
/*
 * Virtual xattrs for directories: the composite "ceph.dir.layout", its
 * individual layout fields, and the (recursive) directory statistics.
 * NOTE(review): initializer braces between entries appear elided in
 * this excerpt.
 */
static struct ceph_vxattr ceph_dir_vxattrs[] = {
	.name = "ceph.dir.layout",
	.name_size = sizeof("ceph.dir.layout"),
	.getxattr_cb = ceph_vxattrcb_layout,
	.exists_cb = ceph_vxattrcb_layout_exists,
	XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
	XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
	XATTR_LAYOUT_FIELD(dir, layout, object_size),
	XATTR_LAYOUT_FIELD(dir, layout, pool),
	XATTR_NAME_CEPH(dir, entries),
	XATTR_NAME_CEPH(dir, files),
	XATTR_NAME_CEPH(dir, subdirs),
	XATTR_NAME_CEPH(dir, rentries),
	XATTR_NAME_CEPH(dir, rfiles),
	XATTR_NAME_CEPH(dir, rsubdirs),
	XATTR_NAME_CEPH(dir, rbytes),
	XATTR_NAME_CEPH(dir, rctime),
	{ .name = NULL, 0 } /* Required table terminator */
/* computed once at init by ceph_xattr_init() */
static size_t ceph_dir_vxattrs_name_size;	/* total size of all names */
/*
 * Virtual xattrs for regular files: the composite "ceph.file.layout"
 * and its individual layout fields.
 * NOTE(review): initializer braces between entries appear elided in
 * this excerpt.
 */
static struct ceph_vxattr ceph_file_vxattrs[] = {
	.name = "ceph.file.layout",
	.name_size = sizeof("ceph.file.layout"),
	.getxattr_cb = ceph_vxattrcb_layout,
	.exists_cb = ceph_vxattrcb_layout_exists,
	XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
	XATTR_LAYOUT_FIELD(file, layout, stripe_count),
	XATTR_LAYOUT_FIELD(file, layout, object_size),
	XATTR_LAYOUT_FIELD(file, layout, pool),
	{ .name = NULL, 0 } /* Required table terminator */
/* computed once at init by ceph_xattr_init() */
static size_t ceph_file_vxattrs_name_size;	/* total size of all names */
269 static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
271 if (S_ISDIR(inode->i_mode))
272 return ceph_dir_vxattrs;
273 else if (S_ISREG(inode->i_mode))
274 return ceph_file_vxattrs;
278 static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
280 if (vxattrs == ceph_dir_vxattrs)
281 return ceph_dir_vxattrs_name_size;
282 if (vxattrs == ceph_file_vxattrs)
283 return ceph_file_vxattrs_name_size;
289 * Compute the aggregate size (including terminating '\0') of all
290 * virtual extended attribute names in the given vxattr table.
292 static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
294 struct ceph_vxattr *vxattr;
297 for (vxattr = vxattrs; vxattr->name; vxattr++)
299 size += vxattr->name_size;
304 /* Routines called at initialization and exit time */
306 void __init ceph_xattr_init(void)
308 ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
309 ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
312 void ceph_xattr_exit(void)
314 ceph_dir_vxattrs_name_size = 0;
315 ceph_file_vxattrs_name_size = 0;
318 static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
321 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);
324 while (vxattr->name) {
325 if (!strcmp(vxattr->name, name))
/*
 * Insert or replace the (name, val) entry in the inode's in-memory
 * xattr rb-tree.  Called with ci->i_ceph_lock held.
 *
 * update_xattr  < 0: remove the existing entry instead (see below)
 * update_xattr != 0: entry takes ownership of name/val and is marked dirty
 * update_xattr == 0: used when rebuilding the tree from the MDS blob
 * flags follows setxattr(2): XATTR_CREATE / XATTR_REPLACE.
 * *newxattr is a preallocated node, consumed on a fresh insert.
 * NOTE(review): braces, the descent loop and error returns are elided
 * in this excerpt.
 */
static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int flags, int update_xattr,
		       struct ceph_inode_xattr **newxattr)
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;

	p = &ci->i_xattrs.index.rb_node;
	/* rb-tree descent: ordered by name bytes, then by name length */
	xattr = rb_entry(parent, struct ceph_inode_xattr, node);
	c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
	if (name_len == xattr->name_len)
	else if (name_len < xattr->name_len)

	/* setxattr(2) semantics: CREATE fails if present, REPLACE if absent */
	if (xattr && (flags & XATTR_CREATE))
	else if (!xattr && (flags & XATTR_REPLACE))

	/* negative update_xattr requests removal of the found entry */
	if (update_xattr < 0) {
		__remove_xattr(ci, xattr);

	xattr->name_len = name_len;
	xattr->should_free_name = update_xattr;

	ci->i_xattrs.count++;
	dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	/* replacing an existing value: free the old one first */
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	/* keep aggregate accounting in sync for blob size estimation */
	ci->i_xattrs.names_size -= xattr->name_len;
	ci->i_xattrs.vals_size -= xattr->val_len;
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	xattr->val_len = val_len;
	xattr->dirty = update_xattr;
	xattr->should_free_val = (val && update_xattr);

	rb_link_node(&xattr->node, parent, p);
	rb_insert_color(&xattr->node, &ci->i_xattrs.index);
	dout("__set_xattr_val p=%p\n", p);

	dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
/*
 * Look up a cached xattr by name in the inode's rb-tree; returns the
 * node or NULL.  Called with ci->i_ceph_lock held (see __ceph_getxattr).
 * NOTE(review): the descent loop branches and braces are elided in this
 * excerpt.
 */
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);

	p = &ci->i_xattrs.index.rb_node;
	xattr = rb_entry(parent, struct ceph_inode_xattr, node);
	/* same ordering as __set_xattr: compare bytes, then length */
	c = strncmp(name, xattr->name, xattr->name_len);
	if (c == 0 && name_len > xattr->name_len)

	dout("__get_xattr %s: found %.*s\n", name,
	     xattr->val_len, xattr->val);

	dout("__get_xattr %s: not found\n", name);
462 static void __free_xattr(struct ceph_inode_xattr *xattr)
466 if (xattr->should_free_name)
467 kfree((void *)xattr->name);
468 if (xattr->should_free_val)
469 kfree((void *)xattr->val);
474 static int __remove_xattr(struct ceph_inode_info *ci,
475 struct ceph_inode_xattr *xattr)
480 rb_erase(&xattr->node, &ci->i_xattrs.index);
482 if (xattr->should_free_name)
483 kfree((void *)xattr->name);
484 if (xattr->should_free_val)
485 kfree((void *)xattr->val);
487 ci->i_xattrs.names_size -= xattr->name_len;
488 ci->i_xattrs.vals_size -= xattr->val_len;
489 ci->i_xattrs.count--;
495 static char *__copy_xattr_names(struct ceph_inode_info *ci,
499 struct ceph_inode_xattr *xattr = NULL;
501 p = rb_first(&ci->i_xattrs.index);
502 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
505 xattr = rb_entry(p, struct ceph_inode_xattr, node);
506 memcpy(dest, xattr->name, xattr->name_len);
507 dest[xattr->name_len] = '\0';
509 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
510 xattr->name_len, ci->i_xattrs.names_size);
512 dest += xattr->name_len + 1;
/*
 * Tear down the whole in-memory xattr index: free every node and reset
 * the accounting fields so the tree can be rebuilt from a new blob.
 * NOTE(review): the iteration loop and the per-node free call are
 * elided in this excerpt.
 */
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);
	dout("__ceph_destroy_xattrs p=%p\n", p);
	xattr = rb_entry(p, struct ceph_inode_xattr, node);
	dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
	     xattr->name_len, xattr->name);
	/* tmp is the node just visited; detach it before freeing */
	rb_erase(tmp, &ci->i_xattrs.index);

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
/*
 * Decode the xattr blob received from the MDS into the per-inode
 * rb-tree.  No-op when index_version is already current.  Drops
 * i_ceph_lock to allocate the node array (hence the sparse
 * annotations), then revalidates i_xattrs.version and retries if an
 * update raced in.  Blob format: u32 count, then (u32 len, bytes)
 * pairs for each name and value.
 * NOTE(review): many structural lines (locals, allocation checks, the
 * decode loop header, error paths) are elided in this excerpt.
 */
static int __build_xattrs(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
	const char *name, *val;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_xattr **xattrs = NULL;

	dout("__build_xattrs() len=%d\n",
	     ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

	if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
		return 0; /* already built */

	__ceph_destroy_xattrs(ci);

	/* updated internal xattr rb tree */
	if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
		p = ci->i_xattrs.blob->vec.iov_base;
		end = p + ci->i_xattrs.blob->vec.iov_len;
		ceph_decode_32_safe(&p, end, numattr, bad);
		xattr_version = ci->i_xattrs.version;
		/* drop the spinlock for the allocations below */
		spin_unlock(&ci->i_ceph_lock);

	xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
	for (i = 0; i < numattr; i++) {
		xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_xattrs.version != xattr_version) {
		/* lost a race, retry */
		for (i = 0; i < numattr; i++)

	/* decode one (name, value) pair: u32 length before each part */
	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_32_safe(&p, end, len, bad);
	/* update_xattr == 0: tree does not own name/val (they point into the blob) */
	err = __set_xattr(ci, name, namelen, val, len,
			  0, 0, &xattrs[numattr]);

	ci->i_xattrs.index_version = ci->i_xattrs.version;
	ci->i_xattrs.dirty = false;

	spin_lock(&ci->i_ceph_lock);
	/* error path: free the preallocated nodes */
	for (i = 0; i < numattr; i++)
	ci->i_xattrs.names_size = 0;
634 static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
638 * 4 bytes for the length, and additional 4 bytes per each xattr name,
639 * 4 bytes per each value
641 int size = 4 + ci->i_xattrs.count*(4 + 4) +
642 ci->i_xattrs.names_size +
643 ci->i_xattrs.vals_size;
644 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
645 ci->i_xattrs.count, ci->i_xattrs.names_size,
646 ci->i_xattrs.vals_size);
649 size += 4 + 4 + name_size + val_size;
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.  Encoding matches what __build_xattrs decodes:
 * u32 count, then (u32 len, bytes) for each name and value.
 * NOTE(review): locals, the per-node loop header and closing braces
 * are elided in this excerpt.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
	struct ceph_inode_xattr *xattr = NULL;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		/* the caller must have preallocated a large-enough blob */
		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		ceph_encode_32(&dest, ci->i_xattrs.count);
		xattr = rb_entry(p, struct ceph_inode_xattr, node);

		ceph_encode_32(&dest, xattr->name_len);
		memcpy(dest, xattr->name, xattr->name_len);
		dest += xattr->name_len;
		ceph_encode_32(&dest, xattr->val_len);
		memcpy(dest, xattr->val, xattr->val_len);
		dest += xattr->val_len;

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		/* publish the new blob, dropping the previous one */
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
700 static inline int __get_request_mask(struct inode *in) {
701 struct ceph_mds_request *req = current->journal_info;
703 if (req && req->r_target_inode == in) {
704 if (req->r_op == CEPH_MDS_OP_LOOKUP ||
705 req->r_op == CEPH_MDS_OP_LOOKUPINO ||
706 req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
707 req->r_op == CEPH_MDS_OP_GETATTR) {
708 mask = le32_to_cpu(req->r_args.getattr.mask);
709 } else if (req->r_op == CEPH_MDS_OP_OPEN ||
710 req->r_op == CEPH_MDS_OP_CREATE) {
711 mask = le32_to_cpu(req->r_args.open.mask);
/*
 * Read one xattr for inode.  Virtual (vxattr) names are synthesized via
 * their getxattr_cb.  Regular names are served from the cached rb-tree
 * when XATTR_SHARED caps are held (or the in-flight MDS request already
 * fetched xattrs); otherwise a getattr is issued to the MDS first.
 * Returns the value length, or a negative errno (-ENODATA when absent).
 * NOTE(review): several error-path lines and braces are elided in this
 * excerpt.
 */
ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr *vxattr = NULL;

	/* let's see if a virtual xattr was requested */
	vxattr = ceph_match_vxattr(inode, name);
	/* skip the callback when exists_cb says the vxattr is absent */
	if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
		err = vxattr->getxattr_cb(ci, value, size);

	req_mask = __get_request_mask(inode);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	/* need a fetch unless our caps (or the filling request) cover xattrs */
	if (ci->i_xattrs.version == 0 ||
	    !((req_mask & CEPH_CAP_XATTR_SHARED) ||
	      __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) {
		spin_unlock(&ci->i_ceph_lock);

		/* security module gets xattr while filling trace */
		if (current->journal_info != NULL) {
			pr_warn_ratelimited("sync getxattr %p "
					    "during filling trace\n", inode);

		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
		spin_lock(&ci->i_ceph_lock);

	err = __build_xattrs(inode);

	err = -ENODATA; /* == ENOATTR */
	xattr = __get_xattr(ci, name);

	/* size == 0 means "report required length only" */
	if (size && size < xattr->val_len)

	err = xattr->val_len;
	memcpy(value, xattr->val, xattr->val_len);

	/* remember that the LSM has seen its xattr for this inode */
	if (current->journal_info != NULL &&
	    !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
		ci->i_ceph_flags |= CEPH_I_SEC_INITED;

	spin_unlock(&ci->i_ceph_lock);
/*
 * Fill names with NUL-separated xattr names: cached regular xattrs
 * first, then non-hidden virtual xattrs whose exists_cb (if any) says
 * they are present.  With size == 0 only the required length is
 * returned.
 * NOTE(review): locals, error paths and braces are elided in this
 * excerpt.
 */
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	/* refresh from the MDS unless we hold XATTR_SHARED caps */
	if (ci->i_xattrs.version == 0 ||
	    !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
		spin_lock(&ci->i_ceph_lock);

	err = __build_xattrs(inode);

	/*
	 * Start with virtual dir xattr names (if any) (including
	 * terminating '\0' characters for each).
	 */
	vir_namelen = ceph_vxattrs_name_size(vxattrs);

	/* adding 1 byte per each variable due to the null termination */
	namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
	/* caller's buffer too small — NOTE(review): the error set here is elided */
	if (size && vir_namelen + namelen > size)

	err = namelen + vir_namelen;
	names = __copy_xattr_names(ci, names);

	/* virtual xattr names, too */
	for (i = 0; vxattrs[i].name; i++) {
		if (!vxattrs[i].hidden &&
		    !(vxattrs[i].exists_cb &&
		      !vxattrs[i].exists_cb(ci))) {
			len = sprintf(names, "%s", vxattrs[i].name);

	spin_unlock(&ci->i_ceph_lock);
/*
 * Synchronously set — or, with CEPH_XATTR_REPLACE, remove — an xattr
 * via an MDS request.  The value is shipped in a pagelist attached to
 * the request; the inode drops XATTR_SHARED caps so the change becomes
 * visible.
 * NOTE(review): allocation-failure checks, some branch structure and
 * braces are elided in this excerpt.
 */
static int ceph_sync_setxattr(struct inode *inode, const char *name,
			      const char *value, size_t size, int flags)
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_pagelist *pagelist = NULL;
	int op = CEPH_MDS_OP_SETXATTR;

	/* copy value into pagelist */
	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	ceph_pagelist_init(pagelist);
	err = ceph_pagelist_append(pagelist, value, size);

	/* a REPLACE-with-no-value turns into a removal */
	if (flags & CEPH_XATTR_REPLACE)
		op = CEPH_MDS_OP_RMXATTR;
	flags |= CEPH_XATTR_REMOVE;

	dout("setxattr value=%.*s\n", (int)size, value);

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	/* r_path2 carries the xattr name to the MDS */
	req->r_path2 = kstrdup(name, GFP_NOFS);
	ceph_mdsc_put_request(req);

	if (op == CEPH_MDS_OP_SETXATTR) {
		req->r_args.setxattr.flags = cpu_to_le32(flags);
		/*
		 * The request takes the pagelist; NOTE(review): the
		 * "pagelist = NULL" hand-off appears elided here, so the
		 * trailing release should only run on error paths.
		 */
		req->r_pagelist = pagelist;

	req->r_inode = inode;
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;

	dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

	ceph_pagelist_release(pagelist);
/*
 * Set an xattr, preferring the local cached path: when XATTR_EXCL caps
 * are held the change is applied to the in-memory tree and the inode is
 * marked dirty; otherwise (or for unhandled ceph.* names and fallback
 * paths) a synchronous MDS request is sent via ceph_sync_setxattr().
 * value == NULL removes the attribute.  Readonly vxattrs and snapshot
 * inodes are rejected.
 * NOTE(review): labels, error-checking lines, some locals (val_len,
 * newval, issued, dirty, err) and braces are elided in this excerpt.
 */
int __ceph_setxattr(struct inode *inode, const char *name,
		    const void *value, size_t size, int flags)
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf = NULL;
	int name_len = strlen(name);
	char *newname = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int required_blob_size;
	bool lock_snap_rwsem = false;

	/* snapshot inodes are immutable */
	if (ceph_snap(inode) != CEPH_NOSNAP)

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)

	/* pass any unhandled ceph.* xattrs through to the MDS */
	if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
		goto do_sync_unlocked;

	/* preallocate memory for xattr name, value, index node */
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	newval = kmemdup(value, val_len, GFP_NOFS);
	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	prealloc_cf = ceph_alloc_cap_flush();

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	/* without XATTR_EXCL caps we cannot apply the change locally */
	if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
	if (!lock_snap_rwsem && !ci->i_head_snapc) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			/* drop the spinlock to sleep on snap_rwsem, then retry */
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);

	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	/* grow the preallocated encode blob if this xattr won't fit */
	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preaallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
			goto do_sync_unlocked;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;

	/* value == NULL requests removal (update_xattr = -1) */
	err = __set_xattr(ci, newname, name_len, newval, val_len,
			  flags, value ? 1 : -1, &xattr);

	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
	ci->i_xattrs.dirty = true;
	inode->i_ctime = current_fs_time(inode->i_sb);

	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
	__mark_inode_dirty(inode, dirty);
	ceph_free_cap_flush(prealloc_cf);

	/* sync fallback path */
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
	/* security module set xattr while filling trace */
	if (current->journal_info != NULL) {
		pr_warn_ratelimited("sync setxattr %p "
				    "during filling trace\n", inode);
	err = ceph_sync_setxattr(inode, name, value, size, flags);
	ceph_free_cap_flush(prealloc_cf);
1043 static int ceph_get_xattr_handler(const struct xattr_handler *handler,
1044 struct dentry *dentry, struct inode *inode,
1045 const char *name, void *value, size_t size)
1047 if (!ceph_is_valid_xattr(name))
1049 return __ceph_getxattr(inode, name, value, size);
1052 static int ceph_set_xattr_handler(const struct xattr_handler *handler,
1053 struct dentry *unused, struct inode *inode,
1054 const char *name, const void *value,
1055 size_t size, int flags)
1057 if (!ceph_is_valid_xattr(name))
1059 return __ceph_setxattr(inode, name, value, size, flags);
/*
 * Catch-all handler: the empty prefix matches every name, so get/set
 * receive the full, unstripped xattr name.
 */
const struct xattr_handler ceph_other_xattr_handler = {
	.prefix = "",	/* match any name => handlers called with full name */
	.get = ceph_get_xattr_handler,
	.set = ceph_set_xattr_handler,
1068 #ifdef CONFIG_SECURITY
1069 bool ceph_security_xattr_wanted(struct inode *in)
1071 return in->i_security != NULL;
1074 bool ceph_security_xattr_deadlock(struct inode *in)
1076 struct ceph_inode_info *ci;
1078 if (in->i_security == NULL)
1080 ci = ceph_inode(in);
1081 spin_lock(&ci->i_ceph_lock);
1082 ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
1083 !(ci->i_xattrs.version > 0 &&
1084 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
1085 spin_unlock(&ci->i_ceph_lock);