1 #include <linux/ceph/ceph_debug.h>
4 #include "mds_client.h"
6 #include <linux/ceph/decode.h>
8 #include <linux/xattr.h>
9 #include <linux/slab.h>
11 #define XATTR_CEPH_PREFIX "ceph."
12 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
14 static bool ceph_is_valid_xattr(const char *name)
16 return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
17 !strncmp(name, XATTR_SECURITY_PREFIX,
18 XATTR_SECURITY_PREFIX_LEN) ||
19 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
20 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
24 * These define virtual xattrs exposing the recursive directory
25 * statistics and layout metadata.
/* Table entry describing one virtual ("ceph.*") extended attribute. */
29 size_t name_size; /* strlen(name) + 1 (for '\0') */
/* Formats the vxattr value into val; returns the formatted length. */
30 size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
/* readonly: set/remove rejected; hidden: omitted from listxattr. */
32 bool readonly, hidden;
/* Optional existence check; when it returns false the vxattr is absent. */
33 bool (*exists_cb)(struct ceph_inode_info *ci);
/* "ceph.dir.entries": number of direct children (files + subdirs). */
38 static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
41 return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
/* "ceph.dir.files": number of direct child files. */
44 static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
47 return snprintf(val, size, "%lld", ci->i_files);
/* "ceph.dir.subdirs": number of direct child subdirectories. */
50 static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
53 return snprintf(val, size, "%lld", ci->i_subdirs);
/* "ceph.dir.rentries": recursive entry count over the whole subtree. */
56 static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
59 return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
/* "ceph.dir.rfiles": recursive file count over the whole subtree. */
62 static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
65 return snprintf(val, size, "%lld", ci->i_rfiles);
/* "ceph.dir.rsubdirs": recursive subdirectory count over the subtree. */
68 static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
71 return snprintf(val, size, "%lld", ci->i_rsubdirs);
/* "ceph.dir.rbytes": recursive byte total over the whole subtree. */
74 static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
77 return snprintf(val, size, "%lld", ci->i_rbytes);
80 static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
83 return snprintf(val, size, "%ld.09%ld", (long)ci->i_rctime.tv_sec,
84 (long)ci->i_rctime.tv_nsec);
/* Build the full vxattr name string, e.g. "ceph.dir.rbytes". */
87 #define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
/* Initializer for one ceph_vxattr table entry; wires the name to its
 * ceph_vxattrcb_<type>_<name> callback.  (No comments inside: the
 * backslash continuations must stay unbroken.) */
89 #define XATTR_NAME_CEPH(_type, _name) \
91 .name = CEPH_XATTR_NAME(_type, _name), \
92 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
93 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
/* Virtual xattrs available on directories; NULL-name terminated. */
99 static struct ceph_vxattr ceph_dir_vxattrs[] = {
100 XATTR_NAME_CEPH(dir, entries),
101 XATTR_NAME_CEPH(dir, files),
102 XATTR_NAME_CEPH(dir, subdirs),
103 XATTR_NAME_CEPH(dir, rentries),
104 XATTR_NAME_CEPH(dir, rfiles),
105 XATTR_NAME_CEPH(dir, rsubdirs),
106 XATTR_NAME_CEPH(dir, rbytes),
107 XATTR_NAME_CEPH(dir, rctime),
108 { 0 } /* Required table terminator */
/* Cached by ceph_xattr_init(); consumed by ceph_vxattrs_name_size(). */
110 static size_t ceph_dir_vxattrs_name_size; /* total size of all names */
/*
 * "ceph.file.layout": striping parameters of a regular file.  Note the
 * "chunk_bytes" field is filled from ceph_file_layout_su(), i.e. the
 * stripe unit.
 */
114 static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
119 ret = snprintf(val, size,
120 "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
121 (unsigned long long)ceph_file_layout_su(ci->i_layout),
122 (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
123 (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
/* Virtual xattrs available on regular files; NULL-name terminated. */
127 static struct ceph_vxattr ceph_file_vxattrs[] = {
128 XATTR_NAME_CEPH(file, layout),
129 { 0 } /* Required table terminator */
/* Cached by ceph_xattr_init(); consumed by ceph_vxattrs_name_size(). */
131 static size_t ceph_file_vxattrs_name_size; /* total size of all names */
133 static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
135 if (S_ISDIR(inode->i_mode))
136 return ceph_dir_vxattrs;
137 else if (S_ISREG(inode->i_mode))
138 return ceph_file_vxattrs;
/*
 * Return the precomputed total name size for a known vxattr table.
 * Only the two static tables above are valid arguments; behavior for
 * any other pointer is handled in elided lines (presumably a BUG()).
 */
142 static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
144 if (vxattrs == ceph_dir_vxattrs)
145 return ceph_dir_vxattrs_name_size;
146 if (vxattrs == ceph_file_vxattrs)
147 return ceph_file_vxattrs_name_size;
154 * Compute the aggregate size (including terminating '\0') of all
155 * virtual extended attribute names in the given vxattr table.
157 static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
159 struct ceph_vxattr *vxattr;
/* Walk to the NULL-name terminator, summing each entry's name_size.
 * NOTE(review): an elided line between the loop and the accumulation
 * may filter entries (e.g. on ->hidden) — confirm against full source. */
162 for (vxattr = vxattrs; vxattr->name; vxattr++)
164 size += vxattr->name_size;
169 /* Routines called at initialization and exit time */
171 void __init ceph_xattr_init(void)
173 ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
174 ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
177 void ceph_xattr_exit(void)
179 ceph_dir_vxattrs_name_size = 0;
180 ceph_file_vxattrs_name_size = 0;
/*
 * Look up a vxattr table entry by exact name for this inode, or NULL.
 * NOTE(review): ceph_inode_vxattrs() can return NULL for non-dir,
 * non-regular inodes; the guard for that case is in elided lines —
 * confirm before relying on the walk below.
 */
183 static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
186 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);
189 while (vxattr->name) {
190 if (!strcmp(vxattr->name, name))
/*
 * Insert or update one xattr in ci's rb-tree index (caller holds
 * ci->i_ceph_lock).  On update, the old name/val are freed according
 * to their should_free_* flags; *newxattr supplies the node to insert
 * and the should_free_name/should_free_val arguments record ownership
 * of the new name/val buffers.  Accounting (count, names_size,
 * vals_size) is kept in step.
 */
199 static int __set_xattr(struct ceph_inode_info *ci,
200 const char *name, int name_len,
201 const char *val, int val_len,
203 int should_free_name, int should_free_val,
204 struct ceph_inode_xattr **newxattr)
207 struct rb_node *parent = NULL;
208 struct ceph_inode_xattr *xattr = NULL;
/* Standard rb-tree descent keyed by (name bytes, then name length). */
212 p = &ci->i_xattrs.index.rb_node;
215 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
216 c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
/* Equal prefixes: break the tie on length to get a total order. */
222 if (name_len == xattr->name_len)
224 else if (name_len < xattr->name_len)
/* New entry: take ownership of the name and bump the count. */
236 xattr->name_len = name_len;
237 xattr->should_free_name = should_free_name;
239 ci->i_xattrs.count++;
240 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
/* Existing entry: release the old value before overwriting. */
244 if (xattr->should_free_val)
245 kfree((void *)xattr->val);
/* The caller's duplicate name is not needed when updating in place. */
247 if (should_free_name) {
251 ci->i_xattrs.names_size -= xattr->name_len;
252 ci->i_xattrs.vals_size -= xattr->val_len;
254 ci->i_xattrs.names_size += name_len;
255 ci->i_xattrs.vals_size += val_len;
261 xattr->val_len = val_len;
262 xattr->dirty = dirty;
263 xattr->should_free_val = (val && should_free_val);
/* Link freshly-inserted nodes into the tree and rebalance. */
266 rb_link_node(&xattr->node, parent, p);
267 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
268 dout("__set_xattr_val p=%p\n", p);
271 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
272 ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
/*
 * Find an xattr by name in ci's rb-tree index (caller holds
 * ci->i_ceph_lock).  Returns the node, or NULL if not present.
 */
277 static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
281 struct rb_node *parent = NULL;
282 struct ceph_inode_xattr *xattr = NULL;
283 int name_len = strlen(name);
286 p = &ci->i_xattrs.index.rb_node;
289 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
/* Same ordering as __set_xattr: byte compare, then length tiebreak. */
290 c = strncmp(name, xattr->name, xattr->name_len);
291 if (c == 0 && name_len > xattr->name_len)
298 dout("__get_xattr %s: found %.*s\n", name,
299 xattr->val_len, xattr->val)
304 dout("__get_xattr %s: not found\n", name);
/*
 * Free an xattr node's owned name/val buffers (per the should_free_*
 * flags).  Does not touch the rb-tree or the accounting counters.
 */
309 static void __free_xattr(struct ceph_inode_xattr *xattr)
313 if (xattr->should_free_name)
314 kfree((void *)xattr->name);
315 if (xattr->should_free_val)
316 kfree((void *)xattr->val);
/*
 * Unlink an xattr node from ci's index, free its owned buffers, and
 * update the size/count accounting.  Caller holds ci->i_ceph_lock.
 */
321 static int __remove_xattr(struct ceph_inode_info *ci,
322 struct ceph_inode_xattr *xattr)
327 rb_erase(&xattr->node, &ci->i_xattrs.index);
329 if (xattr->should_free_name)
330 kfree((void *)xattr->name);
331 if (xattr->should_free_val)
332 kfree((void *)xattr->val);
/* Keep the cached blob-size inputs consistent with the tree. */
334 ci->i_xattrs.names_size -= xattr->name_len;
335 ci->i_xattrs.vals_size -= xattr->val_len;
336 ci->i_xattrs.count--;
/*
 * Remove an xattr by name: look it up, then delegate to
 * __remove_xattr().  Caller holds ci->i_ceph_lock.
 */
342 static int __remove_xattr_by_name(struct ceph_inode_info *ci,
346 struct ceph_inode_xattr *xattr;
349 p = &ci->i_xattrs.index.rb_node;
350 xattr = __get_xattr(ci, name);
/* NOTE(review): __get_xattr() may return NULL; the not-found handling
 * is presumably in __remove_xattr() or elided lines — confirm. */
351 err = __remove_xattr(ci, xattr);
/*
 * Copy all indexed xattr names into dest as consecutive NUL-terminated
 * strings (the listxattr wire format); returns the advanced dest
 * pointer.  Caller holds ci->i_ceph_lock and must have sized dest from
 * names_size + count (one '\0' per name).
 */
355 static char *__copy_xattr_names(struct ceph_inode_info *ci,
359 struct ceph_inode_xattr *xattr = NULL;
/* In-order rb-tree walk yields the names sorted. */
361 p = rb_first(&ci->i_xattrs.index);
362 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
365 xattr = rb_entry(p, struct ceph_inode_xattr, node);
366 memcpy(dest, xattr->name, xattr->name_len);
367 dest[xattr->name_len] = '\0';
369 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
370 xattr->name_len, ci->i_xattrs.names_size);
372 dest += xattr->name_len + 1;
/*
 * Tear down ci's entire in-memory xattr index: erase every node,
 * free owned buffers (in elided lines), and reset all accounting so
 * the index can be rebuilt from a new blob.
 */
379 void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
381 struct rb_node *p, *tmp;
382 struct ceph_inode_xattr *xattr = NULL;
384 p = rb_first(&ci->i_xattrs.index);
386 dout("__ceph_destroy_xattrs p=%p\n", p);
389 xattr = rb_entry(p, struct ceph_inode_xattr, node);
392 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
393 xattr->name_len, xattr->name);
/* tmp is the node being erased; p was advanced first so the walk
 * survives the erase. */
394 rb_erase(tmp, &ci->i_xattrs.index);
/* Leave the xattrs struct in a clean, empty state. */
399 ci->i_xattrs.names_size = 0;
400 ci->i_xattrs.vals_size = 0;
401 ci->i_xattrs.index_version = 0;
402 ci->i_xattrs.count = 0;
403 ci->i_xattrs.index = RB_ROOT;
/*
 * (Re)build the in-memory xattr rb-tree from the encoded blob received
 * from the MDS.  No-op when index_version already covers the current
 * blob version.  Drops ci->i_ceph_lock around the allocations (hence
 * the __releases/__acquires annotations) and retries if the blob
 * version changed while the lock was dropped.
 */
406 static int __build_xattrs(struct inode *inode)
407 __releases(ci->i_ceph_lock)
408 __acquires(ci->i_ceph_lock)
414 const char *name, *val;
415 struct ceph_inode_info *ci = ceph_inode(inode);
417 struct ceph_inode_xattr **xattrs = NULL;
421 dout("__build_xattrs() len=%d\n",
422 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
424 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
425 return 0; /* already built */
/* Start from scratch; the old index is stale. */
427 __ceph_destroy_xattrs(ci);
430 /* updated internal xattr rb tree */
431 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
432 p = ci->i_xattrs.blob->vec.iov_base;
433 end = p + ci->i_xattrs.blob->vec.iov_len;
/* First 4 bytes of the blob: number of encoded xattrs. */
434 ceph_decode_32_safe(&p, end, numattr, bad);
435 xattr_version = ci->i_xattrs.version;
/* Drop the spinlock while allocating; version rechecked below. */
436 spin_unlock(&ci->i_ceph_lock);
/* NOTE(review): sizeof uses "struct ceph_xattr *" where the array
 * holds "struct ceph_inode_xattr *" — same pointer size, so harmless,
 * but the type name looks wrong; confirm against full source. */
438 xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
/* NOTE(review): kcalloc() already zeroes, so this memset is redundant. */
443 memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
444 for (i = 0; i < numattr; i++) {
445 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
451 spin_lock(&ci->i_ceph_lock);
452 if (ci->i_xattrs.version != xattr_version) {
453 /* lost a race, retry */
454 for (i = 0; i < numattr; i++)
/* Decode each (name-len, name, val-len, val) pair from the blob. */
462 ceph_decode_32_safe(&p, end, len, bad);
466 ceph_decode_32_safe(&p, end, len, bad);
470 err = __set_xattr(ci, name, namelen, val, len,
471 0, 0, 0, &xattrs[numattr]);
/* The index now reflects the decoded blob. */
478 ci->i_xattrs.index_version = ci->i_xattrs.version;
479 ci->i_xattrs.dirty = false;
/* Error path (elided labels): retake the lock, free the nodes, reset. */
483 spin_lock(&ci->i_ceph_lock);
486 for (i = 0; i < numattr; i++)
490 ci->i_xattrs.names_size = 0;
/*
 * Size the buffer needed to encode all current xattrs, plus one
 * prospective new xattr of name_size/val_size bytes.  Caller holds
 * ci->i_ceph_lock.
 */
494 static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
498 * 4 bytes for the length, and additional 4 bytes per each xattr name,
499 * 4 bytes per each value
/* count header + per-entry length prefixes + raw name/value bytes. */
501 int size = 4 + ci->i_xattrs.count*(4 + 4) +
502 ci->i_xattrs.names_size +
503 ci->i_xattrs.vals_size;
504 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
505 ci->i_xattrs.count, ci->i_xattrs.names_size,
506 ci->i_xattrs.vals_size);
/* Room for the additional entry being added, if any. */
509 size += 4 + 4 + name_size + val_size;
515 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
516 * and swap into place.
518 void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
521 struct ceph_inode_xattr *xattr = NULL;
524 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
525 if (ci->i_xattrs.dirty) {
/* prealloc_blob was sized earlier via __get_required_blob_size(). */
526 int need = __get_required_blob_size(ci, 0, 0);
528 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
530 p = rb_first(&ci->i_xattrs.index);
531 dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
/* Encoding: u32 count, then (u32 len, name, u32 len, val) per entry. */
533 ceph_encode_32(&dest, ci->i_xattrs.count);
535 xattr = rb_entry(p, struct ceph_inode_xattr, node);
537 ceph_encode_32(&dest, xattr->name_len);
538 memcpy(dest, xattr->name, xattr->name_len);
539 dest += xattr->name_len;
540 ceph_encode_32(&dest, xattr->val_len);
541 memcpy(dest, xattr->val, xattr->val_len);
542 dest += xattr->val_len;
547 /* adjust buffer len; it may be larger than we need */
548 ci->i_xattrs.prealloc_blob->vec.iov_len =
549 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
/* Swap the freshly encoded blob in; the old one drops a reference. */
551 if (ci->i_xattrs.blob)
552 ceph_buffer_put(ci->i_xattrs.blob);
553 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
554 ci->i_xattrs.prealloc_blob = NULL;
555 ci->i_xattrs.dirty = false;
556 ci->i_xattrs.version++;
/*
 * getxattr entry point.  Order of attempts: virtual xattr callback,
 * then the locally cached index (valid when we hold XATTR_SHARED caps
 * and the index is current), otherwise fetch xattrs from the MDS and
 * rebuild.  With size == 0, returns the value length only (standard
 * getxattr probing convention).
 */
560 ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
563 struct inode *inode = dentry->d_inode;
564 struct ceph_inode_info *ci = ceph_inode(inode);
566 struct ceph_inode_xattr *xattr;
567 struct ceph_vxattr *vxattr = NULL;
569 if (!ceph_is_valid_xattr(name))
572 spin_lock(&ci->i_ceph_lock);
573 dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
574 ci->i_xattrs.version, ci->i_xattrs.index_version);
576 /* let's see if a virtual xattr was requested */
577 vxattr = ceph_match_vxattr(inode, name);
/* Serve the vxattr unless its exists_cb says it is absent. */
578 if (vxattr && !(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
579 err = vxattr->getxattr_cb(ci, value, size);
/* Cached path: caps cover xattrs and the decoded index is current. */
583 if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
584 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
587 spin_unlock(&ci->i_ceph_lock);
588 /* get xattrs from mds (if we don't already have them) */
589 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
594 spin_lock(&ci->i_ceph_lock);
596 err = __build_xattrs(inode);
601 err = -ENODATA; /* == ENOATTR */
602 xattr = __get_xattr(ci, name);
/* Caller's buffer too small: the elided branch reports -ERANGE. */
607 if (size && size < xattr->val_len)
610 err = xattr->val_len;
614 memcpy(value, xattr->val, xattr->val_len);
617 spin_unlock(&ci->i_ceph_lock);
/*
 * listxattr entry point.  Ensures the cached index is current
 * (fetching from the MDS if caps/index are stale), then emits real
 * xattr names followed by non-hidden virtual xattr names, each
 * NUL-terminated.  With size == 0, returns the required length only.
 */
621 ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
623 struct inode *inode = dentry->d_inode;
624 struct ceph_inode_info *ci = ceph_inode(inode);
625 struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
632 spin_lock(&ci->i_ceph_lock);
633 dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
634 ci->i_xattrs.version, ci->i_xattrs.index_version);
/* Cached path: caps cover xattrs and the decoded index is current. */
636 if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
637 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
640 spin_unlock(&ci->i_ceph_lock);
641 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
646 spin_lock(&ci->i_ceph_lock);
648 err = __build_xattrs(inode);
654 * Start with virtual dir xattr names (if any) (including
655 * terminating '\0' characters for each).
657 vir_namelen = ceph_vxattrs_name_size(vxattrs);
659 /* adding 1 byte per each variable due to the null termination */
660 namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
/* Undersized caller buffer: the elided branch reports -ERANGE. */
662 if (size && vir_namelen + namelen > size)
665 err = namelen + vir_namelen;
/* Real xattr names first... */
669 names = __copy_xattr_names(ci, names);
671 /* virtual xattr names, too */
674 for (i = 0; vxattrs[i].name; i++) {
/* ...then vxattrs that are neither hidden nor reported non-existent. */
675 if (!vxattrs[i].hidden &&
676 !(vxattrs[i].exists_cb &&
677 !vxattrs[i].exists_cb(ci))) {
678 len = sprintf(names, "%s", vxattrs[i].name);
686 spin_unlock(&ci->i_ceph_lock);
/*
 * Send a SETXATTR request synchronously to the MDS.  The value is
 * copied into freshly allocated pages attached to the request; the
 * XATTR_SHARED cap is dropped so the new value is refetched.
 */
690 static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
691 const char *value, size_t size, int flags)
693 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
694 struct inode *inode = dentry->d_inode;
695 struct ceph_inode_info *ci = ceph_inode(inode);
696 struct inode *parent_inode;
697 struct ceph_mds_request *req;
698 struct ceph_mds_client *mdsc = fsc->mdsc;
701 struct page **pages = NULL;
704 /* copy value into some pages */
705 nr_pages = calc_pages_for(0, size);
707 pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
711 for (i = 0; i < nr_pages; i++) {
712 pages[i] = __page_cache_alloc(GFP_NOFS);
/* Copy one page worth of the value; the final page may be partial. */
717 kaddr = kmap(pages[i]);
718 memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
719 min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
723 dout("setxattr value=%.*s\n", (int)size, value);
726 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
732 req->r_inode = inode;
/* Force re-read of xattrs after the MDS applies the change. */
734 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
736 req->r_args.setxattr.flags = cpu_to_le32(flags);
/* r_path2 carries the xattr name; the request owns this copy. */
737 req->r_path2 = kstrdup(name, GFP_NOFS);
739 req->r_pages = pages;
740 req->r_num_pages = nr_pages;
741 req->r_data_len = size;
743 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
744 parent_inode = ceph_get_dentry_parent_inode(dentry);
745 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
747 ceph_mdsc_put_request(req);
748 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
/* Cleanup (shared with the error path): release the value pages. */
752 for (i = 0; i < nr_pages; i++)
753 __free_page(pages[i]);
/*
 * setxattr entry point.  Rejects snapshots, invalid names, and
 * read-only vxattrs; passes unhandled ceph.* names straight to the
 * MDS.  Otherwise, if we hold the XATTR_EXCL cap, applies the change
 * to the local cached index and marks the inode dirty; without the
 * cap it falls back to the synchronous MDS request.
 */
759 int ceph_setxattr(struct dentry *dentry, const char *name,
760 const void *value, size_t size, int flags)
762 struct inode *inode = dentry->d_inode;
763 struct ceph_vxattr *vxattr;
764 struct ceph_inode_info *ci = ceph_inode(inode);
768 int name_len = strlen(name);
770 char *newname = NULL;
772 struct ceph_inode_xattr *xattr = NULL;
773 int required_blob_size;
/* Snapshots are immutable. */
775 if (ceph_snap(inode) != CEPH_NOSNAP)
778 if (!ceph_is_valid_xattr(name))
781 vxattr = ceph_match_vxattr(inode, name);
/* Read-only vxattrs cannot be set (error code in elided line). */
782 if (vxattr && vxattr->readonly)
785 /* pass any unhandled ceph.* xattrs through to the MDS */
786 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
787 goto do_sync_unlocked;
789 /* preallocate memory for xattr name, value, index node */
791 newname = kmemdup(name, name_len + 1, GFP_NOFS);
796 newval = kmemdup(value, val_len, GFP_NOFS);
801 xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
805 spin_lock(&ci->i_ceph_lock);
807 issued = __ceph_caps_issued(ci, NULL);
808 dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
/* Without the exclusive xattr cap we must go through the MDS. */
810 if (!(issued & CEPH_CAP_XATTR_EXCL))
811 __build_xattrs(inode);
813 required_blob_size = __get_required_blob_size(ci, name_len, val_len);
/* Grow the preallocated encode buffer if this xattr won't fit. */
815 if (!ci->i_xattrs.prealloc_blob ||
816 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
817 struct ceph_buffer *blob;
819 spin_unlock(&ci->i_ceph_lock);
/* NOTE(review): "preaallocating" typo lives in this debug string. */
820 dout(" preaallocating new blob size=%d\n", required_blob_size);
821 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
824 spin_lock(&ci->i_ceph_lock);
825 if (ci->i_xattrs.prealloc_blob)
826 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
827 ci->i_xattrs.prealloc_blob = blob;
/* Local update: the index takes ownership of newname/newval/xattr. */
831 err = __set_xattr(ci, newname, name_len, newval,
832 val_len, 1, 1, 1, &xattr);
834 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
835 ci->i_xattrs.dirty = true;
836 inode->i_ctime = CURRENT_TIME;
838 spin_unlock(&ci->i_ceph_lock);
840 __mark_inode_dirty(inode, dirty);
/* Fallback path: no cap, so do it synchronously via the MDS. */
844 spin_unlock(&ci->i_ceph_lock);
846 err = ceph_sync_setxattr(dentry, name, value, size, flags);
/*
 * Send a synchronous RMXATTR request to the MDS, dropping the
 * XATTR_SHARED cap so the removal is seen on the next fetch.
 */
854 static int ceph_send_removexattr(struct dentry *dentry, const char *name)
856 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
857 struct ceph_mds_client *mdsc = fsc->mdsc;
858 struct inode *inode = dentry->d_inode;
859 struct inode *parent_inode;
860 struct ceph_mds_request *req;
863 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
867 req->r_inode = inode;
869 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
/* r_path2 carries the xattr name; the request owns this copy. */
871 req->r_path2 = kstrdup(name, GFP_NOFS);
873 parent_inode = ceph_get_dentry_parent_inode(dentry);
874 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
876 ceph_mdsc_put_request(req);
/*
 * removexattr entry point.  Mirrors ceph_setxattr(): reject snapshots,
 * invalid names and read-only vxattrs; pass unhandled ceph.* names to
 * the MDS; with the XATTR_EXCL cap remove locally and dirty the inode,
 * otherwise fall back to the synchronous MDS request.
 */
880 int ceph_removexattr(struct dentry *dentry, const char *name)
882 struct inode *inode = dentry->d_inode;
883 struct ceph_vxattr *vxattr;
884 struct ceph_inode_info *ci = ceph_inode(inode);
887 int required_blob_size;
/* Snapshots are immutable. */
890 if (ceph_snap(inode) != CEPH_NOSNAP)
893 if (!ceph_is_valid_xattr(name))
896 vxattr = ceph_match_vxattr(inode, name);
897 if (vxattr && vxattr->readonly)
900 /* pass any unhandled ceph.* xattrs through to the MDS */
901 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
902 goto do_sync_unlocked;
905 spin_lock(&ci->i_ceph_lock);
907 issued = __ceph_caps_issued(ci, NULL);
908 dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
/* Without the exclusive xattr cap we must go through the MDS. */
910 if (!(issued & CEPH_CAP_XATTR_EXCL))
912 __build_xattrs(inode);
914 required_blob_size = __get_required_blob_size(ci, 0, 0);
/* Ensure the encode buffer can hold the remaining xattrs. */
916 if (!ci->i_xattrs.prealloc_blob ||
917 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
918 struct ceph_buffer *blob;
920 spin_unlock(&ci->i_ceph_lock);
/* NOTE(review): "preaallocating" typo lives in this debug string. */
921 dout(" preaallocating new blob size=%d\n", required_blob_size);
922 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
925 spin_lock(&ci->i_ceph_lock);
926 if (ci->i_xattrs.prealloc_blob)
927 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
928 ci->i_xattrs.prealloc_blob = blob;
/* Local removal, then dirty the cap/inode so it gets flushed. */
932 err = __remove_xattr_by_name(ceph_inode(inode), name);
934 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
935 ci->i_xattrs.dirty = true;
936 inode->i_ctime = CURRENT_TIME;
937 spin_unlock(&ci->i_ceph_lock);
939 __mark_inode_dirty(inode, dirty);
/* Fallback path: no cap, so remove synchronously via the MDS. */
942 spin_unlock(&ci->i_ceph_lock);
944 err = ceph_send_removexattr(dentry, name);