1 /*
2  * linux/fs/ext4/xattr.c
3  *
4  * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
5  *
6  * Fix by Harrison Xing <harrison@mountainviewdata.com>.
7  * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
8  * Extended attributes for symlinks and special files added per
9  *  suggestion of Luka Renko <luka.renko@hermes.si>.
10  * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
11  *  Red Hat Inc.
12  * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
13  *  and Andreas Gruenbacher <agruen@suse.de>.
14  */
15
16 /*
17  * Extended attributes are stored directly in inodes (on file systems with
18  * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
19  * field contains the block number if an inode uses an additional block. All
20  * attributes must fit in the inode and one additional block. Blocks that
21  * contain the identical set of attributes may be shared among several inodes.
22  * Identical blocks are detected by keeping a cache of blocks that have
23  * recently been accessed.
24  *
25  * In-inode attributes and block attributes use different headers, but the
26  * entries themselves are stored in the same format:
27  *
28  *   +------------------+
29  *   | header           |
30  *   | entry 1          | |
31  *   | entry 2          | | growing downwards
32  *   | entry 3          | v
33  *   | four null bytes  |
34  *   | . . .            |
35  *   | value 1          | ^
36  *   | value 3          | | growing upwards
37  *   | value 2          | |
38  *   +------------------+
39  *
40  * The header is followed by multiple entry descriptors. In disk blocks, the
41  * entry descriptors are kept sorted. In inodes, they are unsorted. The
42  * attribute values are aligned to the end of the block in no specific order.
43  *
44  * Locking strategy
45  * ----------------
46  * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
47  * EA blocks are only changed if they are exclusive to an inode, so
48  * holding xattr_sem also means that nothing but the EA block's reference
49  * count can change. Multiple writers to the same block are synchronized
50  * by the buffer lock.
51  */
52
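/*
 * For reference, each entry descriptor that follows the header looks
 * roughly like this (see fs/ext4/xattr.h for the authoritative
 * definitions):
 *
 *	struct ext4_xattr_entry {
 *		__u8	e_name_len;	length of attribute name
 *		__u8	e_name_index;	attribute name prefix index
 *		__le16	e_value_offs;	value offset from the value base
 *					(block start, or first ibody entry)
 *		__le32	e_value_block;	unused, must be zero
 *		__le32	e_value_size;	size of attribute value
 *		__le32	e_hash;		hash of name and value
 *		char	e_name[0];	attribute name, not null-terminated
 *	};
 *
 * Entries and values are padded to EXT4_XATTR_PAD (4-byte) boundaries,
 * and the entry list is terminated by four null bytes.
 */
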
53 #include <linux/init.h>
54 #include <linux/fs.h>
55 #include <linux/slab.h>
56 #include <linux/mbcache.h>
57 #include <linux/quotaops.h>
58 #include "ext4_jbd2.h"
59 #include "ext4.h"
60 #include "xattr.h"
61 #include "acl.h"
62
63 #ifdef EXT4_XATTR_DEBUG
64 # define ea_idebug(inode, f...) do { \
65                 printk(KERN_DEBUG "inode %s:%lu: ", \
66                         inode->i_sb->s_id, inode->i_ino); \
67                 printk(f); \
68                 printk("\n"); \
69         } while (0)
70 # define ea_bdebug(bh, f...) do { \
71                 printk(KERN_DEBUG "block %pg:%lu: ",               \
72                        bh->b_bdev, (unsigned long) bh->b_blocknr); \
73                 printk(f); \
74                 printk("\n"); \
75         } while (0)
76 #else
77 # define ea_idebug(inode, fmt, ...)     no_printk(fmt, ##__VA_ARGS__)
78 # define ea_bdebug(bh, fmt, ...)        no_printk(fmt, ##__VA_ARGS__)
79 #endif
80
81 static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
82 static struct buffer_head *ext4_xattr_cache_find(struct inode *,
83                                                  struct ext4_xattr_header *,
84                                                  struct mb_cache_entry **);
85 static void ext4_xattr_rehash(struct ext4_xattr_header *,
86                               struct ext4_xattr_entry *);
87 static int ext4_xattr_list(struct dentry *dentry, char *buffer,
88                            size_t buffer_size);
89
90 static const struct xattr_handler *ext4_xattr_handler_map[] = {
91         [EXT4_XATTR_INDEX_USER]              = &ext4_xattr_user_handler,
92 #ifdef CONFIG_EXT4_FS_POSIX_ACL
93         [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
94         [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
95 #endif
96         [EXT4_XATTR_INDEX_TRUSTED]           = &ext4_xattr_trusted_handler,
97 #ifdef CONFIG_EXT4_FS_SECURITY
98         [EXT4_XATTR_INDEX_SECURITY]          = &ext4_xattr_security_handler,
99 #endif
100 };
101
102 const struct xattr_handler *ext4_xattr_handlers[] = {
103         &ext4_xattr_user_handler,
104         &ext4_xattr_trusted_handler,
105 #ifdef CONFIG_EXT4_FS_POSIX_ACL
106         &posix_acl_access_xattr_handler,
107         &posix_acl_default_xattr_handler,
108 #endif
109 #ifdef CONFIG_EXT4_FS_SECURITY
110         &ext4_xattr_security_handler,
111 #endif
112         NULL
113 };
114
115 #define EXT4_GET_MB_CACHE(inode)        (((struct ext4_sb_info *) \
116                                 inode->i_sb->s_fs_info)->s_mb_cache)
117
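/*
 * Compute the checksum of an xattr block: the filesystem metadata
 * checksum (crc32c) over the 64-bit block number followed by the block
 * contents, with the stored h_checksum treated as zero.
 */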
118 static __le32 ext4_xattr_block_csum(struct inode *inode,
119                                     sector_t block_nr,
120                                     struct ext4_xattr_header *hdr)
121 {
122         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
123         __u32 csum;
124         __le32 save_csum;
125         __le64 dsk_block_nr = cpu_to_le64(block_nr);
126
127         save_csum = hdr->h_checksum;
128         hdr->h_checksum = 0;
129         csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
130                            sizeof(dsk_block_nr));
131         csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
132                            EXT4_BLOCK_SIZE(inode->i_sb));
133
134         hdr->h_checksum = save_csum;
135         return cpu_to_le32(csum);
136 }
137
138 static int ext4_xattr_block_csum_verify(struct inode *inode,
139                                         sector_t block_nr,
140                                         struct ext4_xattr_header *hdr)
141 {
142         if (ext4_has_metadata_csum(inode->i_sb) &&
143             (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
144                 return 0;
145         return 1;
146 }
147
148 static void ext4_xattr_block_csum_set(struct inode *inode,
149                                       sector_t block_nr,
150                                       struct ext4_xattr_header *hdr)
151 {
152         if (!ext4_has_metadata_csum(inode->i_sb))
153                 return;
154
155         hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
156 }
157
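/*
 * Recompute the block checksum (when metadata checksums are enabled) and
 * mark the buffer dirty in the running journal transaction.
 */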
158 static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
159                                                 struct inode *inode,
160                                                 struct buffer_head *bh)
161 {
162         ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
163         return ext4_handle_dirty_metadata(handle, inode, bh);
164 }
165
166 static inline const struct xattr_handler *
167 ext4_xattr_handler(int name_index)
168 {
169         const struct xattr_handler *handler = NULL;
170
171         if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
172                 handler = ext4_xattr_handler_map[name_index];
173         return handler;
174 }
175
176 /*
177  * Inode operation listxattr()
178  *
179  * d_inode(dentry)->i_mutex: don't care
180  */
181 ssize_t
182 ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
183 {
184         return ext4_xattr_list(dentry, buffer, size);
185 }
186
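/*
 * Sanity-check an entry list: first make sure that walking the chain of
 * entries stays within 'end', then check that every value lies between
 * the end of the entry list and 'end' ('value_start' is the base that
 * e_value_offs is relative to).
 */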
187 static int
188 ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
189                        void *value_start)
190 {
191         struct ext4_xattr_entry *e = entry;
192
193         while (!IS_LAST_ENTRY(e)) {
194                 struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
195                 if ((void *)next >= end)
196                         return -EFSCORRUPTED;
197                 e = next;
198         }
199
200         while (!IS_LAST_ENTRY(entry)) {
201                 if (entry->e_value_size != 0 &&
202                     (value_start + le16_to_cpu(entry->e_value_offs) <
203                      (void *)e + sizeof(__u32) ||
204                      value_start + le16_to_cpu(entry->e_value_offs) +
205                     le32_to_cpu(entry->e_value_size) > end))
206                         return -EFSCORRUPTED;
207                 entry = EXT4_XATTR_NEXT(entry);
208         }
209
210         return 0;
211 }
212
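/*
 * Validate an external xattr block: verify the magic number, block count
 * and checksum, then sanity-check the entry list.  The result is cached
 * via the buffer's verified bit.
 */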
213 static inline int
214 ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
215 {
216         int error;
217
218         if (buffer_verified(bh))
219                 return 0;
220
221         if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
222             BHDR(bh)->h_blocks != cpu_to_le32(1))
223                 return -EFSCORRUPTED;
224         if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
225                 return -EFSBADCRC;
226         error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
227                                        bh->b_data);
228         if (!error)
229                 set_buffer_verified(bh);
230         return error;
231 }
232
233 static inline int
234 ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
235 {
236         size_t value_size = le32_to_cpu(entry->e_value_size);
237
238         if (entry->e_value_block != 0 || value_size > size ||
239             le16_to_cpu(entry->e_value_offs) + value_size > size)
240                 return -EFSCORRUPTED;
241         return 0;
242 }
243
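/*
 * Find the attribute (name_index, name) in the entry list starting at
 * *pentry.  For sorted lists (xattr blocks) the scan stops as soon as a
 * larger entry is seen; in-inode lists are unsorted and are scanned to
 * the end.  On return *pentry points at the match or at the entry where
 * the scan stopped.  Returns 0 if found, -ENODATA if not, or
 * -EFSCORRUPTED if the matching entry is invalid.
 */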
244 static int
245 ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
246                       const char *name, size_t size, int sorted)
247 {
248         struct ext4_xattr_entry *entry;
249         size_t name_len;
250         int cmp = 1;
251
252         if (name == NULL)
253                 return -EINVAL;
254         name_len = strlen(name);
255         entry = *pentry;
256         for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
257                 cmp = name_index - entry->e_name_index;
258                 if (!cmp)
259                         cmp = name_len - entry->e_name_len;
260                 if (!cmp)
261                         cmp = memcmp(name, entry->e_name, name_len);
262                 if (cmp <= 0 && (sorted || cmp == 0))
263                         break;
264         }
265         *pentry = entry;
266         if (!cmp && ext4_xattr_check_entry(entry, size))
267                 return -EFSCORRUPTED;
268         return cmp ? -ENODATA : 0;
269 }
270
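/*
 * Look up an attribute in the inode's external xattr block and copy its
 * value into 'buffer', or just return the value size when 'buffer' is
 * NULL.
 */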
271 static int
272 ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
273                      void *buffer, size_t buffer_size)
274 {
275         struct buffer_head *bh = NULL;
276         struct ext4_xattr_entry *entry;
277         size_t size;
278         int error;
279         struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
280
281         ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
282                   name_index, name, buffer, (long)buffer_size);
283
284         error = -ENODATA;
285         if (!EXT4_I(inode)->i_file_acl)
286                 goto cleanup;
287         ea_idebug(inode, "reading block %llu",
288                   (unsigned long long)EXT4_I(inode)->i_file_acl);
289         bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
290         if (!bh)
291                 goto cleanup;
292         ea_bdebug(bh, "b_count=%d, refcount=%d",
293                 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
294         if (ext4_xattr_check_block(inode, bh)) {
295 bad_block:
296                 EXT4_ERROR_INODE(inode, "bad block %llu",
297                                  EXT4_I(inode)->i_file_acl);
298                 error = -EFSCORRUPTED;
299                 goto cleanup;
300         }
301         ext4_xattr_cache_insert(ext4_mb_cache, bh);
302         entry = BFIRST(bh);
303         error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
304         if (error == -EFSCORRUPTED)
305                 goto bad_block;
306         if (error)
307                 goto cleanup;
308         size = le32_to_cpu(entry->e_value_size);
309         if (buffer) {
310                 error = -ERANGE;
311                 if (size > buffer_size)
312                         goto cleanup;
313                 memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
314                        size);
315         }
316         error = size;
317
318 cleanup:
319         brelse(bh);
320         return error;
321 }
322
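/*
 * Look up an attribute stored in the inode body and copy its value into
 * 'buffer', or just return the value size when 'buffer' is NULL.
 */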
323 int
324 ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
325                      void *buffer, size_t buffer_size)
326 {
327         struct ext4_xattr_ibody_header *header;
328         struct ext4_xattr_entry *entry;
329         struct ext4_inode *raw_inode;
330         struct ext4_iloc iloc;
331         size_t size;
332         void *end;
333         int error;
334
335         if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
336                 return -ENODATA;
337         error = ext4_get_inode_loc(inode, &iloc);
338         if (error)
339                 return error;
340         raw_inode = ext4_raw_inode(&iloc);
341         header = IHDR(inode, raw_inode);
342         entry = IFIRST(header);
343         end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
344         error = ext4_xattr_check_names(entry, end, entry);
345         if (error)
346                 goto cleanup;
347         error = ext4_xattr_find_entry(&entry, name_index, name,
348                                       end - (void *)entry, 0);
349         if (error)
350                 goto cleanup;
351         size = le32_to_cpu(entry->e_value_size);
352         if (buffer) {
353                 error = -ERANGE;
354                 if (size > buffer_size)
355                         goto cleanup;
356                 memcpy(buffer, (void *)IFIRST(header) +
357                        le16_to_cpu(entry->e_value_offs), size);
358         }
359         error = size;
360
361 cleanup:
362         brelse(iloc.bh);
363         return error;
364 }
365
366 /*
367  * ext4_xattr_get()
368  *
369  * Copy an extended attribute into the buffer
370  * provided, or compute the buffer size required.
371  * Pass a NULL buffer to compute only the size required.
372  *
373  * Returns a negative error number on failure, or the number of bytes
374  * used / required on success.
375  */
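/*
 * Typical calling pattern (a sketch only; "foo", 'buf' and 'len' below
 * are illustrative):
 *
 *	len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, "foo", NULL, 0);
 *	if (len > 0)
 *		len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, "foo",
 *				     buf, len);
 */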
376 int
377 ext4_xattr_get(struct inode *inode, int name_index, const char *name,
378                void *buffer, size_t buffer_size)
379 {
380         int error;
381
382         if (strlen(name) > 255)
383                 return -ERANGE;
384
385         down_read(&EXT4_I(inode)->xattr_sem);
386         error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
387                                      buffer_size);
388         if (error == -ENODATA)
389                 error = ext4_xattr_block_get(inode, name_index, name, buffer,
390                                              buffer_size);
391         up_read(&EXT4_I(inode)->xattr_sem);
392         return error;
393 }
394
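/*
 * Emit the (prefixed) names of all entries that have a registered handler
 * into 'buffer', or just add up the space they need when 'buffer' is
 * NULL.  Returns the number of bytes used / required, or -ERANGE if
 * 'buffer' is too small.
 */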
395 static int
396 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
397                         char *buffer, size_t buffer_size)
398 {
399         size_t rest = buffer_size;
400
401         for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
402                 const struct xattr_handler *handler =
403                         ext4_xattr_handler(entry->e_name_index);
404
405                 if (handler) {
406                         size_t size = handler->list(handler, dentry, buffer,
407                                                     rest, entry->e_name,
408                                                     entry->e_name_len);
409                         if (buffer) {
410                                 if (size > rest)
411                                         return -ERANGE;
412                                 buffer += size;
413                         }
414                         rest -= size;
415                 }
416         }
417         return buffer_size - rest;
418 }
419
420 static int
421 ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
422 {
423         struct inode *inode = d_inode(dentry);
424         struct buffer_head *bh = NULL;
425         int error;
426         struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
427
428         ea_idebug(inode, "buffer=%p, buffer_size=%ld",
429                   buffer, (long)buffer_size);
430
431         error = 0;
432         if (!EXT4_I(inode)->i_file_acl)
433                 goto cleanup;
434         ea_idebug(inode, "reading block %llu",
435                   (unsigned long long)EXT4_I(inode)->i_file_acl);
436         bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
437         error = -EIO;
438         if (!bh)
439                 goto cleanup;
440         ea_bdebug(bh, "b_count=%d, refcount=%d",
441                 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
442         if (ext4_xattr_check_block(inode, bh)) {
443                 EXT4_ERROR_INODE(inode, "bad block %llu",
444                                  EXT4_I(inode)->i_file_acl);
445                 error = -EFSCORRUPTED;
446                 goto cleanup;
447         }
448         ext4_xattr_cache_insert(ext4_mb_cache, bh);
449         error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
450
451 cleanup:
452         brelse(bh);
453
454         return error;
455 }
456
457 static int
458 ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
459 {
460         struct inode *inode = d_inode(dentry);
461         struct ext4_xattr_ibody_header *header;
462         struct ext4_inode *raw_inode;
463         struct ext4_iloc iloc;
464         void *end;
465         int error;
466
467         if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
468                 return 0;
469         error = ext4_get_inode_loc(inode, &iloc);
470         if (error)
471                 return error;
472         raw_inode = ext4_raw_inode(&iloc);
473         header = IHDR(inode, raw_inode);
474         end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
475         error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
476         if (error)
477                 goto cleanup;
478         error = ext4_xattr_list_entries(dentry, IFIRST(header),
479                                         buffer, buffer_size);
480
481 cleanup:
482         brelse(iloc.bh);
483         return error;
484 }
485
486 /*
487  * ext4_xattr_list()
488  *
489  * Copy a list of attribute names into the buffer
490  * provided, or compute the buffer size required.
491  * Pass a NULL buffer to compute only the size required.
492  *
493  * Returns a negative error number on failure, or the number of bytes
494  * used / required on success.
495  */
496 static int
497 ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
498 {
499         int ret, ret2;
500
501         down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
502         ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
503         if (ret < 0)
504                 goto errout;
505         if (buffer) {
506                 buffer += ret;
507                 buffer_size -= ret;
508         }
509         ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
510         if (ret < 0)
511                 goto errout;
512         ret += ret2;
513 errout:
514         up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
515         return ret;
516 }
517
518 /*
519  * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
520  * not set, set it.
521  */
522 static void ext4_xattr_update_super_block(handle_t *handle,
523                                           struct super_block *sb)
524 {
525         if (ext4_has_feature_xattr(sb))
526                 return;
527
528         BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
529         if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
530                 ext4_set_feature_xattr(sb);
531                 ext4_handle_dirty_super(handle, sb);
532         }
533 }
534
535 /*
536  * Release the xattr block BH: If the reference count is > 1, decrement it;
537  * otherwise free the block.
538  */
539 static void
540 ext4_xattr_release_block(handle_t *handle, struct inode *inode,
541                          struct buffer_head *bh)
542 {
543         struct mb_cache_entry *ce = NULL;
544         int error = 0;
545         struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
546
547         ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
548         BUFFER_TRACE(bh, "get_write_access");
549         error = ext4_journal_get_write_access(handle, bh);
550         if (error)
551                 goto out;
552
553         lock_buffer(bh);
554         if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
555                 ea_bdebug(bh, "refcount now=0; freeing");
556                 if (ce)
557                         mb_cache_entry_free(ce);
558                 get_bh(bh);
559                 unlock_buffer(bh);
560                 ext4_free_blocks(handle, inode, bh, 0, 1,
561                                  EXT4_FREE_BLOCKS_METADATA |
562                                  EXT4_FREE_BLOCKS_FORGET);
563         } else {
564                 le32_add_cpu(&BHDR(bh)->h_refcount, -1);
565                 if (ce)
566                         mb_cache_entry_release(ce);
567                 /*
568                  * Beware of this ugliness: Releasing of xattr block references
569                  * from different inodes can race and so we have to protect
570                  * from a race where someone else frees the block (and releases
571                  * its journal_head) before we are done dirtying the buffer. In
572                  * nojournal mode this race is harmless and we actually cannot
573                  * call ext4_handle_dirty_xattr_block() with locked buffer as
574                  * that function can call sync_dirty_buffer() so for that case
575                  * we handle the dirtying after unlocking the buffer.
576                  */
577                 if (ext4_handle_valid(handle))
578                         error = ext4_handle_dirty_xattr_block(handle, inode,
579                                                               bh);
580                 unlock_buffer(bh);
581                 if (!ext4_handle_valid(handle))
582                         error = ext4_handle_dirty_xattr_block(handle, inode,
583                                                               bh);
584                 if (IS_SYNC(inode))
585                         ext4_handle_sync(handle);
586                 dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
587                 ea_bdebug(bh, "refcount now=%d; releasing",
588                           le32_to_cpu(BHDR(bh)->h_refcount));
589         }
590 out:
591         ext4_std_error(inode->i_sb, error);
592         return;
593 }
594
595 /*
596  * Find the available free space for EAs. This also accumulates, in *total,
597  * the number of bytes used by the EA entry descriptors.
598  */
599 static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
600                                     size_t *min_offs, void *base, int *total)
601 {
602         for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
603                 if (!last->e_value_block && last->e_value_size) {
604                         size_t offs = le16_to_cpu(last->e_value_offs);
605                         if (offs < *min_offs)
606                                 *min_offs = offs;
607                 }
608                 if (total)
609                         *total += EXT4_XATTR_LEN(last->e_name_len);
610         }
611         return (*min_offs - ((void *)last - base) - sizeof(__u32));
612 }
613
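/*
 * Apply the change described by 'i' (create, replace or remove an
 * attribute) to the entry list in the search buffer 's'.  Entry
 * descriptors grow down from the header while values are packed upwards
 * from the end, so removing or resizing a value shifts the packed values
 * and fixes up the remaining e_value_offs fields.  Returns 0, or -ENOSPC
 * if the new value does not fit.
 */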
614 static int
615 ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
616 {
617         struct ext4_xattr_entry *last;
618         size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
619
620         /* Compute min_offs and last. */
621         last = s->first;
622         for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
623                 if (!last->e_value_block && last->e_value_size) {
624                         size_t offs = le16_to_cpu(last->e_value_offs);
625                         if (offs < min_offs)
626                                 min_offs = offs;
627                 }
628         }
629         free = min_offs - ((void *)last - s->base) - sizeof(__u32);
630         if (!s->not_found) {
631                 if (!s->here->e_value_block && s->here->e_value_size) {
632                         size_t size = le32_to_cpu(s->here->e_value_size);
633                         free += EXT4_XATTR_SIZE(size);
634                 }
635                 free += EXT4_XATTR_LEN(name_len);
636         }
637         if (i->value) {
638                 if (free < EXT4_XATTR_LEN(name_len) +
639                            EXT4_XATTR_SIZE(i->value_len))
640                         return -ENOSPC;
641         }
642
643         if (i->value && s->not_found) {
644                 /* Insert the new name. */
645                 size_t size = EXT4_XATTR_LEN(name_len);
646                 size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
647                 memmove((void *)s->here + size, s->here, rest);
648                 memset(s->here, 0, size);
649                 s->here->e_name_index = i->name_index;
650                 s->here->e_name_len = name_len;
651                 memcpy(s->here->e_name, i->name, name_len);
652         } else {
653                 if (!s->here->e_value_block && s->here->e_value_size) {
654                         void *first_val = s->base + min_offs;
655                         size_t offs = le16_to_cpu(s->here->e_value_offs);
656                         void *val = s->base + offs;
657                         size_t size = EXT4_XATTR_SIZE(
658                                 le32_to_cpu(s->here->e_value_size));
659
660                         if (i->value && size == EXT4_XATTR_SIZE(i->value_len)) {
661                                 /* The old and the new value have the same
662                                    size. Just replace. */
663                                 s->here->e_value_size =
664                                         cpu_to_le32(i->value_len);
665                                 if (i->value == EXT4_ZERO_XATTR_VALUE) {
666                                         memset(val, 0, size);
667                                 } else {
668                                         /* Clear pad bytes first. */
669                                         memset(val + size - EXT4_XATTR_PAD, 0,
670                                                EXT4_XATTR_PAD);
671                                         memcpy(val, i->value, i->value_len);
672                                 }
673                                 return 0;
674                         }
675
676                         /* Remove the old value. */
677                         memmove(first_val + size, first_val, val - first_val);
678                         memset(first_val, 0, size);
679                         s->here->e_value_size = 0;
680                         s->here->e_value_offs = 0;
681                         min_offs += size;
682
683                         /* Adjust all value offsets. */
684                         last = s->first;
685                         while (!IS_LAST_ENTRY(last)) {
686                                 size_t o = le16_to_cpu(last->e_value_offs);
687                                 if (!last->e_value_block &&
688                                     last->e_value_size && o < offs)
689                                         last->e_value_offs =
690                                                 cpu_to_le16(o + size);
691                                 last = EXT4_XATTR_NEXT(last);
692                         }
693                 }
694                 if (!i->value) {
695                         /* Remove the old name. */
696                         size_t size = EXT4_XATTR_LEN(name_len);
697                         last = ENTRY((void *)last - size);
698                         memmove(s->here, (void *)s->here + size,
699                                 (void *)last - (void *)s->here + sizeof(__u32));
700                         memset(last, 0, size);
701                 }
702         }
703
704         if (i->value) {
705                 /* Insert the new value. */
706                 s->here->e_value_size = cpu_to_le32(i->value_len);
707                 if (i->value_len) {
708                         size_t size = EXT4_XATTR_SIZE(i->value_len);
709                         void *val = s->base + min_offs - size;
710                         s->here->e_value_offs = cpu_to_le16(min_offs - size);
711                         if (i->value == EXT4_ZERO_XATTR_VALUE) {
712                                 memset(val, 0, size);
713                         } else {
714                                 /* Clear the pad bytes first. */
715                                 memset(val + size - EXT4_XATTR_PAD, 0,
716                                        EXT4_XATTR_PAD);
717                                 memcpy(val, i->value, i->value_len);
718                         }
719                 }
720         }
721         return 0;
722 }
723
724 struct ext4_xattr_block_find {
725         struct ext4_xattr_search s;
726         struct buffer_head *bh;
727 };
728
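/*
 * Read and validate the inode's external xattr block, if any, and look up
 * the named attribute in it.  The search state in bs->s is consumed by
 * ext4_xattr_block_set().
 */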
729 static int
730 ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
731                       struct ext4_xattr_block_find *bs)
732 {
733         struct super_block *sb = inode->i_sb;
734         int error;
735
736         ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
737                   i->name_index, i->name, i->value, (long)i->value_len);
738
739         if (EXT4_I(inode)->i_file_acl) {
740                 /* The inode already has an extended attribute block. */
741                 bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
742                 error = -EIO;
743                 if (!bs->bh)
744                         goto cleanup;
745                 ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
746                         atomic_read(&(bs->bh->b_count)),
747                         le32_to_cpu(BHDR(bs->bh)->h_refcount));
748                 if (ext4_xattr_check_block(inode, bs->bh)) {
749                         EXT4_ERROR_INODE(inode, "bad block %llu",
750                                          EXT4_I(inode)->i_file_acl);
751                         error = -EFSCORRUPTED;
752                         goto cleanup;
753                 }
754                 /* Find the named attribute. */
755                 bs->s.base = BHDR(bs->bh);
756                 bs->s.first = BFIRST(bs->bh);
757                 bs->s.end = bs->bh->b_data + bs->bh->b_size;
758                 bs->s.here = bs->s.first;
759                 error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
760                                               i->name, bs->bh->b_size, 1);
761                 if (error && error != -ENODATA)
762                         goto cleanup;
763                 bs->s.not_found = error;
764         }
765         error = 0;
766
767 cleanup:
768         return error;
769 }
770
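/*
 * Install the modified attribute set in an external xattr block.  A block
 * referenced only by this inode is modified in place; a shared block is
 * copied first (copy-on-write).  Before allocating a new block, the
 * mbcache is searched for an identical existing block whose refcount can
 * simply be bumped.
 */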
771 static int
772 ext4_xattr_block_set(handle_t *handle, struct inode *inode,
773                      struct ext4_xattr_info *i,
774                      struct ext4_xattr_block_find *bs)
775 {
776         struct super_block *sb = inode->i_sb;
777         struct buffer_head *new_bh = NULL;
778         struct ext4_xattr_search *s = &bs->s;
779         struct mb_cache_entry *ce = NULL;
780         int error = 0;
781         struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
782
783 #define header(x) ((struct ext4_xattr_header *)(x))
784
785         if (i->value && i->value_len > sb->s_blocksize)
786                 return -ENOSPC;
787         if (s->base) {
788                 ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
789                                         bs->bh->b_blocknr);
790                 BUFFER_TRACE(bs->bh, "get_write_access");
791                 error = ext4_journal_get_write_access(handle, bs->bh);
792                 if (error)
793                         goto cleanup;
794                 lock_buffer(bs->bh);
795
796                 if (header(s->base)->h_refcount == cpu_to_le32(1)) {
797                         if (ce) {
798                                 mb_cache_entry_free(ce);
799                                 ce = NULL;
800                         }
801                         ea_bdebug(bs->bh, "modifying in-place");
802                         error = ext4_xattr_set_entry(i, s);
803                         if (!error) {
804                                 if (!IS_LAST_ENTRY(s->first))
805                                         ext4_xattr_rehash(header(s->base),
806                                                           s->here);
807                                 ext4_xattr_cache_insert(ext4_mb_cache,
808                                         bs->bh);
809                         }
810                         unlock_buffer(bs->bh);
811                         if (error == -EFSCORRUPTED)
812                                 goto bad_block;
813                         if (!error)
814                                 error = ext4_handle_dirty_xattr_block(handle,
815                                                                       inode,
816                                                                       bs->bh);
817                         if (error)
818                                 goto cleanup;
819                         goto inserted;
820                 } else {
821                         int offset = (char *)s->here - bs->bh->b_data;
822
823                         unlock_buffer(bs->bh);
824                         if (ce) {
825                                 mb_cache_entry_release(ce);
826                                 ce = NULL;
827                         }
828                         ea_bdebug(bs->bh, "cloning");
829                         s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
830                         error = -ENOMEM;
831                         if (s->base == NULL)
832                                 goto cleanup;
833                         memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
834                         s->first = ENTRY(header(s->base)+1);
835                         header(s->base)->h_refcount = cpu_to_le32(1);
836                         s->here = ENTRY(s->base + offset);
837                         s->end = s->base + bs->bh->b_size;
838                 }
839         } else {
840                 /* Allocate a buffer where we construct the new block. */
841                 s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
842                 /* assert(header == s->base) */
843                 error = -ENOMEM;
844                 if (s->base == NULL)
845                         goto cleanup;
846                 header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
847                 header(s->base)->h_blocks = cpu_to_le32(1);
848                 header(s->base)->h_refcount = cpu_to_le32(1);
849                 s->first = ENTRY(header(s->base)+1);
850                 s->here = ENTRY(header(s->base)+1);
851                 s->end = s->base + sb->s_blocksize;
852         }
853
854         error = ext4_xattr_set_entry(i, s);
855         if (error == -EFSCORRUPTED)
856                 goto bad_block;
857         if (error)
858                 goto cleanup;
859         if (!IS_LAST_ENTRY(s->first))
860                 ext4_xattr_rehash(header(s->base), s->here);
861
862 inserted:
863         if (!IS_LAST_ENTRY(s->first)) {
864                 new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce);
865                 if (new_bh) {
866                         /* We found an identical block in the cache. */
867                         if (new_bh == bs->bh)
868                                 ea_bdebug(new_bh, "keeping");
869                         else {
870                                 /* The old block is released after updating
871                                    the inode. */
872                                 error = dquot_alloc_block(inode,
873                                                 EXT4_C2B(EXT4_SB(sb), 1));
874                                 if (error)
875                                         goto cleanup;
876                                 BUFFER_TRACE(new_bh, "get_write_access");
877                                 error = ext4_journal_get_write_access(handle,
878                                                                       new_bh);
879                                 if (error)
880                                         goto cleanup_dquot;
881                                 lock_buffer(new_bh);
882                                 le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
883                                 ea_bdebug(new_bh, "reusing; refcount now=%d",
884                                         le32_to_cpu(BHDR(new_bh)->h_refcount));
885                                 unlock_buffer(new_bh);
886                                 error = ext4_handle_dirty_xattr_block(handle,
887                                                                       inode,
888                                                                       new_bh);
889                                 if (error)
890                                         goto cleanup_dquot;
891                         }
892                         mb_cache_entry_release(ce);
893                         ce = NULL;
894                 } else if (bs->bh && s->base == bs->bh->b_data) {
895                         /* We were modifying this block in-place. */
896                         ea_bdebug(bs->bh, "keeping this block");
897                         new_bh = bs->bh;
898                         get_bh(new_bh);
899                 } else {
900                         /* We need to allocate a new block */
901                         ext4_fsblk_t goal, block;
902
903                         goal = ext4_group_first_block_no(sb,
904                                                 EXT4_I(inode)->i_block_group);
905
906                         /* non-extent files can't have physical blocks past 2^32 */
907                         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
908                                 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
909
910                         block = ext4_new_meta_blocks(handle, inode, goal, 0,
911                                                      NULL, &error);
912                         if (error)
913                                 goto cleanup;
914
915                         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
916                                 BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
917
918                         ea_idebug(inode, "creating block %llu",
919                                   (unsigned long long)block);
920
921                         new_bh = sb_getblk(sb, block);
922                         if (unlikely(!new_bh)) {
923                                 error = -ENOMEM;
924 getblk_failed:
925                                 ext4_free_blocks(handle, inode, NULL, block, 1,
926                                                  EXT4_FREE_BLOCKS_METADATA);
927                                 goto cleanup;
928                         }
929                         lock_buffer(new_bh);
930                         error = ext4_journal_get_create_access(handle, new_bh);
931                         if (error) {
932                                 unlock_buffer(new_bh);
933                                 error = -EIO;
934                                 goto getblk_failed;
935                         }
936                         memcpy(new_bh->b_data, s->base, new_bh->b_size);
937                         set_buffer_uptodate(new_bh);
938                         unlock_buffer(new_bh);
939                         ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
940                         error = ext4_handle_dirty_xattr_block(handle,
941                                                               inode, new_bh);
942                         if (error)
943                                 goto cleanup;
944                 }
945         }
946
947         /* Update the inode. */
948         EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
949
950         /* Drop the previous xattr block. */
951         if (bs->bh && bs->bh != new_bh)
952                 ext4_xattr_release_block(handle, inode, bs->bh);
953         error = 0;
954
955 cleanup:
956         if (ce)
957                 mb_cache_entry_release(ce);
958         brelse(new_bh);
959         if (!(bs->bh && s->base == bs->bh->b_data))
960                 kfree(s->base);
961
962         return error;
963
964 cleanup_dquot:
965         dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
966         goto cleanup;
967
968 bad_block:
969         EXT4_ERROR_INODE(inode, "bad block %llu",
970                          EXT4_I(inode)->i_file_acl);
971         goto cleanup;
972
973 #undef header
974 }
975
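/*
 * Prepare an in-inode xattr search: point is->s at the EA area in the
 * inode body and, if the inode already carries in-inode xattrs, locate
 * the named attribute.
 */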
976 int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
977                           struct ext4_xattr_ibody_find *is)
978 {
979         struct ext4_xattr_ibody_header *header;
980         struct ext4_inode *raw_inode;
981         int error;
982
983         if (EXT4_I(inode)->i_extra_isize == 0)
984                 return 0;
985         raw_inode = ext4_raw_inode(&is->iloc);
986         header = IHDR(inode, raw_inode);
987         is->s.base = is->s.first = IFIRST(header);
988         is->s.here = is->s.first;
989         is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
990         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
991                 error = ext4_xattr_check_names(IFIRST(header), is->s.end,
992                                                IFIRST(header));
993                 if (error)
994                         return error;
995                 /* Find the named attribute. */
996                 error = ext4_xattr_find_entry(&is->s.here, i->name_index,
997                                               i->name, is->s.end -
998                                               (void *)is->s.base, 0);
999                 if (error && error != -ENODATA)
1000                         return error;
1001                 is->s.not_found = error;
1002         }
1003         return 0;
1004 }
1005
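/*
 * Like ext4_xattr_ibody_set(), but if the inode body is full (-ENOSPC),
 * try to evict inline file data to make room and retry once.
 */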
1006 int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
1007                                 struct ext4_xattr_info *i,
1008                                 struct ext4_xattr_ibody_find *is)
1009 {
1010         struct ext4_xattr_ibody_header *header;
1011         struct ext4_xattr_search *s = &is->s;
1012         int error;
1013
1014         if (EXT4_I(inode)->i_extra_isize == 0)
1015                 return -ENOSPC;
1016         error = ext4_xattr_set_entry(i, s);
1017         if (error) {
1018                 if (error == -ENOSPC &&
1019                     ext4_has_inline_data(inode)) {
1020                         error = ext4_try_to_evict_inline_data(handle, inode,
1021                                         EXT4_XATTR_LEN(strlen(i->name) +
1022                                         EXT4_XATTR_SIZE(i->value_len)));
1023                         if (error)
1024                                 return error;
1025                         error = ext4_xattr_ibody_find(inode, i, is);
1026                         if (error)
1027                                 return error;
1028                         error = ext4_xattr_set_entry(i, s);
1029                 }
1030                 if (error)
1031                         return error;
1032         }
1033         header = IHDR(inode, ext4_raw_inode(&is->iloc));
1034         if (!IS_LAST_ENTRY(s->first)) {
1035                 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
1036                 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
1037         } else {
1038                 header->h_magic = cpu_to_le32(0);
1039                 ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
1040         }
1041         return 0;
1042 }
1043
1044 static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
1045                                 struct ext4_xattr_info *i,
1046                                 struct ext4_xattr_ibody_find *is)
1047 {
1048         struct ext4_xattr_ibody_header *header;
1049         struct ext4_xattr_search *s = &is->s;
1050         int error;
1051
1052         if (EXT4_I(inode)->i_extra_isize == 0)
1053                 return -ENOSPC;
1054         error = ext4_xattr_set_entry(i, s);
1055         if (error)
1056                 return error;
1057         header = IHDR(inode, ext4_raw_inode(&is->iloc));
1058         if (!IS_LAST_ENTRY(s->first)) {
1059                 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
1060                 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
1061         } else {
1062                 header->h_magic = cpu_to_le32(0);
1063                 ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
1064         }
1065         return 0;
1066 }
1067
1068 /*
1069  * ext4_xattr_set_handle()
1070  *
1071  * Create, replace or remove an extended attribute for this inode.  Value
1072  * is NULL to remove an existing extended attribute, and non-NULL to
1073  * either replace an existing extended attribute, or create a new extended
1074  * attribute. The XATTR_REPLACE flag requires that the attribute
1075  * already exist, and XATTR_CREATE requires that it not exist,
1076  * before the call.
1077  *
1078  * Returns 0, or a negative error number on failure.
1079  */
1080 int
1081 ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
1082                       const char *name, const void *value, size_t value_len,
1083                       int flags)
1084 {
1085         struct ext4_xattr_info i = {
1086                 .name_index = name_index,
1087                 .name = name,
1088                 .value = value,
1089                 .value_len = value_len,
1090
1091         };
1092         struct ext4_xattr_ibody_find is = {
1093                 .s = { .not_found = -ENODATA, },
1094         };
1095         struct ext4_xattr_block_find bs = {
1096                 .s = { .not_found = -ENODATA, },
1097         };
1098         unsigned long no_expand;
1099         int error;
1100
1101         if (!name)
1102                 return -EINVAL;
1103         if (strlen(name) > 255)
1104                 return -ERANGE;
1105         down_write(&EXT4_I(inode)->xattr_sem);
1106         no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
1107         ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
1108
1109         error = ext4_reserve_inode_write(handle, inode, &is.iloc);
1110         if (error)
1111                 goto cleanup;
1112
1113         if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
1114                 struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
1115                 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
1116                 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
1117         }
1118
1119         error = ext4_xattr_ibody_find(inode, &i, &is);
1120         if (error)
1121                 goto cleanup;
1122         if (is.s.not_found)
1123                 error = ext4_xattr_block_find(inode, &i, &bs);
1124         if (error)
1125                 goto cleanup;
1126         if (is.s.not_found && bs.s.not_found) {
1127                 error = -ENODATA;
1128                 if (flags & XATTR_REPLACE)
1129                         goto cleanup;
1130                 error = 0;
1131                 if (!value)
1132                         goto cleanup;
1133         } else {
1134                 error = -EEXIST;
1135                 if (flags & XATTR_CREATE)
1136                         goto cleanup;
1137         }
1138         if (!value) {
1139                 if (!is.s.not_found)
1140                         error = ext4_xattr_ibody_set(handle, inode, &i, &is);
1141                 else if (!bs.s.not_found)
1142                         error = ext4_xattr_block_set(handle, inode, &i, &bs);
1143         } else {
1144                 error = ext4_xattr_ibody_set(handle, inode, &i, &is);
1145                 if (!error && !bs.s.not_found) {
1146                         i.value = NULL;
1147                         error = ext4_xattr_block_set(handle, inode, &i, &bs);
1148                 } else if (error == -ENOSPC) {
1149                         if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
1150                                 error = ext4_xattr_block_find(inode, &i, &bs);
1151                                 if (error)
1152                                         goto cleanup;
1153                         }
1154                         error = ext4_xattr_block_set(handle, inode, &i, &bs);
1155                         if (error)
1156                                 goto cleanup;
1157                         if (!is.s.not_found) {
1158                                 i.value = NULL;
1159                                 error = ext4_xattr_ibody_set(handle, inode, &i,
1160                                                              &is);
1161                         }
1162                 }
1163         }
1164         if (!error) {
1165                 ext4_xattr_update_super_block(handle, inode->i_sb);
1166                 inode->i_ctime = ext4_current_time(inode);
1167                 if (!value)
1168                         ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
1169                 error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
1170                 /*
1171                  * The bh is consumed by ext4_mark_iloc_dirty, even with
1172                  * error != 0.
1173                  */
1174                 is.iloc.bh = NULL;
1175                 if (IS_SYNC(inode))
1176                         ext4_handle_sync(handle);
1177         }
1178
1179 cleanup:
1180         brelse(is.iloc.bh);
1181         brelse(bs.bh);
1182         if (no_expand == 0)
1183                 ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
1184         up_write(&EXT4_I(inode)->xattr_sem);
1185         return error;
1186 }
1187
1188 /*
1189  * ext4_xattr_set()
1190  *
1191  * Like ext4_xattr_set_handle, but start from an inode. This extended
1192  * attribute modification is a filesystem transaction by itself.
1193  *
1194  * Returns 0, or a negative error number on failure.
1195  */
1196 int
1197 ext4_xattr_set(struct inode *inode, int name_index, const char *name,
1198                const void *value, size_t value_len, int flags)
1199 {
1200         handle_t *handle;
1201         int error, retries = 0;
1202         int credits = ext4_jbd2_credits_xattr(inode);
1203
1204 retry:
1205         handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
1206         if (IS_ERR(handle)) {
1207                 error = PTR_ERR(handle);
1208         } else {
1209                 int error2;
1210
1211                 error = ext4_xattr_set_handle(handle, inode, name_index, name,
1212                                               value, value_len, flags);
1213                 error2 = ext4_journal_stop(handle);
1214                 if (error == -ENOSPC &&
1215                     ext4_should_retry_alloc(inode->i_sb, &retries))
1216                         goto retry;
1217                 if (error == 0)
1218                         error = error2;
1219         }
1220
1221         return error;
1222 }
1223
1224 /*
1225  * Shift the EA entries in the inode to create space for the increased
1226  * i_extra_isize.
1227  */
1228 static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
1229                                      int value_offs_shift, void *to,
1230                                      void *from, size_t n, int blocksize)
1231 {
1232         struct ext4_xattr_entry *last = entry;
1233         int new_offs;
1234
1235         /* Adjust the value offsets of the entries */
1236         for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1237                 if (!last->e_value_block && last->e_value_size) {
1238                         new_offs = le16_to_cpu(last->e_value_offs) +
1239                                                         value_offs_shift;
1240                         BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
1241                                  > blocksize);
1242                         last->e_value_offs = cpu_to_le16(new_offs);
1243                 }
1244         }
1245         /* Shift the entries by n bytes */
1246         memmove(to, from, n);
1247 }
1248
1249 /*
1250  * Expand an inode by new_extra_isize bytes when EAs are present.
1251  * Returns 0 on success or negative error number on failure.
1252  */
1253 int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
1254                                struct ext4_inode *raw_inode, handle_t *handle)
1255 {
1256         struct ext4_xattr_ibody_header *header;
1257         struct ext4_xattr_entry *entry, *last, *first;
1258         struct buffer_head *bh = NULL;
1259         struct ext4_xattr_ibody_find *is = NULL;
1260         struct ext4_xattr_block_find *bs = NULL;
1261         char *buffer = NULL, *b_entry_name = NULL;
1262         size_t min_offs, free;
1263         int total_ino;
1264         void *base, *start, *end;
1265         int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
1266         int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
1267
1268         down_write(&EXT4_I(inode)->xattr_sem);
1269 retry:
1270         if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
1271                 up_write(&EXT4_I(inode)->xattr_sem);
1272                 return 0;
1273         }
1274
1275         header = IHDR(inode, raw_inode);
1276         entry = IFIRST(header);
1277
1278         /*
1279          * Check if enough free space is available in the inode to shift the
1280          * entries ahead by new_extra_isize.
1281          */
1282
1283         base = start = entry;
1284         end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
1285         min_offs = end - base;
1286         last = entry;
1287         total_ino = sizeof(struct ext4_xattr_ibody_header);
1288
1289         free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
1290         if (free >= new_extra_isize) {
1291                 entry = IFIRST(header);
1292                 ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
1293                                 - new_extra_isize, (void *)raw_inode +
1294                                 EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
1295                                 (void *)header, total_ino,
1296                                 inode->i_sb->s_blocksize);
1297                 EXT4_I(inode)->i_extra_isize = new_extra_isize;
1298                 error = 0;
1299                 goto cleanup;
1300         }
1301
1302         /*
1303          * Enough free space isn't available in the inode, check if
1304          * EA block can hold new_extra_isize bytes.
1305          */
1306         if (EXT4_I(inode)->i_file_acl) {
1307                 bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
1308                 error = -EIO;
1309                 if (!bh)
1310                         goto cleanup;
1311                 if (ext4_xattr_check_block(inode, bh)) {
1312                         EXT4_ERROR_INODE(inode, "bad block %llu",
1313                                          EXT4_I(inode)->i_file_acl);
1314                         error = -EFSCORRUPTED;
1315                         goto cleanup;
1316                 }
1317                 base = BHDR(bh);
1318                 first = BFIRST(bh);
1319                 end = bh->b_data + bh->b_size;
1320                 min_offs = end - base;
1321                 free = ext4_xattr_free_space(first, &min_offs, base, NULL);
1322                 if (free < new_extra_isize) {
1323                         if (!tried_min_extra_isize && s_min_extra_isize) {
1324                                 tried_min_extra_isize++;
1325                                 new_extra_isize = s_min_extra_isize;
1326                                 brelse(bh);
1327                                 goto retry;
1328                         }
1329                         error = -ENOSPC;
1330                         goto cleanup;
1331                 }
1332         } else {
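                     /* No external EA block yet; a whole new block is available. */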
1333                 free = inode->i_sb->s_blocksize;
1334         }
1335
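             /*
              * Not enough slack in the inode body.  Move attributes out one
              * at a time: pick an entry, delete it from the inode, shift the
              * remaining entries to reclaim its space, then re-add it to the
              * external EA block, until new_extra_isize bytes have been freed.
              */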
1336         while (new_extra_isize > 0) {
1337                 size_t offs, size, entry_size;
1338                 struct ext4_xattr_entry *small_entry = NULL;
1339                 struct ext4_xattr_info i = {
1340                         .value = NULL,
1341                         .value_len = 0,
1342                 };
1343                 unsigned int total_size;  /* EA entry size + value size */
1344                 unsigned int shift_bytes; /* number of bytes to shift EAs by */
1345                 unsigned int min_total_size = ~0U;
1346
1347                 is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
1348                 bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
1349                 if (!is || !bs) {
1350                         error = -ENOMEM;
1351                         goto cleanup;
1352                 }
1353
1354                 is->s.not_found = -ENODATA;
1355                 bs->s.not_found = -ENODATA;
1356                 is->iloc.bh = NULL;
1357                 bs->bh = NULL;
1358
1359                 last = IFIRST(header);
1360                 /* Find the entry best suited to be pushed into EA block */
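                     /*
                      * Prefer the smallest entry that alone frees at least
                      * new_extra_isize bytes; failing that, fall back to a
                      * smaller one and let the outer loop run again.
                      */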
1361                 entry = NULL;
1362                 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1363                         total_size =
1364                         EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
1365                                         EXT4_XATTR_LEN(last->e_name_len);
1366                         if (total_size <= free && total_size < min_total_size) {
1367                                 if (total_size < new_extra_isize) {
1368                                         small_entry = last;
1369                                 } else {
1370                                         entry = last;
1371                                         min_total_size = total_size;
1372                                 }
1373                         }
1374                 }
1375
1376                 if (entry == NULL) {
1377                         if (small_entry) {
1378                                 entry = small_entry;
1379                         } else {
1380                                 if (!tried_min_extra_isize &&
1381                                     s_min_extra_isize) {
1382                                         tried_min_extra_isize++;
1383                                         new_extra_isize = s_min_extra_isize;
1384                                         kfree(is); is = NULL;
1385                                         kfree(bs); bs = NULL;
1386                                         brelse(bh);
1387                                         goto retry;
1388                                 }
1389                                 error = -ENOSPC;
1390                                 goto cleanup;
1391                         }
1392                 }
1393                 offs = le16_to_cpu(entry->e_value_offs);
1394                 size = le32_to_cpu(entry->e_value_size);
1395                 entry_size = EXT4_XATTR_LEN(entry->e_name_len);
1396                 i.name_index = entry->e_name_index;
1397                 buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS);
1398                 b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
1399                 if (!buffer || !b_entry_name) {
1400                         error = -ENOMEM;
1401                         goto cleanup;
1402                 }
1403                 /* Save the entry name and the entry value */
1404                 memcpy(buffer, (void *)IFIRST(header) + offs,
1405                        EXT4_XATTR_SIZE(size));
1406                 memcpy(b_entry_name, entry->e_name, entry->e_name_len);
1407                 b_entry_name[entry->e_name_len] = '\0';
1408                 i.name = b_entry_name;
1409
1410                 error = ext4_get_inode_loc(inode, &is->iloc);
1411                 if (error)
1412                         goto cleanup;
1413
1414                 error = ext4_xattr_ibody_find(inode, &i, is);
1415                 if (error)
1416                         goto cleanup;
1417
1418                 /* Remove the chosen entry from the inode */
1419                 error = ext4_xattr_ibody_set(handle, inode, &i, is);
1420                 if (error)
1421                         goto cleanup;
1422
1423                 entry = IFIRST(header);
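                     /*
                      * Shift by the remaining deficit at most; if the freed
                      * entry was smaller than that, only its own descriptor
                      * plus padded value can be reclaimed this round.
                      */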
1424                 if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
1425                         shift_bytes = new_extra_isize;
1426                 else
1427                         shift_bytes = entry_size + EXT4_XATTR_SIZE(size);
1428                 /* Adjust the offsets and shift the remaining entries ahead */
1429                 ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
1430                         shift_bytes, (void *)raw_inode +
1431                         EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
1432                         (void *)header, total_ino - entry_size,
1433                         inode->i_sb->s_blocksize);
1434
1435                 extra_isize += shift_bytes;
1436                 new_extra_isize -= shift_bytes;
                     total_ino -= entry_size;        /* entry has left the inode */
1437                 EXT4_I(inode)->i_extra_isize = extra_isize;
1438
1439                 i.name = b_entry_name;
1440                 i.value = buffer;
1441                 i.value_len = size;
1442                 error = ext4_xattr_block_find(inode, &i, bs);
1443                 if (error)
1444                         goto cleanup;
1445
1446                 /* Add entry which was removed from the inode into the block */
1447                 error = ext4_xattr_block_set(handle, inode, &i, bs);
1448                 if (error)
1449                         goto cleanup;
1450                 kfree(b_entry_name);
1451                 kfree(buffer);
1452                 b_entry_name = NULL;
1453                 buffer = NULL;
1454                 brelse(is->iloc.bh);
1455                 kfree(is);
1456                 kfree(bs);
1457         }
1458         brelse(bh);
1459         up_write(&EXT4_I(inode)->xattr_sem);
1460         return 0;
1461
1462 cleanup:
1463         kfree(b_entry_name);
1464         kfree(buffer);
1465         if (is)
1466                 brelse(is->iloc.bh);
1467         kfree(is);
1468         kfree(bs);
1469         brelse(bh);
1470         up_write(&EXT4_I(inode)->xattr_sem);
1471         return error;
1472 }
1473
1474
1475
1476 /*
1477  * ext4_xattr_delete_inode()
1478  *
1479  * Free extended attribute resources associated with this inode. This
1480  * is called immediately before an inode is freed. We have exclusive
1481  * access to the inode.
1482  */
1483 void
1484 ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
1485 {
1486         struct buffer_head *bh = NULL;
1487
1488         if (!EXT4_I(inode)->i_file_acl)
1489                 goto cleanup;
1490         bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
1491         if (!bh) {
1492                 EXT4_ERROR_INODE(inode, "block %llu read error",
1493                                  EXT4_I(inode)->i_file_acl);
1494                 goto cleanup;
1495         }
1496         if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
1497             BHDR(bh)->h_blocks != cpu_to_le32(1)) {
1498                 EXT4_ERROR_INODE(inode, "bad block %llu",
1499                                  EXT4_I(inode)->i_file_acl);
1500                 goto cleanup;
1501         }
1502         ext4_xattr_release_block(handle, inode, bh);
1503         EXT4_I(inode)->i_file_acl = 0;
1504
1505 cleanup:
1506         brelse(bh);
1507 }
1508
1509 /*
1510  * ext4_xattr_put_super()
1511  *
1512  * This is called when a file system is unmounted.
1513  */
1514 void
1515 ext4_xattr_put_super(struct super_block *sb)
1516 {
1517         mb_cache_shrink(sb->s_bdev);
1518 }
1519
1520 /*
1521  * ext4_xattr_cache_insert()
1522  *
1523  * Create a new entry in the extended attribute cache, and insert
1524  * it unless such an entry is already in the cache.
1525  *
1526  * Failures are silently ignored; the block will simply not be shared.
1527  */
1528 static void
1529 ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
1530 {
1531         __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
1532         struct mb_cache_entry *ce;
1533         int error;
1534
1535         ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
1536         if (!ce) {
1537                 ea_bdebug(bh, "out of memory");
1538                 return;
1539         }
1540         error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
1541         if (error) {
1542                 mb_cache_entry_free(ce);
1543                 if (error == -EBUSY) {
1544                         ea_bdebug(bh, "already in cache");
1545                         error = 0;
1546                 }
1547         } else {
1548                 ea_bdebug(bh, "inserting [%x]", (int)hash);
1549                 mb_cache_entry_release(ce);
1550         }
1551 }
1552
1553 /*
1554  * ext4_xattr_cmp()
1555  *
1556  * Compare two extended attribute blocks for equality.
1557  *
1558  * Returns 0 if the blocks are equal, 1 if they differ, and
1559  * a negative error number on errors.
1560  */
1561 static int
1562 ext4_xattr_cmp(struct ext4_xattr_header *header1,
1563                struct ext4_xattr_header *header2)
1564 {
1565         struct ext4_xattr_entry *entry1, *entry2;
1566
1567         entry1 = ENTRY(header1+1);
1568         entry2 = ENTRY(header2+1);
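             /*
              * Entries in on-disk EA blocks are kept sorted, so equal blocks
              * can be compared with a simple lockstep walk of both lists.
              */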
1569         while (!IS_LAST_ENTRY(entry1)) {
1570                 if (IS_LAST_ENTRY(entry2))
1571                         return 1;
1572                 if (entry1->e_hash != entry2->e_hash ||
1573                     entry1->e_name_index != entry2->e_name_index ||
1574                     entry1->e_name_len != entry2->e_name_len ||
1575                     entry1->e_value_size != entry2->e_value_size ||
1576                     memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
1577                         return 1;
1578                 if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
1579                         return -EFSCORRUPTED;
1580                 if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
1581                            (char *)header2 + le16_to_cpu(entry2->e_value_offs),
1582                            le32_to_cpu(entry1->e_value_size)))
1583                         return 1;
1584
1585                 entry1 = EXT4_XATTR_NEXT(entry1);
1586                 entry2 = EXT4_XATTR_NEXT(entry2);
1587         }
1588         if (!IS_LAST_ENTRY(entry2))
1589                 return 1;
1590         return 0;
1591 }
1592
1593 /*
1594  * ext4_xattr_cache_find()
1595  *
1596  * Find an identical extended attribute block.
1597  *
1598  * Returns a pointer to the block found, or NULL if such a block was
1599  * not found or an error occurred.
1600  */
1601 static struct buffer_head *
1602 ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
1603                       struct mb_cache_entry **pce)
1604 {
1605         __u32 hash = le32_to_cpu(header->h_hash);
1606         struct mb_cache_entry *ce;
1607         struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
1608
1609         if (!header->h_hash)
1610                 return NULL;  /* never share */
1611         ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
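             /*
              * The mbcache is keyed by the block header hash, which may
              * collide: read back every candidate, skip blocks already at
              * the refcount sharing limit, and compare the rest in full.
              */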
1612 again:
1613         ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev,
1614                                        hash);
1615         while (ce) {
1616                 struct buffer_head *bh;
1617
1618                 if (IS_ERR(ce)) {
1619                         if (PTR_ERR(ce) == -EAGAIN)
1620                                 goto again;
1621                         break;
1622                 }
1623                 bh = sb_bread(inode->i_sb, ce->e_block);
1624                 if (!bh) {
1625                         EXT4_ERROR_INODE(inode, "block %lu read error",
1626                                          (unsigned long) ce->e_block);
1627                 } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
1628                                 EXT4_XATTR_REFCOUNT_MAX) {
1629                         ea_idebug(inode, "block %lu refcount %d>=%d",
1630                                   (unsigned long) ce->e_block,
1631                                   le32_to_cpu(BHDR(bh)->h_refcount),
1632                                           EXT4_XATTR_REFCOUNT_MAX);
1633                 } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
1634                         *pce = ce;
1635                         return bh;
1636                 }
1637                 brelse(bh);
1638                 ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
1639         }
1640         return NULL;
1641 }
1642
1643 #define NAME_HASH_SHIFT 5
1644 #define VALUE_HASH_SHIFT 16
1645
1646 /*
1647  * ext4_xattr_hash_entry()
1648  *
1649  * Compute the hash of an extended attribute.
1650  */
1651 static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
1652                                          struct ext4_xattr_entry *entry)
1653 {
1654         __u32 hash = 0;
1655         char *name = entry->e_name;
1656         int n;
1657
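             /*
              * Rotate-and-xor each name byte into the hash, then (below)
              * each 32-bit word of the attribute value.
              */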
1658         for (n = 0; n < entry->e_name_len; n++) {
1659                 hash = (hash << NAME_HASH_SHIFT) ^
1660                        (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
1661                        *name++;
1662         }
1663
1664         if (entry->e_value_block == 0 && entry->e_value_size != 0) {
1665                 __le32 *value = (__le32 *)((char *)header +
1666                         le16_to_cpu(entry->e_value_offs));
1667                 for (n = (le32_to_cpu(entry->e_value_size) +
1668                      EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
1669                         hash = (hash << VALUE_HASH_SHIFT) ^
1670                                (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
1671                                le32_to_cpu(*value++);
1672                 }
1673         }
1674         entry->e_hash = cpu_to_le32(hash);
1675 }
1676
1677 #undef NAME_HASH_SHIFT
1678 #undef VALUE_HASH_SHIFT
1679
1680 #define BLOCK_HASH_SHIFT 16
1681
1682 /*
1683  * ext4_xattr_rehash()
1684  *
1685  * Re-compute the extended attribute hash value after an entry has changed.
1686  */
1687 static void ext4_xattr_rehash(struct ext4_xattr_header *header,
1688                               struct ext4_xattr_entry *entry)
1689 {
1690         struct ext4_xattr_entry *here;
1691         __u32 hash = 0;
1692
1693         ext4_xattr_hash_entry(header, entry);
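             /*
              * The changed entry was re-hashed above; now fold every entry
              * hash into the block-wide hash used by the mbcache.
              */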
1694         here = ENTRY(header+1);
1695         while (!IS_LAST_ENTRY(here)) {
1696                 if (!here->e_hash) {
1697                         /* Block is not shared if an entry's hash value == 0 */
1698                         hash = 0;
1699                         break;
1700                 }
1701                 hash = (hash << BLOCK_HASH_SHIFT) ^
1702                        (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
1703                        le32_to_cpu(here->e_hash);
1704                 here = EXT4_XATTR_NEXT(here);
1705         }
1706         header->h_hash = cpu_to_le32(hash);
1707 }
1708
1709 #undef BLOCK_HASH_SHIFT
1710
1711 #define HASH_BUCKET_BITS        10
1712
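     /*
      * The xattr block cache is an mbcache with 2^HASH_BUCKET_BITS buckets,
      * keyed by the block hash computed above.
      */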
1713 struct mb_cache *
1714 ext4_xattr_create_cache(char *name)
1715 {
1716         return mb_cache_create(name, HASH_BUCKET_BITS);
1717 }
1718
1719 void ext4_xattr_destroy_cache(struct mb_cache *cache)
1720 {
1721         if (cache)
1722                 mb_cache_destroy(cache);
1723 }
1724