/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>
/*
 * balloc.c contains the block allocation and deallocation routines
 */
/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}
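/*
 * Worked example (illustrative): with EXT4_BLOCKS_PER_GROUP(sb) == 32768
 * and s_first_data_block == 0, block 100000 maps to group 3 with offset
 * 1696 (100000 - 3 * 32768).  do_div() divides in place: it leaves the
 * quotient (the group number) in blocknr and returns the remainder.
 */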
static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;

	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}
/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
				    ext4_group_t block_group,
				    struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}
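/*
 * Worked example (illustrative, assumed layout): on a bigalloc file
 * system with 16 blocks per cluster and 2 base metadata clusters, a
 * block bitmap at cluster offset 2 satisfies block_cluster ==
 * num_clusters above, so the contiguous base region simply grows to 3
 * clusters.  A bitmap at cluster offset 10 instead stays recorded in
 * block_cluster and is counted by the final increments.
 */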
static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * block count.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}
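/*
 * Worked example (illustrative): on a 100000-block file system with
 * 32768 blocks per group and s_first_data_block == 0, the last group
 * (group 3) starts at block 98304 and so contains only 100000 - 98304
 * = 1696 blocks instead of a full 32768.
 */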
/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
			    ext4_group_t block_group,
			    struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation,
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_blks_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return;
	}

	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * If the number of clusters within the group is less than
	 * blocksize * 8 (the size of the bitmap), set the rest of the
	 * block bitmap to 1.
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
}
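/*
 * Worked example (illustrative, assumes a non-bigalloc, non-flex_bg
 * layout): for a group holding a backup superblock (1 block), one
 * group descriptor block and 128 reserved gdt blocks, bit_max is 130,
 * so bits 0-129 are set first; the bits for the block bitmap, inode
 * bitmap and inode table follow, and ext4_mark_bitmap_end() finally
 * marks the tail of the bitmap beyond the end of the group as in use.
 */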
/*
 * Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap.
 */
unsigned ext4_free_blocks_after_init(struct super_block *sb,
				     ext4_group_t block_group,
				     struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */
/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc *ext4_get_group_desc(struct super_block *sb,
					    ext4_group_t block_group,
					    struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}
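/*
 * Worked example (illustrative): with 4 KiB blocks and 32-byte
 * descriptors, EXT4_DESC_PER_BLOCK(sb) is 128, so block group 300 is
 * found in descriptor block 2 (300 >> 7) at index 44 (300 & 127)
 * within that block.
 */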
static int ext4_valid_block_bitmap(struct super_block *sb,
				   struct ext4_group_desc *desc,
				   unsigned int block_group,
				   struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t bitmap_blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all, so the bitmap
		 * validation is skipped for those groups; otherwise we
		 * would also have to read the block group where the
		 * bitmaps are located to verify they are set.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether the block bitmap block number is set */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode table block numbers are set */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
		   block_group, bitmap_blk);
	return 0;
}
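/*
 * Example of the inode-table check above (illustrative): if the inode
 * table occupies offsets 260-771 within the group, the search must not
 * find a zero bit before offset 772; a zero bit inside that range means
 * an inode table block is marked free, so the bitmap is rejected.
 */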
/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode-table blocks are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit and bh is uptodate,
		 * the bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for reading.  We can safely mark the
	 * bitmap as uptodate now.  We do it here so the bitmap uptodate
	 * bit gets set with the buffer lock held.
	 */
	trace_ext4_read_block_bitmap_load(sb, block_group);
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * the file system was mounted not to panic on error,
	 * so continue with a corrupt bitmap
	 */
	return bh;
}
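/*
 * Typical caller pattern (an illustrative sketch, not code from this
 * file):
 *
 *	struct buffer_head *bitmap_bh;
 *
 *	bitmap_bh = ext4_read_block_bitmap(sb, group);
 *	if (!bitmap_bh)
 *		return -EIO;
 *	... examine or modify bits under ext4_lock_group(sb, group) ...
 *	brelse(bitmap_bh);	(drops the reference taken by sb_getblk)
 */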
/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 * @flags:	allocation flags, e.g. EXT4_MB_USE_ROOT_BLOCKS
 *
 * Check if the filesystem has nblocks free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
				s64 nblocks, unsigned int flags)
{
	s64 free_blocks, dirty_blocks, root_blocks;
	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;

	free_blocks  = percpu_counter_read_positive(fbc);
	dirty_blocks = percpu_counter_read_positive(dbc);
	root_blocks = ext4_r_blocks_count(sbi->s_es);

	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
		free_blocks  = percpu_counter_sum_positive(fbc);
		dirty_blocks = percpu_counter_sum_positive(dbc);
	}
	/* Check whether we have space after accounting for current
	 * dirty blocks & root reserved blocks.
	 */
	if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
		return 1;

	/* Hm, nope.  Are (enough) root reserved blocks available? */
	if (sbi->s_resuid == current_fsuid() ||
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_blocks >= (nblocks + dirty_blocks))
			return 1;
	}

	return 0;
}
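/*
 * Worked example (illustrative): percpu_counter_read_positive() can be
 * stale by roughly the percpu batch size times the number of CPUs.  If
 * the cheap read shows 1000 free blocks against a request of 996 plus
 * the root reserve, the margin falls inside EXT4_FREEBLOCKS_WATERMARK,
 * so the exact (but more expensive) percpu_counter_sum_positive() is
 * consulted before the allocation is refused or allowed.
 */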
int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
			   s64 nblocks, unsigned int flags)
{
	if (ext4_has_free_blocks(sbi, nblocks, flags)) {
		percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
		return 0;
	} else
		return -ENOSPC;
}
/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of allocation attempts made so far
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
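/*
 * Typical usage (an illustrative sketch; do_alloc() is a hypothetical
 * callee):
 *
 *	int retries = 0, err;
 *
 *	do {
 *		err = do_alloc(...);
 *	} while (err == -ENOSPC &&
 *		 ext4_should_retry_alloc(sb, &retries));
 */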
/**
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block (filesystem-wide)
 * @flags:		allocation flags
 * @count:		pointer to total number of blocks needed
 * @errp:		error code
 *
 * Return the 1st allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is stored in errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode, ar.len);
	}
	return ret;
}
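/*
 * Typical usage (an illustrative sketch): allocate a single metadata
 * block near a goal block, in the style of the extent tree and xattr
 * code:
 *
 *	int err;
 *	ext4_fsblk_t block;
 *
 *	block = ext4_new_meta_blocks(handle, inode, goal, 0, NULL, &err);
 *	if (err)
 *		return err;
 */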
/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_blks_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
		       i, ext4_free_blks_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
	       ", computed = %llu, %llu\n", ext4_free_blocks_count(es),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_blks_count(sb, gdp);
	}

	return desc_count;
#endif
}
static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
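/*
 * Worked example (illustrative): with SPARSE_SUPER enabled, backups
 * live only in groups 0 and 1 and in groups that are powers of 3, 5
 * or 7: 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, ...
 */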
/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
	    !ext4_group_sparse(group))
		return 0;
	return 1;
}
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					  ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					    ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}
/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}
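/*
 * Worked example (illustrative): without META_BG, a group holding a
 * superblock backup also holds a complete copy of the descriptor table
 * (s_gdb_count blocks).  With META_BG, each metagroup of
 * EXT4_DESC_PER_BLOCK(sb) groups stores just its own descriptor block,
 * replicated in the metagroup's first, second and last groups.
 */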
/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}
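/*
 * Worked example (illustrative): for a group containing a backup
 * superblock (1 block), one descriptor block and 128 reserved gdt
 * blocks, num is 1 + 1 + 128 = 130 blocks, which EXT4_NUM_B2C() rounds
 * up to 9 clusters at 16 blocks per cluster (or leaves at 130 clusters
 * when the cluster size equals the block size).
 */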
/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
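/*
 * Worked example (illustrative): for a non-delalloc allocation by a
 * process with pid 4105, pid % 16 == 9, so the goal is pushed 9/16ths
 * of the way into the group's striping range, spreading concurrent
 * writers within the group apart from each other.
 */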