/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "group.h"
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */
/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}
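/*
 * Worked example (illustrative, not from the original source): with 4KiB
 * blocks there are 32768 blocks per group; assuming s_first_data_block == 0,
 * block 100000 falls in group 3 at offset 100000 - 3 * 32768 = 1696.
 */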
static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;

	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}
static int ext4_group_used_meta_blocks(struct super_block *sb,
				ext4_group_t block_group)
{
	ext4_fsblk_t tmp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	/* block bitmap, inode bitmap, and inode table blocks */
	int used_blocks = sbi->s_itb_per_group + 2;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		struct ext4_group_desc *gdp;
		struct buffer_head *bh;

		gdp = ext4_get_group_desc(sb, block_group, &bh);
		if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!ext4_block_in_group(sb, tmp, block_group))
				used_blocks -= 1;
		}
	}
	return used_blocks;
}
/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
		 ext4_group_t block_group, struct ext4_group_desc *gdp)
{
	int bit, bit_max;
	unsigned free_blocks, group_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (bh) {
		J_ASSERT_BH(bh, buffer_locked(bh));

		/* If checksum is bad mark all blocks used to prevent allocation
		 * essentially implementing a per-group read-only flag. */
		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
			ext4_error(sb, __func__,
				   "Checksum bad for group %lu\n", block_group);
			gdp->bg_free_blocks_count = 0;
			gdp->bg_free_inodes_count = 0;
			gdp->bg_itable_unused = 0;
			memset(bh->b_data, 0xff, sb->s_blocksize);
			return 0;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
	}

	/* Check for superblock and gdt backups in this group */
	bit_max = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (bit_max) {
			bit_max += ext4_bg_num_gdb(sb, block_group);
			bit_max +=
				le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		bit_max += ext4_bg_num_gdb(sb, block_group);
	}

	if (block_group == sbi->s_groups_count - 1) {
		/*
		 * Even though mke2fs always initializes the first and last
		 * group, if some other tool enabled EXT4_BG_BLOCK_UNINIT we
		 * need to make sure we calculate the right free blocks
		 */
		group_blocks = ext4_blocks_count(sbi->s_es) -
			le32_to_cpu(sbi->s_es->s_first_data_block) -
			(EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count - 1));
	} else {
		group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
	}

	free_blocks = group_blocks - bit_max;

	if (bh) {
		ext4_fsblk_t start, tmp;
		int flex_bg = 0;

		for (bit = 0; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		start = ext4_group_first_block_no(sb, block_group);

		if (EXT4_HAS_INCOMPAT_FEATURE(sb,
					      EXT4_FEATURE_INCOMPAT_FLEX_BG))
			flex_bg = 1;

		/* Set bits for block and inode bitmaps, and inode table */
		tmp = ext4_block_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!flex_bg ||
				ext4_block_in_group(sb, tmp, block_group))
				ext4_set_bit(tmp - start, bh->b_data);
		}
		/*
		 * Also if the number of blocks within the group is
		 * less than the blocksize * 8 (which is the size of
		 * the bitmap), set the rest of the block bitmap to 1
		 */
		mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
	}
	return free_blocks - ext4_group_used_meta_blocks(sb, block_group);
}
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */
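/*
 * Illustrative on-disk layout of one block group (a sketch of the scheme
 * described above; field widths are examples only):
 *
 *	[super backup][group descriptors][block bitmap][inode bitmap]
 *	[inode table (s_itb_per_group blocks)][data blocks ...]
 *
 * Groups without a sparse superblock backup start directly with their
 * bitmaps.
 */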
#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
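/*
 * Example (illustrative): in_range(5, 3, 4) is true, because the range
 * covers blocks 3..6; in_range(7, 3, 4) is false.
 */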
/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned long group_desc;
	unsigned long offset;
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= sbi->s_groups_count) {
		ext4_error(sb, "ext4_get_group_desc",
			   "block_group >= groups_count - "
			   "block_group = %lu, groups_count = %lu",
			   block_group, sbi->s_groups_count);

		return NULL;
	}
	smp_rmb();

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "ext4_get_group_desc",
			   "Group descriptor not loaded - "
			   "block_group = %lu, group_desc = %lu, desc = %lu",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}
static int ext4_valid_block_bitmap(struct super_block *sb,
					struct ext4_group_desc *desc,
					unsigned int block_group,
					struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t bitmap_blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all
		 * so the bitmap validation will be skipped for those groups
		 * or it has to also read the block group where the bitmaps
		 * are located to verify they are set.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode table block number is set */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
	ext4_error(sb, __func__,
			"Invalid block bitmap - "
			"block_group = %d, block = %llu",
			block_group, bitmap_blk);
	return 0;
}
/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the
 * bits for the block/inode/inode table blocks are set in the bitmap
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, __func__,
			    "Cannot read block bitmap - "
			    "block_group = %lu, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	if (bh_uptodate_or_lock(bh))
		return bh;

	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
		return bh;
	}
	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, __func__,
			    "Cannot read block bitmap - "
			    "block_group = %lu, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * file system mounted not to panic on error,
	 * continue with corrupt bitmap
	 */
	return bh;
}
/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * We use a red-black tree to represent per-filesystem reservation
 * windows.
 */
/**
 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
 * @root:		root of per-filesystem reservation rb tree
 * @verbose:		verbose mode
 * @fn:			function which wishes to dump the reservation map
 *
 * If verbose is turned on, it will print the whole block reservation
 * windows (start, end).  Otherwise, it will only print out the "bad" windows,
 * those windows that overlap with their immediate neighbors.
 */
#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
			      const char *fn)
{
	struct rb_node *n;
	struct ext4_reserve_window_node *rsv, *prev;
	int bad;

restart:
	n = rb_first(root);
	bad = 0;
	prev = NULL;

	printk(KERN_DEBUG "Block Allocation Reservation "
	       "Windows Map (%s):\n", fn);
	while (n) {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
		if (verbose)
			printk(KERN_DEBUG "reservation window 0x%p "
			       "start: %llu, end: %llu\n",
			       rsv, rsv->rsv_start, rsv->rsv_end);
		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
			printk(KERN_DEBUG "Bad reservation %p (start >= end)\n",
			       rsv);
			bad = 1;
		}
		if (prev && prev->rsv_end >= rsv->rsv_start) {
			printk(KERN_DEBUG "Bad reservation %p "
			       "(prev->end >= start)\n", rsv);
			bad = 1;
		}
		if (bad) {
			if (!verbose) {
				printk(KERN_DEBUG "Restarting reservation "
				       "walk in verbose mode\n");
				verbose = 1;
				goto restart;
			}
		}
		n = rb_next(n);
		prev = rsv;
	}
	printk(KERN_DEBUG "Window map complete.\n");
	BUG_ON(bad);
}
#define rsv_window_dump(root, verbose) \
	__rsv_window_dump((root), (verbose), __func__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif
/**
 * goal_in_my_reservation()
 * @rsv:		inode's reservation window
 * @grp_goal:		given goal block relative to the allocation block group
 * @group:		the current allocation block group
 * @sb:			filesystem super block
 *
 * Test if the given goal block (group relative) is within the file's
 * own block reservation window range.
 *
 * If the reservation window is outside the goal allocation group, return 0;
 * grp_goal (given goal block) could be -1, which means no specific
 * goal block.  In this case, always return 1.
 * If the goal block is within the reservation window, return 1;
 * otherwise, return 0.
 */
static int
goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
			ext4_group_t group, struct super_block *sb)
{
	ext4_fsblk_t group_first_block, group_last_block;

	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	if ((rsv->_rsv_start > group_last_block) ||
	    (rsv->_rsv_end < group_first_block))
		return 0;
	if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
		|| (grp_goal + group_first_block > rsv->_rsv_end)))
		return 0;
	return 1;
}
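/*
 * Worked example (illustrative): if group 2 spans blocks 65536..98303 and
 * the window is [65600, 65615], then grp_goal 70 (block 65606) is inside
 * the window and returns 1, while grp_goal 100 (block 65636) falls past
 * the window end and returns 0.
 */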
/**
 * search_reserve_window()
 * @root:		root of reservation tree
 * @goal:		target allocation block
 *
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext4_reserve_window_node *
search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
{
	struct rb_node *n = root->rb_node;
	struct ext4_reserve_window_node *rsv;

	if (!n)
		return NULL;

	do {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);

		if (goal < rsv->rsv_start)
			n = n->rb_left;
		else if (goal > rsv->rsv_end)
			n = n->rb_right;
		else
			return rsv;
	} while (n);
	/*
	 * We've fallen off the end of the tree: the goal wasn't inside
	 * any particular node.  OK, the previous node must be to one
	 * side of the interval containing the goal.  If it's the RHS,
	 * we need to back up one.
	 */
	if (rsv->rsv_start > goal) {
		n = rb_prev(&rsv->rsv_node);
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
	}
	return rsv;
}
/**
 * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
 * @sb:			super block
 * @rsv:		reservation window to add
 *
 * Must be called with rsv_lock held.
 */
void ext4_rsv_window_add(struct super_block *sb,
		    struct ext4_reserve_window_node *rsv)
{
	struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
	struct rb_node *node = &rsv->rsv_node;
	ext4_fsblk_t start = rsv->rsv_start;

	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ext4_reserve_window_node *this;

	while (*p)
	{
		parent = *p;
		this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);

		if (start < this->rsv_start)
			p = &(*p)->rb_left;
		else if (start > this->rsv_end)
			p = &(*p)->rb_right;
		else {
			rsv_window_dump(root, 1);
			BUG();
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
}
/**
 * ext4_rsv_window_remove() -- unlink a window from the reservation rb tree
 * @sb:			super block
 * @rsv:		reservation window to remove
 *
 * Mark the block reservation window as not allocated, and unlink it
 * from the filesystem reservation window rb tree.  Must be called with
 * rsv_lock held.
 */
static void rsv_window_remove(struct super_block *sb,
			      struct ext4_reserve_window_node *rsv)
{
	rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_alloc_hit = 0;
	rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
}
/*
 * rsv_is_empty() -- Check if the reservation window is allocated.
 * @rsv:		given reservation window to check
 *
 * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
 */
static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
{
	/* a valid reservation end block could not be 0 */
	return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
}
/**
 * ext4_init_block_alloc_info()
 * @inode:		file inode structure
 *
 * Allocate and initialize the reservation window structure, and
 * link the window to the ext4 inode structure at last
 *
 * The reservation window structure is only dynamically allocated
 * and linked to the ext4 inode the first time the open file
 * needs a new block.  So, before every ext4_new_block(s) call, for
 * regular files, we should check whether the reservation window
 * structure exists or not.  In the latter case, this function is called.
 * Failing to do so will result in block reservation being turned off for
 * that open file.
 *
 * This function is called from ext4_get_blocks_handle(), also called
 * when setting the reservation window size through ioctl before the file
 * is open for write (needs block allocation).
 *
 * Needs down_write(i_data_sem) protection prior to calling this function.
 */
void ext4_init_block_alloc_info(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct super_block *sb = inode->i_sb;

	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
	if (block_i) {
		struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;

		rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
		rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;

		/*
		 * if filesystem is mounted with NORESERVATION, the goal
		 * reservation window size is set to zero to indicate
		 * block reservation is off
		 */
		if (!test_opt(sb, RESERVATION))
			rsv->rsv_goal_size = 0;
		else
			rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
		rsv->rsv_alloc_hit = 0;
		block_i->last_alloc_logical_block = 0;
		block_i->last_alloc_physical_block = 0;
	}
	ei->i_block_alloc_info = block_i;
}
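/*
 * Illustrative caller sketch (a sketch of the rule described above; the
 * real call sites live elsewhere, e.g. inode.c):
 *
 *	if (S_ISREG(inode->i_mode) && !EXT4_I(inode)->i_block_alloc_info)
 *		ext4_init_block_alloc_info(inode);
 *
 * executed with down_write(&EXT4_I(inode)->i_data_sem) held.
 */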
/**
 * ext4_discard_reservation()
 * @inode:		inode
 *
 * Discard (free) the block reservation window on last file close, or
 * truncate, or at last iput().
 *
 * It is being called in three cases:
 *	ext4_release_file(): last writer closes the file
 *	ext4_clear_inode(): last iput(), when nothing links to this file.
 *	ext4_truncate(): when the block indirect map is about to change.
 */
void ext4_discard_reservation(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct ext4_reserve_window_node *rsv;
	spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;

	ext4_mb_discard_inode_preallocations(inode);

	if (!block_i)
		return;

	rsv = &block_i->rsv_window_node;
	if (!rsv_is_empty(&rsv->rsv_window)) {
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&rsv->rsv_window))
			rsv_window_remove(inode->i_sb, rsv);
		spin_unlock(rsv_lock);
	}
}
/**
 * ext4_free_blocks_sb() -- Free given blocks and update quota
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physical block to free
 * @count:			number of blocks to free
 * @pdquot_freed_blocks:	pointer to quota
 */
void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count,
			 unsigned long *pdquot_freed_blocks)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	ext4_grpblk_t bit;
	unsigned long i;
	unsigned long overflow;
	struct ext4_group_desc *desc;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int err = 0, ret;
	ext4_grpblk_t group_freed;

	*pdquot_freed_blocks = 0;
	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (block < le32_to_cpu(es->s_first_data_block) ||
	    block + count < block ||
	    block + count > ext4_blocks_count(es)) {
		ext4_error(sb, "ext4_free_blocks",
			   "Freeing blocks not in datazone - "
			   "block = %llu, count = %lu", block, count);
		goto error_return;
	}

	ext4_debug("freeing block(s) %llu-%llu\n", block, block + count - 1);

do_more:
	overflow = 0;
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	brelse(bitmap_bh);
	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group)) {
		ext4_error(sb, "ext4_free_blocks",
			   "Freeing blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);
		goto error_return;
	}

	/*
	 * We are about to start releasing blocks in the bitmap,
	 * so we need undo access.
	 */
	/* @@@ check errors */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;

	jbd_lock_bh_state(bitmap_bh);

	for (i = 0, group_freed = 0; i < count; i++) {
		/*
		 * An HJ special.  This is expensive...
		 */
#ifdef CONFIG_JBD2_DEBUG
		jbd_unlock_bh_state(bitmap_bh);
		{
			struct buffer_head *debug_bh;
			debug_bh = sb_find_get_block(sb, block + i);
			if (debug_bh) {
				BUFFER_TRACE(debug_bh, "Deleted!");
				if (!bh2jh(bitmap_bh)->b_committed_data)
					BUFFER_TRACE(debug_bh,
						"No committed data in bitmap");
				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
				__brelse(debug_bh);
			}
		}
		jbd_lock_bh_state(bitmap_bh);
#endif
		if (need_resched()) {
			jbd_unlock_bh_state(bitmap_bh);
			cond_resched();
			jbd_lock_bh_state(bitmap_bh);
		}
		/* @@@ This prevents newly-allocated data from being
		 * freed and then reallocated within the same
		 * transaction.
		 *
		 * Ideally we would want to allow that to happen, but to
		 * do so requires making jbd2_journal_forget() capable of
		 * revoking the queued write of a data block, which
		 * implies blocking on the journal lock.  *forget()
		 * cannot block due to truncate races.
		 *
		 * Eventually we can fix this by making jbd2_journal_forget()
		 * return a status indicating whether or not it was able
		 * to revoke the buffer.  On successful revoke, it is
		 * safe not to set the allocation bit in the committed
		 * bitmap, because we know that there is no outstanding
		 * activity on the buffer any more and so it is safe to
		 * reallocate it.
		 */
		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
		J_ASSERT_BH(bitmap_bh,
				bh2jh(bitmap_bh)->b_committed_data != NULL);
		ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
				bh2jh(bitmap_bh)->b_committed_data);

		/*
		 * We clear the bit in the bitmap after setting the committed
		 * data bit, because this is the reverse order to that which
		 * the allocator uses.
		 */
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
						bit + i, bitmap_bh->b_data)) {
			jbd_unlock_bh_state(bitmap_bh);
			ext4_error(sb, __func__,
				   "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			jbd_lock_bh_state(bitmap_bh);
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			group_freed++;
		}
	}
	jbd_unlock_bh_state(bitmap_bh);

	spin_lock(sb_bgl_lock(sbi, block_group));
	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_add(&sbi->s_freeblocks_counter, count);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		spin_lock(sb_bgl_lock(sbi, flex_group));
		sbi->s_flex_groups[flex_group].free_blocks += count;
		spin_unlock(sb_bgl_lock(sbi, flex_group));
	}

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_journal_dirty_metadata(handle, gd_bh);
	if (!err)
		err = ret;
	*pdquot_freed_blocks += group_freed;

	if (overflow && !err) {
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}
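/*
 * Worked example of the overflow handling above (illustrative): with
 * 32768 blocks per group, freeing count = 10 blocks starting at bit 32764
 * gives overflow = 32764 + 10 - 32768 = 6.  The first pass frees the 4
 * blocks that fit in this group, then jumps back to do_more to free the
 * remaining 6 blocks at the start of the next group.
 */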
/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:		handle for this transaction
 * @inode:		inode
 * @block:		start physical block to free
 * @count:		number of blocks to free
 * @metadata:		Are these metadata blocks
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t block, unsigned long count,
			int metadata)
{
	struct super_block *sb;
	unsigned long dquot_freed_blocks;

	/* this isn't the right place to decide whether block is metadata
	 * inode.c/extents.c knows better, but for safety ... */
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
			ext4_should_journal_data(inode))
		metadata = 1;

	sb = inode->i_sb;

	if (!test_opt(sb, MBALLOC) || !EXT4_SB(sb)->s_group_info)
		ext4_free_blocks_sb(handle, sb, block, count,
						&dquot_freed_blocks);
	else
		ext4_mb_free_blocks(handle, inode, block, count,
						metadata, &dquot_freed_blocks);
	if (dquot_freed_blocks)
		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
	return;
}
/**
 * ext4_test_allocatable()
 * @nr:			given block number (group relative)
 * @bh:			bufferhead containing the bitmap of the given block group
 *
 * For ext4 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
{
	int ret;
	struct journal_head *jh = bh2jh(bh);

	if (ext4_test_bit(nr, bh->b_data))
		return 0;

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data)
		ret = 1;
	else
		ret = !ext4_test_bit(nr, jh->b_committed_data);
	jbd_unlock_bh_state(bh);
	return ret;
}
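/*
 * Summary of the test above as a truth table (derived from the code, not
 * from the original comments):
 *
 *	bit in bitmap	bit in b_committed_data		allocatable?
 *	     1			   any			     no
 *	     0			    1			     no
 *	     0			0 (or no copy)		     yes
 */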
/**
 * bitmap_search_next_usable_block()
 * @start:		the starting block (group relative) of the search
 * @bh:			bufferhead containing the block group bitmap
 * @maxblocks:		the ending block (group relative) of the reservation
 *
 * The bitmap search --- search forward alternately through the actual
 * bitmap on disk and the last-committed copy in journal, until we find a
 * bit free in both bitmaps.
 */
static ext4_grpblk_t
bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
			ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t next;
	struct journal_head *jh = bh2jh(bh);

	while (start < maxblocks) {
		next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
		if (next >= maxblocks)
			return -1;
		if (ext4_test_allocatable(next, bh))
			return next;
		jbd_lock_bh_state(bh);
		if (jh->b_committed_data)
			start = ext4_find_next_zero_bit(jh->b_committed_data,
							maxblocks, next);
		jbd_unlock_bh_state(bh);
	}
	return -1;
}
/**
 * find_next_usable_block()
 * @start:		the starting block (group relative) to find next
 *			allocatable block in bitmap.
 * @bh:			bufferhead containing the block group bitmap
 * @maxblocks:		the ending block (group relative) for the search
 *
 * Find an allocatable block in a bitmap.  We honor both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext4_grpblk_t
find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
			ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t here, next;
	char *p, *r;

	if (start > 0) {
		/*
		 * The goal was occupied; search forward for a free
		 * block within the next XX blocks.
		 *
		 * end_goal is more or less random, but it has to be
		 * less than EXT4_BLOCKS_PER_GROUP.  Aligning up to the
		 * next 64-bit boundary is simple..
		 */
		ext4_grpblk_t end_goal = (start + 63) & ~63;
		if (end_goal > maxblocks)
			end_goal = maxblocks;
		here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
		if (here < end_goal && ext4_test_allocatable(here, bh))
			return here;
		ext4_debug("Bit not found near goal\n");
	}

	here = start;
	if (here < 0)
		here = 0;

	p = ((char *)bh->b_data) + (here >> 3);
	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
	next = (r - ((char *)bh->b_data)) << 3;

	if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
		return next;

	/*
	 * The bitmap search --- search forward alternately through the actual
	 * bitmap and the last-committed copy until we find a bit free in
	 * both
	 */
	here = bitmap_search_next_usable_block(here, bh, maxblocks);
	return here;
}
/**
 * claim_block()
 * @lock:		the spinlock protecting the bitmap
 * @block:		the free block (group relative) to allocate
 * @bh:			the bufferhead containing the block group bitmap
 *
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero.
 */
static inline int
claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);
	int ret;

	if (ext4_set_bit_atomic(lock, block, bh->b_data))
		return 0;
	jbd_lock_bh_state(bh);
	if (jh->b_committed_data && ext4_test_bit(block, jh->b_committed_data)) {
		ext4_clear_bit_atomic(lock, block, bh->b_data);
		ret = 0;
	} else {
		ret = 1;
	}
	jbd_unlock_bh_state(bh);
	return ret;
}
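/*
 * Why the second check is needed (an illustrative interleaving): after we
 * saw the bit clear, another thread may have claimed the block and freed
 * it again.  The free sets the bit in b_committed_data (see
 * ext4_free_blocks_sb), so finding it set here means reusing the block
 * could clobber data the committing transaction still depends on; we back
 * out and return 0.
 */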
/**
 * ext4_try_to_allocate()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 *
 * Attempt to allocate blocks within a given range.  Set the range of
 * allocation first, then find the first free bit(s) from the bitmap (within
 * the range), and at last, allocate the blocks by claiming the found free
 * bit as allocated.
 *
 * To set the range of this allocation:
 *	if there is a reservation window, only try to allocate block(s) from
 *	the file's own reservation window;
 *	Otherwise, the allocation range starts from the given goal block,
 *	and ends at the block group's last block.
 *
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext4_journal_release_buffer(), else we'll run out of credits.
 */
static ext4_grpblk_t
ext4_try_to_allocate(struct super_block *sb, handle_t *handle,
			ext4_group_t group, struct buffer_head *bitmap_bh,
			ext4_grpblk_t grp_goal, unsigned long *count,
			struct ext4_reserve_window *my_rsv)
{
	ext4_fsblk_t group_first_block;
	ext4_grpblk_t start, end;
	unsigned long num = 0;

	/* we do allocation within the reservation window if we have a window */
	if (my_rsv) {
		group_first_block = ext4_group_first_block_no(sb, group);
		if (my_rsv->_rsv_start >= group_first_block)
			start = my_rsv->_rsv_start - group_first_block;
		else
			/* reservation window crosses group boundary */
			start = 0;
		end = my_rsv->_rsv_end - group_first_block + 1;
		if (end > EXT4_BLOCKS_PER_GROUP(sb))
			/* reservation window crosses group boundary */
			end = EXT4_BLOCKS_PER_GROUP(sb);
		if ((start <= grp_goal) && (grp_goal < end))
			start = grp_goal;
		else
			grp_goal = -1;
	} else {
		if (grp_goal > 0)
			start = grp_goal;
		else
			start = 0;
		end = EXT4_BLOCKS_PER_GROUP(sb);
	}

	BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));

repeat:
	if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
		grp_goal = find_next_usable_block(start, bitmap_bh, end);
		if (grp_goal < 0)
			goto fail_access;
		if (!my_rsv) {
			int i;

			for (i = 0; i < 7 && grp_goal > start &&
					ext4_test_allocatable(grp_goal - 1,
								bitmap_bh);
					i++, grp_goal--)
				;
		}
	}
	start = grp_goal;

	if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
		grp_goal, bitmap_bh)) {
		/*
		 * The block was allocated by another thread, or it was
		 * allocated and then freed by another thread
		 */
		start++;
		grp_goal++;
		if (start >= end)
			goto fail_access;
		goto repeat;
	}
	num++;
	grp_goal++;
	while (num < *count && grp_goal < end
		&& ext4_test_allocatable(grp_goal, bitmap_bh)
		&& claim_block(sb_bgl_lock(EXT4_SB(sb), group),
				grp_goal, bitmap_bh)) {
		num++;
		grp_goal++;
	}
	*count = num;
	return grp_goal - num;
fail_access:
	*count = num;
	return -1;
}
/**
 * find_next_reservable_window():
 *		find a reservable space within the given range.
 *		It does not allocate the reservation window for now:
 *		alloc_new_reservation() will do the work later.
 *
 * @search_head: the head of the searching list;
 *		This is not necessarily the list head of the whole filesystem
 *
 *		We have both head and start_block to assist the search
 *		for the reservable space.  The list starts from head,
 *		but we will shift to the place where start_block is,
 *		then start from there, when looking for a reservable space.
 *
 * @my_rsv: the reservation window
 *
 * @sb: the super block
 *
 * @start_block: the first block we consider to start
 *			the real search from
 *
 * @last_block:
 *		the maximum block number that our goal reservable space
 *		could start from.  This is normally the last block in this
 *		group.  The search will end when we find that the start of the
 *		next possible reservable space is out of this boundary.
 *		This could handle the cross boundary reservation window
 *		request.
 *
 *	basically we search the given range (start_block, last_block),
 *	rather than the whole reservation red-black tree, to find a free
 *	region that is of my size and has not been reserved.
 */
static int find_next_reservable_window(
				struct ext4_reserve_window_node *search_head,
				struct ext4_reserve_window_node *my_rsv,
				struct super_block *sb,
				ext4_fsblk_t start_block,
				ext4_fsblk_t last_block)
{
	struct rb_node *next;
	struct ext4_reserve_window_node *rsv, *prev;
	ext4_fsblk_t cur;
	int size = my_rsv->rsv_goal_size;

	/* TODO: make the start of the reservation window byte-aligned */
	/* cur = *start_block & ~7;*/
	cur = start_block;
	rsv = search_head;
	if (!rsv)
		return -1;

	while (1) {
		if (cur <= rsv->rsv_end)
			cur = rsv->rsv_end + 1;

		/* TODO?
		 * in the case we could not find a reservable space
		 * that is what is expected, during the re-search, we could
		 * remember what's the largest reservable space we could have
		 * and return that one.
		 *
		 * For now it will fail if we could not find the reservable
		 * space with expected-size (or more)...
		 */
		if (cur > last_block)
			return -1;		/* fail */

		prev = rsv;
		next = rb_next(&rsv->rsv_node);
		rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);

		/*
		 * Reached the last reservation, we can just append to the
		 * previous one.
		 */
		if (!next)
			break;

		if (cur + size <= rsv->rsv_start) {
			/*
			 * Found a reservable space big enough.  We could
			 * have a reservation across the group boundary here
			 */
			break;
		}
	}
	/*
	 * we come here either:
	 * when we reach the end of the whole list,
	 * and there is empty reservable space after the last entry in the
	 * list.  append it to the end of the list.
	 *
	 * or we found one reservable space in the middle of the list,
	 * return the reservation window that we could append to.
	 * succeed.
	 */

	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
		rsv_window_remove(sb, my_rsv);

	/*
	 * Let's book the whole available window for now.  We will check the
	 * disk bitmap later and then, if there are free blocks then we adjust
	 * the window size if it's larger than requested.
	 * Otherwise, we will remove this node from the tree next time
	 * find_next_reservable_window is called.
	 */
	my_rsv->rsv_start = cur;
	my_rsv->rsv_end = cur + size - 1;
	my_rsv->rsv_alloc_hit = 0;

	if (prev != my_rsv)
		ext4_rsv_window_add(sb, my_rsv);

	return 0;
}
/**
 * alloc_new_reservation()--allocate a new reservation window
 *
 *	To make a new reservation, we search part of the filesystem
 *	reservation list (the list inside the group).  We try to
 *	allocate a new reservation window near the allocation goal,
 *	or the beginning of the group, if there is no goal.
 *
 *	We first find a reservable space after the goal, then from
 *	there, we check the bitmap for the first free block after
 *	it.  If there is no free block until the end of group, then the
 *	whole group is full, we failed.  Otherwise, check if the free
 *	block is inside the expected reservable space, if so, we
 *	succeed.
 *	If the first free block is outside the reservable space, then
 *	start from the first free block, we search for the next available
 *	space, and go on.
 *
 *	on success, a new reservation will be found and inserted into the
 *	list.  It contains at least one free block, and it does not overlap
 *	with other reservation windows.
 *
 *	failed: we failed to find a reservation window in this group
 *
 *	@my_rsv: the reservation window
 *
 *	@grp_goal: The goal (group-relative).  It is where the search for a
 *		free reservable space should start from.
 *		if we have a grp_goal(grp_goal > 0), then start from there,
 *		no grp_goal(grp_goal = -1), we start from the first block
 *		of the group.
 *
 *	@sb: the super block
 *	@group: the group we are trying to allocate in
 *	@bitmap_bh: the block group block bitmap
 *
 */
static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
		ext4_grpblk_t grp_goal, struct super_block *sb,
		ext4_group_t group, struct buffer_head *bitmap_bh)
{
	struct ext4_reserve_window_node *search_head;
	ext4_fsblk_t group_first_block, group_end_block, start_block;
	ext4_grpblk_t first_free_block;
	struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
	unsigned long size;
	int ret;
	spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

	group_first_block = ext4_group_first_block_no(sb, group);
	group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	if (grp_goal < 0)
		start_block = group_first_block;
	else
		start_block = grp_goal + group_first_block;

	size = my_rsv->rsv_goal_size;

	if (!rsv_is_empty(&my_rsv->rsv_window)) {
		/*
		 * if the old reservation crosses the group boundary
		 * and if the goal is inside the old reservation window,
		 * we will come here when we just failed to allocate from
		 * the first part of the window.  We still have another part
		 * that belongs to the next group.  In this case, there is no
		 * point in discarding our window and trying to allocate a
		 * new one in this group (which will fail).  We should
		 * keep the reservation window, just simply move on.
		 *
		 * Maybe we could shift the start block of the reservation
		 * window to the first block of the next group.
		 */
		if ((my_rsv->rsv_start <= group_end_block) &&
				(my_rsv->rsv_end > group_end_block) &&
				(start_block >= my_rsv->rsv_start))
			return -1;

		if ((my_rsv->rsv_alloc_hit >
		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
			/*
			 * if the previous allocation hit ratio is
			 * greater than 1/2, then we double the size of
			 * the reservation window the next time,
			 * otherwise we keep the same size window
			 */
			size = size * 2;
			if (size > EXT4_MAX_RESERVE_BLOCKS)
				size = EXT4_MAX_RESERVE_BLOCKS;
			my_rsv->rsv_goal_size = size;
		}
	}

	spin_lock(rsv_lock);
	/*
	 * shift the search start to the window near the goal block
	 */
	search_head = search_reserve_window(fs_rsv_root, start_block);

	/*
	 * find_next_reservable_window() simply finds a reservable window
	 * inside the given range(start_block, group_end_block).
	 *
	 * To make sure the reservation window has a free bit inside it, we
	 * need to check the bitmap after we found a reservable window.
	 */
retry:
	ret = find_next_reservable_window(search_head, my_rsv, sb,
						start_block, group_end_block);

	if (ret == -1) {
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;
	}

	/*
	 * On success, find_next_reservable_window() returns the
	 * reservation window where there is a reservable space after it.
	 * Before we reserve this reservable space, we need
	 * to make sure there is at least a free block inside this region.
	 *
	 * Search the first free bit on the block bitmap and the copy of the
	 * last committed bitmap alternately, until we find an allocatable
	 * block.  Search starts from the start block of the reservable space
	 * we just found.
	 */
	spin_unlock(rsv_lock);
	first_free_block = bitmap_search_next_usable_block(
			my_rsv->rsv_start - group_first_block,
			bitmap_bh, group_end_block - group_first_block + 1);

	if (first_free_block < 0) {
		/*
		 * no free block left on the bitmap, no point
		 * to reserve the space.  return failed.
		 */
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;		/* failed */
	}

	start_block = first_free_block + group_first_block;
	/*
	 * check if the first free block is within the
	 * free space we just reserved
	 */
	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
		return 0;		/* success */
	/*
	 * if the first free bit we found is out of the reservable space
	 * continue searching for the next reservable space,
	 * start from where the free block is,
	 * we also shift the list head to where we stopped last time
	 */
	search_head = my_rsv;
	spin_lock(rsv_lock);
	goto retry;
}
/**
 * try_to_extend_reservation()
 * @my_rsv:		given reservation window
 * @sb:			super block
 * @size:		the delta to extend
 *
 * Attempt to expand the reservation window large enough to have
 * required number of free blocks
 *
 * Since ext4_try_to_allocate() will always allocate blocks within
 * the reservation window range, if the window size is too small,
 * multiple blocks allocation has to stop at the end of the reservation
 * window.  To make this more efficient, given the total number of
 * blocks needed and the current size of the window, we try to
 * expand the reservation window size if necessary on a best-effort
 * basis before ext4_new_blocks() tries to allocate blocks.
 */
static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
			struct super_block *sb, int size)
{
	struct ext4_reserve_window_node *next_rsv;
	struct rb_node *next;
	spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

	if (!spin_trylock(rsv_lock))
		return;

	next = rb_next(&my_rsv->rsv_node);

	if (!next)
		my_rsv->rsv_end += size;
	else {
		next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);

		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
			my_rsv->rsv_end += size;
		else
			my_rsv->rsv_end = next_rsv->rsv_start - 1;
	}
	spin_unlock(rsv_lock);
}
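/*
 * Worked example (illustrative): my_rsv spans [100, 149] and the next
 * window starts at block 200 with size = 8.  The gap holds
 * 200 - 149 - 1 = 50 blocks, so rsv_end grows to 157.  Had the next
 * window started at 155, the gap would be only 5 blocks and rsv_end
 * would be capped at 154.
 */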
/**
 * ext4_try_to_allocate_with_rsv()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 * @errp:		pointer to store the error code
 *
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, first try to allocate from
 * the inode's own reservation.  If it does not have a reservation window,
 * instead of looking for a free bit in the bitmap first and then looking up
 * the reservation list to see if it is inside somebody else's reservation
 * window, we try to allocate a reservation window for it starting from the
 * goal first.  Then do the block allocation within the reservation window.
 *
 * This will avoid keeping on searching the reservation list again and
 * again when somebody is looking for a free block (without
 * reservation), and there are lots of free blocks, but they are all
 * being reserved.
 *
 * We use a red-black tree for the per-filesystem reservation list.
 *
 */
static ext4_grpblk_t
ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
			ext4_group_t group, struct buffer_head *bitmap_bh,
			ext4_grpblk_t grp_goal,
			struct ext4_reserve_window_node *my_rsv,
			unsigned long *count, int *errp)
{
	ext4_fsblk_t group_first_block, group_last_block;
	ext4_grpblk_t ret = 0;
	int fatal;
	unsigned long num = *count;

	*errp = 0;

	/*
	 * Make sure we use undo access for the bitmap, because it is critical
	 * that we do the frozen_data COW on bitmap buffers in all cases even
	 * if the buffer is in BJ_Forget state in the committing transaction.
	 */
	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
	fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (fatal) {
		*errp = fatal;
		return -1;
	}

	/*
	 * we don't deal with reservation when
	 * filesystem is mounted without reservation
	 * or the file is not a regular file
	 * or last attempt to allocate a block with reservation turned on failed
	 */
	if (my_rsv == NULL) {
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
						grp_goal, count, NULL);
		goto out;
	}
	/*
	 * grp_goal is a group relative block number (if there is a goal)
	 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
	 * first block is a filesystem wide block number
	 * first block is the block number of the first block in this group
	 */
	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	/*
	 * Basically we will allocate a new block from inode's reservation
	 * window.
	 *
	 * We need to allocate a new reservation window, if:
	 * a) inode does not have a reservation window; or
	 * b) last attempt to allocate a block from existing reservation
	 *    failed; or
	 * c) we come here with a goal and with a reservation window
	 *
	 * We do not need to allocate a new reservation window if we come here
	 * at the beginning with a goal and the goal is inside the window, or
	 * we don't have a goal but already have a reservation window.
	 * then we could go to allocate from the reservation window directly.
	 */
	while (1) {
		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
			!goal_in_my_reservation(&my_rsv->rsv_window,
						grp_goal, group, sb)) {
			if (my_rsv->rsv_goal_size < *count)
				my_rsv->rsv_goal_size = *count;
			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
							group, bitmap_bh);
			if (ret < 0)
				break;			/* failed */

			if (!goal_in_my_reservation(&my_rsv->rsv_window,
							grp_goal, group, sb))
				grp_goal = -1;
		} else if (grp_goal >= 0) {
			int curr = my_rsv->rsv_end -
					(grp_goal + group_first_block) + 1;

			if (curr < *count)
				try_to_extend_reservation(my_rsv, sb,
							*count - curr);
		}

		if ((my_rsv->rsv_start > group_last_block) ||
				(my_rsv->rsv_end < group_first_block)) {
			rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
			BUG();
		}
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
					   grp_goal, &num, &my_rsv->rsv_window);
		if (ret >= 0) {
			my_rsv->rsv_alloc_hit += num;
			*count = num;
			break;				/* succeed */
		}
		num = *count;
	}
out:
	if (ret >= 0) {
		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
					"bitmap block");
		fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
		if (fatal) {
			*errp = fatal;
			return -1;
		}
		return ret;
	}

	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
	ext4_journal_release_buffer(handle, bitmap_bh);
	return ret;
}
int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
						s64 nblocks)
{
	s64 free_blocks, dirty_blocks;
	s64 root_blocks = 0;
	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;

	free_blocks  = percpu_counter_read_positive(fbc);
	dirty_blocks = percpu_counter_read_positive(dbc);

	if (!capable(CAP_SYS_RESOURCE) &&
		sbi->s_resuid != current->fsuid &&
		(sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid)))
		root_blocks = ext4_r_blocks_count(sbi->s_es);

	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
		free_blocks  = percpu_counter_sum(fbc);
		dirty_blocks = percpu_counter_sum(dbc);
		if (dirty_blocks < 0) {
			printk(KERN_CRIT "Dirty block accounting "
					"went wrong %lld\n",
					dirty_blocks);
		}
	}
	/* Check whether we have space after
	 * accounting for current dirty blocks
	 */
	if (free_blocks < ((root_blocks + nblocks) + dirty_blocks))
		/* we don't have free space */
		return -ENOSPC;

	/* Claim nblocks by adding them to the dirty block counter */
	percpu_counter_add(dbc, nblocks);
	return 0;
}
/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 *
 * Check if filesystem has free blocks available for allocation.
 * Return the number of blocks available for allocation for this request
 * On success, return nblocks
 */
ext4_fsblk_t ext4_has_free_blocks(struct ext4_sb_info *sbi,
						s64 nblocks)
{
	s64 free_blocks, dirty_blocks;
	s64 root_blocks = 0;
	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;

	free_blocks  = percpu_counter_read_positive(fbc);
	dirty_blocks = percpu_counter_read_positive(dbc);

	if (!capable(CAP_SYS_RESOURCE) &&
		sbi->s_resuid != current->fsuid &&
		(sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid)))
		root_blocks = ext4_r_blocks_count(sbi->s_es);

	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
		free_blocks  = percpu_counter_sum(fbc);
		dirty_blocks = percpu_counter_sum(dbc);
	}
	if (free_blocks <= (root_blocks + dirty_blocks))
		/* we don't have free space */
		return 0;

	if (free_blocks - (root_blocks + dirty_blocks) < nblocks)
		return free_blocks - (root_blocks + dirty_blocks);
	return nblocks;
}
/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * if the total number of retries exceeds three times, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb), 1) || (*retries)++ > 3)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
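/*
 * Illustrative retry loop (a sketch of how callers use this helper;
 * ext4_do_an_allocation() is a made-up stand-in for any operation that can
 * fail with -ENOSPC):
 *
 *	int err, retries = 0;
 * retry:
 *	err = ext4_do_an_allocation(...);
 *	if (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
 *		goto retry;
 */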
/*
 * ext4_old_new_blocks() -- core block bitmap based block allocation function
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block(filesystem wide)
 * @count:		target number of blocks to allocate
 * @errp:		error code
 *
 * ext4_old_new_blocks uses a goal block to assist allocation and looks up
 * the block bitmap directly to do block allocation.  It tries to
 * allocate block(s) from the block group that contains the goal block
 * first.  If that fails, it will try to allocate block(s) from other block
 * groups without any specific goal block.
 *
 * This function is called when the -o nomballoc mount option is enabled
 *
 */
ext4_fsblk_t ext4_old_new_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gdp_bh;
	ext4_group_t group_no;
	ext4_group_t goal_group;
	ext4_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
	ext4_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block*/
	ext4_fsblk_t ret_block;		/* filesystem-wide allocated block */
	ext4_group_t bgi;		/* blockgroup iteration index */
	int fatal = 0, err;
	int performed_allocation = 0;
	ext4_grpblk_t free_blocks;	/* number of free blocks in a group */
	struct super_block *sb;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	struct ext4_reserve_window_node *my_rsv = NULL;
	struct ext4_block_alloc_info *block_i;
	unsigned short windowsz = 0;
	ext4_group_t ngroups;
	unsigned long num = *count;

	*errp = -ENOSPC;
	sb = inode->i_sb;
	if (!sb) {
		printk(KERN_ERR "ext4_new_block: nonexistent superblock");
		return 0;
	}

	sbi = EXT4_SB(sb);
	if (!EXT4_I(inode)->i_delalloc_reserved_flag) {
		/*
		 * With delalloc we already reserved the blocks
		 */
		while (*count && ext4_claim_free_blocks(sbi, *count)) {
			/* let others free some space */
			yield();
			*count = *count >> 1;
		}
		if (!*count) {
			*errp = -ENOSPC;
			return 0;	/* return with ENOSPC error */
		}
		num = *count;
	}
	/*
	 * Check quota for allocation of this block.
	 */
	if (DQUOT_ALLOC_BLOCK(inode, num)) {
		*errp = -EDQUOT;
		return 0;
	}

	es = EXT4_SB(sb)->s_es;
	ext4_debug("goal=%llu.\n", goal);
	/*
	 * Allocate a block from reservation only when
	 * filesystem is mounted with reservation(default,-o reservation), and
	 * it's a regular file, and
	 * the desired window size is greater than 0 (One could use ioctl
	 * command EXT4_IOC_SETRSVSZ to set the window size to 0 to turn off
	 * reservation on that particular file)
	 */
	block_i = EXT4_I(inode)->i_block_alloc_info;
	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
		my_rsv = &block_i->rsv_window_node;

	/*
	 * First, test whether the goal block is free.
	 */
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);
	ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
	goal_group = group_no;
retry_alloc:
	gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
	if (!gdp)
		goto io_error;

	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);

	if (free_blocks > 0) {
		/*
		 * try to allocate with group target block
		 * in the goal group.  If we have a low free_blocks
		 * count turn off reservation
		 */
		if (my_rsv && (free_blocks < windowsz)
			&& (rsv_is_empty(&my_rsv->rsv_window)))
			my_rsv = NULL;

		bitmap_bh = ext4_read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, grp_target_blk,
					my_rsv, &num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}

	ngroups = EXT4_SB(sb)->s_groups_count;
	smp_rmb();

	/*
	 * Now search the rest of the groups.  We assume that
	 * group_no and gdp correctly point to the last group visited.
	 */
	for (bgi = 0; bgi < ngroups; bgi++) {
		group_no++;
		if (group_no >= ngroups)
			group_no = 0;
		gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
		if (!gdp)
			goto io_error;
		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
		/*
		 * skip this group if the number of
		 * free blocks is less than half of the reservation
		 * window size.
		 */
		if (my_rsv && (free_blocks <= (windowsz/2)))
			continue;

		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		/*
		 * try to allocate block(s) from this group, without a goal(-1).
		 */
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, -1, my_rsv,
					&num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}
	/*
	 * We may end up with a bogus earlier ENOSPC error because the
	 * filesystem is "full" of reservations, while there may indeed be
	 * free blocks available on disk.  In this case, we just forget
	 * about the reservations and do block allocation as if there were
	 * no reservations.
	 */
	if (my_rsv) {
		my_rsv = NULL;
		windowsz = 0;
		group_no = goal_group;
		goto retry_alloc;
	}
	/* No space left on the device */
	*errp = -ENOSPC;
	goto out;

allocated:

	ext4_debug("using block group %lu(%d)\n",
			group_no, gdp->bg_free_blocks_count);

	BUFFER_TRACE(gdp_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, gdp_bh);
	if (fatal)
		goto out;

	ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);

	if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ret_block, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group) ||
	    in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group)) {
		ext4_error(sb, "ext4_new_block",
			    "Allocating block in system zone - "
			    "blocks from %llu, length %lu",
			    ret_block, num);
		/*
		 * claim_block marked the blocks we allocated
		 * as in use.  So we may want to selectively
		 * mark some of the blocks as free
		 */
		goto retry_alloc;
	}

	performed_allocation = 1;

#ifdef CONFIG_JBD2_DEBUG
	{
		struct buffer_head *debug_bh;

		/* Record bitmap buffer state in the newly allocated block */
		debug_bh = sb_find_get_block(sb, ret_block);
		if (debug_bh) {
			BUFFER_TRACE(debug_bh, "state when allocated");
			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
			brelse(debug_bh);
		}
	}
	jbd_lock_bh_state(bitmap_bh);
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
		int i;

		for (i = 0; i < num; i++) {
			if (ext4_test_bit(grp_alloc_blk+i,
					bh2jh(bitmap_bh)->b_committed_data)) {
				printk(KERN_ERR "%s: block was unexpectedly "
				       "set in b_committed_data\n", __func__);
			}
		}
	}
	ext4_debug("found bit %d\n", grp_alloc_blk);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	jbd_unlock_bh_state(bitmap_bh);
#endif

	if (ret_block + num - 1 >= ext4_blocks_count(es)) {
		ext4_error(sb, "ext4_new_block",
			    "block(%llu) >= blocks count(%llu) - "
			    "block_group = %lu, es == %p ", ret_block,
			ext4_blocks_count(es), group_no, es);
		goto out;
	}

	/*
	 * It is up to the caller to add the new buffer to a journal
	 * list of some description.  We don't know in advance whether
	 * the caller wants to use it as metadata or data.
	 */
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
	/*
	 * Now reduce the dirty block count also.  Should not go negative
	 */
	if (!EXT4_I(inode)->i_delalloc_reserved_flag)
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, *count);
	else
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, num);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, group_no);
		spin_lock(sb_bgl_lock(sbi, flex_group));
		sbi->s_flex_groups[flex_group].free_blocks -= num;
		spin_unlock(sb_bgl_lock(sbi, flex_group));
	}

	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
	err = ext4_journal_dirty_metadata(handle, gdp_bh);
	if (!fatal)
		fatal = err;

	sb->s_dirt = 1;
	if (fatal)
		goto out;

	*errp = 0;
	brelse(bitmap_bh);
	DQUOT_FREE_BLOCK(inode, *count-num);
	*count = num;
	return ret_block;

io_error:
	*errp = -EIO;
out:
	if (fatal) {
		*errp = fatal;
		ext4_std_error(sb, fatal);
	}
	/*
	 * Undo the block allocation
	 */
	if (!performed_allocation)
		DQUOT_FREE_BLOCK(inode, *count);
	brelse(bitmap_bh);
	return 0;
}
#define EXT4_META_BLOCK 0x1

static ext4_fsblk_t do_blk_alloc(handle_t *handle, struct inode *inode,
				ext4_lblk_t iblock, ext4_fsblk_t goal,
				unsigned long *count, int *errp, int flags)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	if (!test_opt(inode->i_sb, MBALLOC)) {
		return ext4_old_new_blocks(handle, inode, goal, count, errp);
	}

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */

	ar.inode = inode;
	ar.goal = goal;
	ar.len = *count;
	ar.logical = iblock;

	if (S_ISREG(inode->i_mode) && !(flags & EXT4_META_BLOCK))
		/* enable in-core preallocation for data block allocation */
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	*count = ar.len;
	return ret;
}
/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block(filesystem wide)
 * @count:		total number of blocks needed
 * @errp:		error code
 *
 * Return 1st allocated block number on success; *count stores the total
 * number of blocks, and the error is stored in the errp pointer.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
		ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	ext4_fsblk_t ret;

	ret = do_blk_alloc(handle, inode, 0, goal,
				count, errp, EXT4_META_BLOCK);
	/*
	 * Account for the allocated meta blocks
	 */
	if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += *count;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	}
	return ret;
}
/*
 * ext4_new_meta_block() -- allocate block for meta data (indexing) blocks
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block(filesystem wide)
 * @errp:		error code
 *
 * Return allocated block number on success
 */
ext4_fsblk_t ext4_new_meta_block(handle_t *handle, struct inode *inode,
		ext4_fsblk_t goal, int *errp)
{
	unsigned long count = 1;

	return ext4_new_meta_blocks(handle, inode, goal, &count, errp);
}
/*
 * ext4_new_blocks() -- allocate data blocks
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @iblock:		logical block of the file the allocation is for
 * @goal:		given target block(filesystem wide)
 * @count:		total number of blocks needed
 * @errp:		error code
 *
 * Return 1st allocated block number on success; *count stores the total
 * number of blocks, and the error is stored in the errp pointer.
 */
ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
				ext4_lblk_t iblock, ext4_fsblk_t goal,
				unsigned long *count, int *errp)
{
	return do_blk_alloc(handle, inode, iblock, goal, count, errp, 0);
}
/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned long x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n", ext4_free_blocks_count(es),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
	}

	return desc_count;
#endif
}
static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}
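/*
 * With sparse_super, only group 0, group 1 and groups that are powers of
 * 3, 5 or 7 keep a superblock backup: groups 0, 1, 3, 5, 7, 9, 25, 27,
 * 49, 81, ...  Every other group returns 0 here.
 */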
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0;
}
/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}