/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
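/*
 * For scale (assuming 4KiB pages and a 4KiB sectorsize): a bitmap occupies
 * one full page, so BITS_PER_BITMAP is 4096 * 8 = 32768 bits and a single
 * bitmap entry can describe 32768 * 4KiB = 128MiB of space.  The 32KiB per
 * GiB budget is what recalculate_thresholds() below enforces.
 */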
static void recalculate_thresholds(struct btrfs_block_group_cache
				   *block_group);
static int link_free_space(struct btrfs_block_group_cache *block_group,
			   struct btrfs_free_space *info);
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(root, path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(root, path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	spin_lock(&block_group->lock);
	if (!root->fs_info->closing) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 objectid;
	int ret;

	ret = btrfs_find_free_objectid(trans, root, 0, &objectid);
	if (ret < 0)
		return ret;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
			      BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
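	/*
	 * The flags above: the cache file carries its own crc array in its
	 * first page (see btrfs_write_out_cache() below), which is presumably
	 * why normal data checksums are disabled with NODATASUM; NOCOMPRESS
	 * and PREALLOC keep the file's extent layout simple so it can be
	 * rewritten in place.
	 */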
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item,
				    block_group->key.objectid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(root, path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	return 0;
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	loff_t oldsize;
	int ret = 0;

	trans->block_rsv = root->orphan_block_rsv;
	ret = btrfs_block_rsv_check(trans, root,
				    root->orphan_block_rsv,
				    0, 5);
	if (ret)
		return ret;

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret) {
		WARN_ON(1);
		return ret;
	}

	return btrfs_update_inode(trans, root, inode);
}
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct page *page;
	struct btrfs_path *path;
	u32 *checksums = NULL, *crc;
	char *disk_crcs = NULL;
	struct btrfs_key key;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u32 cur_crc = ~(u32)0;
	pgoff_t index = 0;
	unsigned long first_page_offset;
	int num_checksums;
	int ret = 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	smp_mb();
	if (fs_info->closing)
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	INIT_LIST_HEAD(&bitmaps);

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode)) {
		btrfs_free_path(path);
		goto out;
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		btrfs_free_path(path);
		goto out;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_free_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu) for "
		       "block group %llu\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation,
		       (unsigned long long)block_group->key.objectid);
		goto free_cache;
	}

	if (!num_entries)
		goto out;

	/* Setup everything for doing checksumming */
	num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
	checksums = crc = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
	if (!checksums)
		goto out;
	first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
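	/*
	 * First page layout, for illustration (assuming 4KiB pages): one u32
	 * crc per page of the cache file, then a u64 generation, then the
	 * first free space entries.  E.g. a 64KiB cache file has 16 pages,
	 * so first_page_offset = 16 * 4 + 8 = 72 bytes.
	 */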
	disk_crcs = kzalloc(first_page_offset, GFP_NOFS);
	if (!disk_crcs)
		goto out;

	ret = readahead_cache(inode);
	if (ret) {
		ret = 0;
		goto out;
	}

	while (1) {
		struct btrfs_free_space_entry *entry;
		struct btrfs_free_space *e;
		void *addr;
		unsigned long offset = 0;
		unsigned long start_offset = 0;
		int need_loop = 0;

		if (!num_entries && !num_bitmaps)
			break;

		if (index == 0) {
			start_offset = first_page_offset;
			offset = start_offset;
		}

		page = grab_cache_page(inode->i_mapping, index);
		if (!page)
			goto free_cache;

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache: %llu\n",
				       (unsigned long long)
				       block_group->key.objectid);
				goto free_cache;
			}
		}
		addr = kmap(page);

		if (index == 0) {
			u64 *gen;

			memcpy(disk_crcs, addr, first_page_offset);
			gen = addr + (sizeof(u32) * num_checksums);
			if (*gen != BTRFS_I(inode)->generation) {
				printk(KERN_ERR "btrfs: space cache generation"
				       " (%llu) does not match inode (%llu) "
				       "for block group %llu\n",
				       (unsigned long long)*gen,
				       (unsigned long long)
				       BTRFS_I(inode)->generation,
				       (unsigned long long)
				       block_group->key.objectid);
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}
			crc = (u32 *)disk_crcs;
		}
		entry = addr + start_offset;

		/* First lets check our crc before we do anything fun */
		cur_crc = ~(u32)0;
		cur_crc = btrfs_csum_data(root, addr + start_offset, cur_crc,
					  PAGE_CACHE_SIZE - start_offset);
		btrfs_csum_final(cur_crc, (char *)&cur_crc);
		if (cur_crc != *crc) {
			printk(KERN_ERR "btrfs: crc mismatch for page %lu in "
			       "block group %llu\n", index,
			       (unsigned long long)block_group->key.objectid);
			kunmap(page);
			unlock_page(page);
			page_cache_release(page);
			goto free_cache;
		}
		crc++;

		while (1) {
			if (!num_entries)
				break;

			need_loop = 1;
			e = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
			if (!e) {
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			e->offset = le64_to_cpu(entry->offset);
			e->bytes = le64_to_cpu(entry->bytes);
			if (!e->bytes) {
				kunmap(page);
				kfree(e);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
				spin_lock(&block_group->tree_lock);
				ret = link_free_space(block_group, e);
				spin_unlock(&block_group->tree_lock);
				BUG_ON(ret);
			} else {
				e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
				if (!e->bitmap) {
					kunmap(page);
					kfree(e);
					unlock_page(page);
					page_cache_release(page);
					goto free_cache;
				}
				spin_lock(&block_group->tree_lock);
				ret = link_free_space(block_group, e);
				block_group->total_bitmaps++;
				recalculate_thresholds(block_group);
				spin_unlock(&block_group->tree_lock);
				list_add_tail(&e->list, &bitmaps);
			}

			num_entries--;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				break;
			entry++;
		}

		/*
		 * We read an entry out of this page, we need to move on to the
		 * next page.
		 */
		if (need_loop) {
			kunmap(page);
			goto next;
		}

		/*
		 * We read the bitmap pages at the end, in the same order that
		 * the bitmap entries were added to the cache.
		 */
		e = list_entry(bitmaps.next, struct btrfs_free_space, list);
		list_del_init(&e->list);
		memcpy(e->bitmap, addr, PAGE_CACHE_SIZE);
		kunmap(page);
		num_bitmaps--;
next:
		unlock_page(page);
		page_cache_release(page);
		index++;
	}

	ret = 1;
out:
	kfree(checksums);
	kfree(disk_crcs);
	iput(inode);
	return ret;

free_cache:
	/* This cache is bogus, make sure it gets cleared */
	spin_lock(&block_group->lock);
	block_group->disk_cache_state = BTRFS_DC_CLEAR;
	spin_unlock(&block_group->lock);
	btrfs_remove_free_space_cache(block_group);
	goto out;
}
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct page *page;
	struct extent_state *cached_state = NULL;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 bytes = 0;
	u32 *crc, *checksums;
	pgoff_t index = 0, last_index = 0;
	unsigned long first_page_offset;
	int num_checksums;
	int entries = 0;
	int bitmaps = 0;
	int ret = 0;

	root = root->fs_info->tree_root;

	INIT_LIST_HEAD(&bitmap_list);

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	if (!i_size_read(inode)) {
		iput(inode);
		return 0;
	}

	node = rb_first(&block_group->free_space_offset);
	if (!node) {
		iput(inode);
		return 0;
	}

	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
	filemap_write_and_wait(inode->i_mapping);
	btrfs_wait_ordered_range(inode, inode->i_size &
				 ~(root->sectorsize - 1), (u64)-1);

	/* We need a checksum per page. */
	num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
	crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
	if (!crc) {
		iput(inode);
		return 0;
	}

	/*
	 * Since the first page has all of our checksums and our generation we
	 * need to calculate the offset into the page at which we can start
	 * writing our entries.
	 */
	first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
	/*
	 * Lock all pages first so we can lock the extent safely.
	 *
	 * NOTE: Because we hold the ref the entire time we're going to write to
	 * the page find_get_page should never fail, so we don't do a check
	 * after find_get_page at this point.  Just putting this here so people
	 * know and don't freak out.
	 */
	while (index <= last_index) {
		page = grab_cache_page(inode->i_mapping, index);
		if (!page) {
			pgoff_t i = 0;

			while (i < index) {
				page = find_get_page(inode->i_mapping, i);
				unlock_page(page);
				page_cache_release(page);
				page_cache_release(page);
				i++;
			}
			goto out_free;
		}
		index++;
	}

	index = 0;
	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state, GFP_NOFS);
	/* Write out the extent entries */
	do {
		struct btrfs_free_space_entry *entry;
		void *addr;
		unsigned long offset = 0;
		unsigned long start_offset = 0;

		if (index == 0) {
			start_offset = first_page_offset;
			offset = start_offset;
		}

		page = find_get_page(inode->i_mapping, index);

		addr = kmap(page);
		entry = addr + start_offset;

		memset(addr, 0, PAGE_CACHE_SIZE);
		while (1) {
			struct btrfs_free_space *e;

			e = rb_entry(node, struct btrfs_free_space,
				     offset_index);
			entries++;

			entry->offset = cpu_to_le64(e->offset);
			entry->bytes = cpu_to_le64(e->bytes);
			if (e->bitmap) {
				entry->type = BTRFS_FREE_SPACE_BITMAP;
				list_add_tail(&e->list, &bitmap_list);
				bitmaps++;
			} else {
				entry->type = BTRFS_FREE_SPACE_EXTENT;
			}
			node = rb_next(node);
			if (!node)
				break;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				break;
			entry++;
		}
		*crc = ~(u32)0;
		*crc = btrfs_csum_data(root, addr + start_offset, *crc,
				       PAGE_CACHE_SIZE - start_offset);
		kunmap(page);

		btrfs_csum_final(*crc, (char *)crc);
		crc++;

		bytes += PAGE_CACHE_SIZE;

		ClearPageChecked(page);
		set_page_extent_mapped(page);
		SetPageUptodate(page);
		set_page_dirty(page);

		/*
		 * We need to release our reference we got for grab_cache_page,
		 * except for the first page which will hold our checksums, we
		 * want to keep that one around until the checksums are written
		 * at the end.
		 */
		if (index != 0) {
			unlock_page(page);
			page_cache_release(page);
		}

		page_cache_release(page);

		index++;
	} while (node);
	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		void *addr;
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		page = find_get_page(inode->i_mapping, index);

		addr = kmap(page);
		memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
		*crc = ~(u32)0;
		*crc = btrfs_csum_data(root, addr, *crc, PAGE_CACHE_SIZE);
		kunmap(page);
		btrfs_csum_final(*crc, (char *)crc);
		crc++;
		bytes += PAGE_CACHE_SIZE;

		ClearPageChecked(page);
		set_page_extent_mapped(page);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		page_cache_release(page);
		list_del_init(&entry->list);
		index++;
	}
	/* Zero out the rest of the pages just to make sure */
	while (index <= last_index) {
		void *addr;

		page = find_get_page(inode->i_mapping, index);

		addr = kmap(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		kunmap(page);
		ClearPageChecked(page);
		set_page_extent_mapped(page);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		page_cache_release(page);
		bytes += PAGE_CACHE_SIZE;
		index++;
	}

	btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state);
	/* Write the checksums and trans id to the first page */
	{
		void *addr;
		u64 *gen;

		page = find_get_page(inode->i_mapping, 0);

		addr = kmap(page);
		memcpy(addr, checksums, sizeof(u32) * num_checksums);
		gen = addr + (sizeof(u32) * num_checksums);
		*gen = trans->transid;
		kunmap(page);

		ClearPageChecked(page);
		set_page_extent_mapped(page);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		page_cache_release(page);
	}
	BTRFS_I(inode)->generation = trans->transid;

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	filemap_write_and_wait(inode->i_mapping);
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
	if (ret < 0) {
		ret = 0;
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
		goto out_free;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != block_group->key.objectid) {
			ret = 0;
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC |
					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
					 GFP_NOFS);
			btrfs_release_path(root, path);
			goto out_free;
		}
	}
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	ret = 1;

out_free:
	if (ret != 1) {
		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		BTRFS_I(inode)->generation = 0;
	}
	kfree(checksums);
	btrfs_update_inode(trans, root, inode);
	iput(inode);
	return ret;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div64_u64(offset, sectorsize));
}

static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
{
	return (unsigned long)(div64_u64(bytes, sectorsize));
}
static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
	bitmap_start = offset - block_group->key.objectid;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += block_group->key.objectid;

	return bitmap_start;
}
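/*
 * Worked example (assuming a 4KiB sectorsize): bytes_per_bitmap is
 * 32768 * 4096 = 128MiB, so for a block group starting at 1GiB an offset
 * of 1GiB + 200MiB rounds down to 1GiB + 128MiB, the start of the second
 * bitmap range covering that group.
 */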
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before it
			 * logically.
			 */
			if (bitmap) {
				WARN_ON(info->bitmap);
				p = &(*p)->rb_right;
			} else {
				WARN_ON(!info->bitmap);
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
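/*
 * Example of the ordering rule above: if an extent entry and a bitmap entry
 * both start at offset X, an in-order walk of the rbtree visits the extent
 * entry first and then the bitmap.  tree_search_offset() below depends on
 * this: with bitmap_only set, an exact-offset match may land on the extent
 * entry and has to step one node to the right to reach the bitmap.
 */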
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_block_group_cache *block_group,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = block_group->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP *
		    block_group->sectorsize > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    block_group->sectorsize > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static void __unlink_free_space(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &block_group->free_space_offset);
	block_group->free_extents--;
}

static void unlink_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(block_group, info);
	block_group->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_block_group_cache *block_group,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	block_group->free_space += info->bytes;
	block_group->free_extents++;
	return ret;
}
static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
{
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		block_group->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	block_group->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
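/*
 * Worked example: an 8GiB block group gets max_bytes = 8 * 32KiB = 256KiB.
 * With 3 bitmaps in use (and 4KiB pages), bitmap_bytes = (3 + 1) * 4KiB =
 * 16KiB, so extent_bytes = min(256KiB - 16KiB, 256KiB / 2) = 128KiB and the
 * threshold works out to 128KiB / sizeof(struct btrfs_free_space) entries.
 */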
static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	unsigned long start, end;
	unsigned long i;

	start = offset_to_bit(info->offset, block_group->sectorsize, offset);
	end = start + bytes_to_bits(bytes, block_group->sectorsize);
	BUG_ON(end > BITS_PER_BITMAP);

	for (i = start; i < end; i++)
		clear_bit(i, info->bitmap);

	info->bytes -= bytes;
	block_group->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, end;
	unsigned long i;

	start = offset_to_bit(info->offset, block_group->sectorsize, offset);
	end = start + bytes_to_bits(bytes, block_group->sectorsize);
	BUG_ON(end > BITS_PER_BITMAP);

	for (i = start; i < end; i++)
		set_bit(i, info->bitmap);

	info->bytes += bytes;
	block_group->free_space += bytes;
}
static int search_bitmap(struct btrfs_block_group_cache *block_group,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, block_group->sectorsize);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * block_group->sectorsize) +
			bitmap_info->offset;
		*bytes = (u64)(found_bits) * block_group->sectorsize;
		return 0;
	}

	return -1;
}
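/*
 * Example: asking for 8KiB at or after *offset with a 4KiB sectorsize means
 * bits = 2, so the loop above scans for a run of at least two consecutive
 * set bits.  On success *offset and *bytes are rewritten to describe the
 * whole run found, which may be larger than what was asked for.
 */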
static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
						*block_group, u64 *offset,
						u64 *bytes, int debug)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!block_group->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(block_group,
				   offset_to_bitmap(block_group, *offset),
				   0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(block_group, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}
static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
			   struct btrfs_free_space *info, u64 offset)
{
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = (int)div64_u64(block_group->key.offset +
					 bytes_per_bg - 1, bytes_per_bg);
	BUG_ON(block_group->total_bitmaps >= max_bitmaps);
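	/*
	 * max_bitmaps above is a ceiling division: e.g. a 1GiB block group
	 * with 128MiB covered per bitmap (4KiB sectors assumed) allows
	 * (1GiB + 128MiB - 1) / 128MiB = 8 bitmaps.
	 */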
	info->offset = offset_to_bitmap(block_group, offset);
	info->bytes = 0;
	link_free_space(block_group, info);
	block_group->total_bitmaps++;

	recalculate_thresholds(block_group);
}
static void free_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(block_group, bitmap_info);
	kfree(bitmap_info->bitmap);
	kfree(bitmap_info);
	block_group->total_bitmaps--;
	recalculate_thresholds(block_group);
}
static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
			      struct btrfs_free_space *bitmap_info,
			      u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset +
		(u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case something's not
	 * working.  Search the bitmap for the space we are trying to use to
	 * make sure it's actually there.  If it's not there then we need to
	 * stop because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	ret = search_bitmap(block_group, bitmap_info, &search_start,
			    &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(block_group, bitmap_info, *offset,
				  end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(block_group, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(block_group, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(block_group, bitmap_info);

	return 0;
}
static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	int added = 0;
	u64 bytes, offset, end;
	int ret;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (block_group->free_extents < block_group->extents_thresh &&
	    info->bytes > block_group->sectorsize * 4)
		return 0;

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return 0;

	bytes = info->bytes;
	offset = info->offset;

again:
	bitmap_info = tree_search_offset(block_group,
					 offset_to_bitmap(block_group, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	end = bitmap_info->offset +
		(u64)(BITS_PER_BITMAP * block_group->sectorsize);
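	/*
	 * Note that 'end' here is exclusive, while remove_from_bitmap()
	 * computes an inclusive end (the '- 1' there); 'offset + bytes > end'
	 * below therefore means the range spills into the next bitmap and we
	 * will loop back around with the remainder.
	 */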
	if (offset >= bitmap_info->offset && offset + bytes > end) {
		bitmap_set_bits(block_group, bitmap_info, offset,
				end - offset);
		bytes -= end - offset;
		offset = end;
		added = 0;
	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
		bitmap_set_bits(block_group, bitmap_info, offset, bytes);
		bytes = 0;
	} else {
		BUG();
	}

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(block_group, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&block_group->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kzalloc(sizeof(struct btrfs_free_space),
				       GFP_NOFS);
			if (!info) {
				spin_lock(&block_group->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&block_group->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kfree(info);
	}

	return ret;
}
bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(block_group, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(block_group, right_info);
		else
			__unlink_free_space(block_group, right_info);
		info->bytes += right_info->bytes;
		kfree(right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(block_group, left_info);
		else
			__unlink_free_space(block_group, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kfree(left_info);
		merged = true;
	}

	return merged;
}
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
			 u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&block_group->tree_lock);

	if (try_merge_free_space(block_group, info, true))
		goto link;

	/*
	 * If there was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent,
	 * so before we do that see if we need to drop this into a bitmap.
	 */
	ret = insert_into_bitmap(block_group, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(block_group, info);
	if (ret)
		kfree(info);
out:
	spin_unlock(&block_group->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&block_group->tree_lock);

again:
	info = tree_search_offset(block_group, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(block_group,
					  offset_to_bitmap(block_group, offset),
					  1, 0);
		if (!info) {
			WARN_ON(1);
			goto out_lock;
		}
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;
		next_info = rb_entry(rb_next(&info->offset_index),
				     struct btrfs_free_space,
				     offset_index);

		if (next_info->bitmap)
			end = next_info->offset + BITS_PER_BITMAP *
				block_group->sectorsize - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			       " trying to use %llu\n",
			       (unsigned long long)info->offset,
			       (unsigned long long)info->bytes,
			       (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(block_group, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			block_group->total_bitmaps--;
		}
		kfree(info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(block_group, info);
		info->offset += bytes;
		info->bytes -= bytes;
		link_free_space(block_group, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(block_group, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(block_group, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kfree(info);
		}
		spin_unlock(&block_group->tree_lock);

		/* step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret);
		goto out;
	}

	ret = remove_from_bitmap(block_group, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret);
out_lock:
	spin_unlock(&block_group->tree_lock);
out:
	return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
	       "\n", count);
}
u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 ret = 0;

	for (n = rb_first(&block_group->free_space_offset); n;
	     n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		ret += info->bytes;
	}

	return ret;
}
/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	bool bitmap;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	bitmap = cluster->points_to_bitmap;
	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);
	cluster->points_to_bitmap = false;

	if (bitmap)
		goto out;

	node = rb_first(&cluster->root);
	while (node) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		BUG_ON(entry->bitmap);
		try_merge_free_space(block_group, entry, false);
		tree_insert_offset(&block_group->free_space_offset,
				   entry->offset, &entry->offset_index, 0);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space *info;
	struct rb_node *node;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&block_group->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&block_group->tree_lock);
			cond_resched();
			spin_lock(&block_group->tree_lock);
		}
	}

	while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		unlink_free_space(block_group, info);
		if (info->bitmap)
			kfree(info->bitmap);
		kfree(info);
		if (need_resched()) {
			spin_unlock(&block_group->tree_lock);
			cond_resched();
			spin_lock(&block_group->tree_lock);
		}
	}

	spin_unlock(&block_group->tree_lock);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&block_group->tree_lock);
	entry = find_free_space(block_group, &offset, &bytes_search, 0);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(block_group, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(block_group, entry);
	} else {
		unlink_free_space(block_group, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kfree(entry);
		else
			link_free_space(block_group, entry);
	}

out:
	spin_unlock(&block_group->tree_lock);

	return ret;
}
/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	/* now return any extents the cluster had on it */
	spin_lock(&block_group->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&block_group->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space *entry;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	spin_lock(&block_group->tree_lock);
	spin_lock(&cluster->lock);

	if (!cluster->points_to_bitmap)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	/*
	 * search_start is the beginning of the bitmap, but at some point it may
	 * be a good idea to point to the actual start of the free area in the
	 * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only
	 * to 1 to make sure we get the bitmap entry
	 */
	entry = tree_search_offset(block_group,
				   offset_to_bitmap(block_group, search_start),
				   1, 0);
	if (!entry || !entry->bitmap)
		goto out;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(block_group, entry, &search_start,
			    &search_bytes);
	if (err)
		goto out;

	ret = search_start;
	bitmap_clear_bits(block_group, entry, ret, bytes);
	if (entry->bytes == 0)
		free_bitmap(block_group, entry);
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&block_group->tree_lock);

	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	if (cluster->points_to_bitmap)
		return btrfs_alloc_from_bitmap(block_group, cluster, bytes,
					       min_start);

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes || entry->offset < min_start) {
			struct rb_node *node;

			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}
		ret = entry->offset;

		entry->offset += bytes;
		entry->bytes -= bytes;

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&block_group->tree_lock);

	block_group->free_space -= bytes;
	if (entry->bytes == 0) {
		block_group->free_extents--;
		kfree(entry);
	}

	spin_unlock(&block_group->tree_lock);

	return ret;
}
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	unsigned long next_zero;
	unsigned long i;
	unsigned long search_bits;
	unsigned long total_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	bool found = false;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	search_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
	total_bits = bytes_to_bits(bytes, block_group->sectorsize);

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= search_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -1;

	if (!found) {
		start = i;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < total_bits) {
		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
		if (i - start > total_bits * 2) {
			total_found = 0;
			cluster->max_size = 0;
			found = false;
		}
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	cluster->points_to_bitmap = true;

	return 0;
}
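/*
 * Example of the window arithmetic above (4KiB sectors assumed): if the scan
 * settles on a run starting at bit 100 of a bitmap whose entry->offset is
 * 1GiB, window_start becomes 1GiB + 100 * 4KiB.  The cluster only records
 * that it points at a bitmap; btrfs_alloc_from_bitmap() redoes the bit
 * search at allocation time.
 */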
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	struct btrfs_free_space *next;
	struct btrfs_free_space *last = NULL;
	u64 min_bytes;
	u64 window_start;
	u64 window_free;
	u64 max_extent = 0;
	bool found_bitmap = false;
	int ret;

	/* for metadata, allow allocates with more holes */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);
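	/*
	 * Worked example: bytes = 64KiB and empty_size = 960KiB give a 1MiB
	 * window target.  ssd_spread demands a single 1MiB extent; metadata
	 * demands extents of at least 512KiB while delayed refs are flushing
	 * and 64KiB otherwise; data demands at least 256KiB.
	 */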
	spin_lock(&block_group->tree_lock);
	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}
again:
	entry = tree_search_offset(block_group, offset, found_bitmap, 1);
	if (!entry) {
		ret = -ENOSPC;
		goto out;
	}

	/*
	 * If found_bitmap is true, we exhausted our search for extent entries,
	 * and we just want to search all of the bitmaps that we can find, and
	 * ignore any extent entries we find.
	 */
	while (entry->bitmap || found_bitmap ||
	       (!entry->bitmap && entry->bytes < min_bytes)) {
		struct rb_node *node = rb_next(&entry->offset_index);

		if (entry->bitmap && entry->bytes > bytes + empty_size) {
			ret = btrfs_bitmap_cluster(block_group, entry, cluster,
						   offset, bytes + empty_size,
						   min_bytes);
			if (!ret)
				goto got_it;
		}

		if (!node) {
			ret = -ENOSPC;
			goto out;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	/*
	 * We already searched all the extent entries from the passed in offset
	 * to the end and didn't find enough space for the cluster, and we also
	 * didn't find any bitmaps that met our criteria, just go ahead and exit
	 */
	if (found_bitmap) {
		ret = -ENOSPC;
		goto out;
	}

	cluster->points_to_bitmap = false;
	window_start = entry->offset;
	window_free = entry->bytes;
	last = entry;
	max_extent = entry->bytes;

	while (1) {
		/* our window is just right, let's fill it */
		if (window_free >= bytes + empty_size)
			break;

		node = rb_next(&last->offset_index);
		if (!node) {
			if (found_bitmap)
				goto again;
			ret = -ENOSPC;
			goto out;
		}
		next = rb_entry(node, struct btrfs_free_space, offset_index);

		/*
		 * we found a bitmap, so if this search doesn't result in a
		 * cluster, we know to go and search again for the bitmaps and
		 * start looking for space there
		 */
		if (next->bitmap) {
			if (!found_bitmap)
				offset = next->offset;
			found_bitmap = true;
			last = next;
			continue;
		}

		/*
		 * we haven't filled the empty size and the window is
		 * very large.  reset and try again
		 */
		if (next->offset - (last->offset + last->bytes) > 128 * 1024 ||
		    next->offset - window_start > (bytes + empty_size) * 2) {
			entry = next;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = entry->bytes;
		} else {
			last = next;
			window_free += next->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
	}

	cluster->window_start = entry->offset;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 *
	 * The cluster includes an rbtree, but only uses the offset index
	 * of each free space cache entry.
	 */
	while (1) {
		node = rb_next(&entry->offset_index);
		if (entry->bitmap && node) {
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		} else if (entry->bitmap && !node) {
			break;
		}

		rb_erase(&entry->offset_index, &block_group->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		BUG_ON(ret);

		if (!node || entry == last)
			break;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	cluster->max_size = max_extent;
got_it:
	ret = 0;
	atomic_inc(&block_group->count);
	list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
	cluster->block_group = block_group;
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&block_group->tree_lock);

	return ret;
}
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	cluster->points_to_bitmap = false;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}