#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;
int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 * @mask:	flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map = RB_ROOT;
	rwlock_init(&tree->lock);
}
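
/*
 * Illustrative sketch, not part of the original file: a typical user embeds
 * an extent_map_tree in its own inode structure and initializes it once when
 * the inode is set up (em_tree is a hypothetical variable and GFP_NOFS is
 * only an example mask):
 *
 *	struct extent_map_tree em_tree;
 *
 *	extent_map_tree_init(&em_tree, GFP_NOFS);
 */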
/**
 * alloc_extent_map - allocate new extent map structure
 * @mask:	memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	atomic_set(&em->refs, 1);
	return em;
}
/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
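
/*
 * Illustrative sketch, not part of the original file: the usual life cycle
 * of an extent_map is allocate, fill in, use, then drop the caller's
 * reference (the field values below are examples only):
 *
 *	struct extent_map *em;
 *
 *	em = alloc_extent_map(GFP_NOFS);
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->len = 4096;
 *	...
 *	free_extent_map(em);
 *
 * free_extent_map() drops the reference taken by alloc_extent_map() and
 * frees the structure once the count reaches zero.
 */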
/*
 * insert @node into the rb-tree keyed by file offset.  Returns NULL on
 * success, or the existing node whose range already covers @offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
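
/*
 * Illustrative example, not part of the original file: two uncompressed,
 * unpinned maps with identical flags and bdev, one covering file range
 * [0, 4096) at block_start 8192 and one covering [4096, 8192) at
 * block_start 12288, satisfy the last clause above (extent_map_end(prev)
 * == next->start and next->block_start == extent_map_block_end(prev)),
 * so they may be merged into a single map covering [0, 8192).
 */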
/*
 * unpin an extent from the cache: clear EXTENT_FLAG_PINNED on the map that
 * starts at @start and merge it with adjacent maps where possible.
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}
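
/*
 * Illustrative sketch, not part of the original file: add_extent_mapping()
 * does not take the tree lock itself, so a caller typically does something
 * like the following (em_tree and em are hypothetical variables):
 *
 *	write_lock(&em_tree->lock);
 *	ret = add_extent_mapping(em_tree, em);
 *	write_unlock(&em_tree->lock);
 *
 * On success the tree holds its own reference on em; the caller still owns
 * the reference from alloc_extent_map() and drops it with free_extent_map()
 * when it is done with the map.
 */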
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_CAST(rb_node);
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}
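
/*
 * Illustrative sketch, not part of the original file: lookup_extent_mapping()
 * expects the caller to hold the tree lock, and the returned map may cover
 * only part of the requested range (em_tree, start and len are hypothetical
 * variables):
 *
 *	read_lock(&em_tree->lock);
 *	em = lookup_extent_mapping(em_tree, start, len);
 *	read_unlock(&em_tree->lock);
 *	if (em) {
 *		(compare em->start and extent_map_end(em) against the
 *		 requested range before relying on the mapping)
 *		free_extent_map(em);
 *	}
 *
 * The final free_extent_map() drops the reference this lookup took.
 */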
/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_CAST(rb_node);
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);

found:
	atomic_inc(&em->refs);
out:
	return em;
}
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}
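
/*
 * Illustrative sketch, not part of the original file: because
 * remove_extent_mapping() drops no references, a caller that evicts a cached
 * mapping usually removes it under the write lock and then releases the
 * tree's reference explicitly (em_tree and em are hypothetical variables):
 *
 *	write_lock(&em_tree->lock);
 *	remove_extent_mapping(em_tree, em);
 *	write_unlock(&em_tree->lock);
 *	free_extent_map(em);
 */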