/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		/* Allocation failed; fall back to the pre-allocated cache. */
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, free_stack);
			list_del(&child->free_stack);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->free_stack, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
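
/*
 * Usage sketch (illustrative, not part of this file): a driver that must
 * carve out a block while holding a spinlock can top up the node cache
 * first and then use the atomic helpers from drm_mm.h. The dev_priv
 * names below are hypothetical.
 *
 *	if (drm_mm_pre_get(&dev_priv->mm))
 *		return -ENOMEM;
 *
 *	spin_lock(&dev_priv->mm_lock);
 *	hole = drm_mm_search_free(&dev_priv->mm, size, alignment, 0);
 *	if (hole)
 *		node = drm_mm_get_block_atomic(hole, size, alignment);
 *	spin_unlock(&dev_priv->mm_lock);
 */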
static int drm_mm_create_tail_node(struct drm_mm *mm, unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->node_list, &mm->node_list);
	list_add_tail(&child->free_stack, &mm->free_stack);

	return 0;
}
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->free_stack);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->node_list, &parent->node_list);

	parent->size -= size;
	parent->start += size;
	return child;
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
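
/*
 * Allocation/free sketch (illustrative): a block is obtained by first
 * searching the free stack for a suitable hole and then carving the block
 * out of it; drm_mm_put_block() returns it to the manager. This assumes
 * the non-atomic drm_mm_get_block() wrapper from drm_mm.h.
 *
 *	struct drm_mm_node *hole, *node;
 *
 *	hole = drm_mm_search_free(mm, size, alignment, 0);
 *	if (!hole)
 *		return -ENOSPC;
 *	node = drm_mm_get_block(hole, size, alignment);
 *	...
 *	drm_mm_put_block(node);
 */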
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;

	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->node_list;
	struct list_head *root_head = &mm->node_list;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	BUG_ON(cur->scanned_block || cur->scanned_prev_free
				  || cur->scanned_next_free);

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, node_list);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->node_list);
				list_del(&next_node->free_stack);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->free_stack,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->free_stack, &mm->free_stack);
	} else {
		list_del(&cur->node_list);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->free_stack, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
			      unsigned alignment)
{
	unsigned wasted = 0;

	if (entry->size < size)
		return 0;

	if (alignment) {
		unsigned tmp = entry->start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	return entry->size >= size + wasted;
}
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		if (!check_free_mm_node(entry, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		if (entry->start > end || (entry->start + entry->size) < start)
			continue;

		if (!check_free_mm_node(entry, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct list_head *prev_free, *next_free;
	struct drm_mm_node *prev_node, *next_node;

	mm->scanned_blocks++;

	prev_free = next_free = NULL;

	BUG_ON(node->free);
	node->scanned_block = 1;
	node->free = 1;

	if (node->node_list.prev != &mm->node_list) {
		prev_node = list_entry(node->node_list.prev,
				       struct drm_mm_node, node_list);

		if (prev_node->free) {
			list_del(&prev_node->node_list);

			node->start = prev_node->start;
			node->size += prev_node->size;

			prev_node->scanned_prev_free = 1;

			prev_free = &prev_node->free_stack;
		}
	}

	if (node->node_list.next != &mm->node_list) {
		next_node = list_entry(node->node_list.next,
				       struct drm_mm_node, node_list);

		if (next_node->free) {
			list_del(&next_node->node_list);

			node->size += next_node->size;

			next_node->scanned_next_free = 1;

			next_free = &next_node->free_stack;
		}
	}

	/*
	 * The free_stack list is not used for allocated objects, so these two
	 * pointers can be abused (as long as no allocations in this memory
	 * manager happen).
	 */
	node->free_stack.prev = prev_free;
	node->free_stack.next = next_free;

	if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = node->start;
		mm->scan_hit_size = node->size;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it is at the top of the free_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node, *next_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;
	node->free = 0;

	/* These list_entry() calls rely on free_stack being the first member
	 * of struct drm_mm_node, so a NULL pointer stashed by
	 * drm_mm_scan_add_block() maps back to a NULL node. */
	prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
			       free_stack);
	next_node = list_entry(node->free_stack.next, struct drm_mm_node,
			       free_stack);

	if (prev_node) {
		BUG_ON(!prev_node->scanned_prev_free);
		prev_node->scanned_prev_free = 0;

		list_add_tail(&prev_node->node_list, &node->node_list);

		node->start = prev_node->start + prev_node->size;
		node->size -= prev_node->size;
	}

	if (next_node) {
		BUG_ON(!next_node->scanned_next_free);
		next_node->scanned_next_free = 0;

		list_add(&next_node->node_list, &node->node_list);

		node->size -= next_node->size;
	}

	INIT_LIST_HEAD(&node->free_stack);

	/*
	 * Only need to check for containment because start and size of the
	 * complete resulting free block (not just the desired part) are
	 * stored.
	 */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
			<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
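
/*
 * Eviction sketch (illustrative; obj, unwind_list, eviction_list and
 * unbind() are hypothetical driver-side names). Objects are scanned in
 * LRU order and collected on an unwind list with list_add(), so walking
 * that list forward rolls the scan back; only blocks flagged by
 * drm_mm_scan_remove_block() need to be evicted:
 *
 *	drm_mm_init_scan(mm, size, alignment);
 *	list_for_each_entry(obj, &lru_list, lru) {
 *		list_add(&obj->unwind, &unwind_list);
 *		if (drm_mm_scan_add_block(obj->mm_node))
 *			break;
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &unwind_list, unwind) {
 *		list_del(&obj->unwind);
 *		if (drm_mm_scan_remove_block(obj->mm_node))
 *			list_add(&obj->evict, &eviction_list);
 *	}
 *
 * Once the scan list is empty again, the blocks on eviction_list can be
 * unbound, and the resulting hole found with drm_mm_search_free().
 */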
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->node_list);
	INIT_LIST_HEAD(&mm->free_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
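
/*
 * Lifecycle sketch (illustrative): a driver typically initializes one
 * manager per address space and tears it down once every block has been
 * put back. The 64 MiB range below is arbitrary.
 *
 *	struct drm_mm mm;
 *
 *	ret = drm_mm_init(&mm, 0, 64 * 1024 * 1024);
 *	...allocate and free blocks...
 *	if (drm_mm_clean(&mm))
 *		drm_mm_takedown(&mm);
 */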
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->free_stack.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, free_stack);

	if (entry->node_list.next != &mm->node_list ||
	    entry->free_stack.next != &mm->free_stack) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->free_stack);
	list_del(&entry->node_list);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
		list_del(&entry->free_stack);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	int total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	int total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %d, used %d free %d\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif
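
/*
 * Debugfs sketch (illustrative): drm_mm_dump_table() is intended to be
 * called from a seq_file show callback; stashing the manager in
 * m->private as below is a hypothetical example.
 *
 *	static int my_mm_show(struct seq_file *m, void *unused)
 *	{
 *		struct drm_mm *mm = m->private;
 *
 *		return drm_mm_dump_table(m, mm);
 *	}
 */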