/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE layers) in a local pool
 * so we don't need to go to the memory "store" during an id allocate,
 * and so you don't need to be too concerned about locking conflicts
 * with the slab allocator.
 */
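
/*
 * Usage sketch (illustrative only; "my_idr" and "example_lifecycle" are
 * hypothetical names, not part of this API).  The typical lifecycle:
 * preallocate, allocate an id for a pointer, translate the id back,
 * then release it.
 */
#if 0
static DEFINE_IDR(my_idr);

static int example_lifecycle(void *obj)
{
	int id, ret;

	do {
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;
		ret = idr_get_new_above(&my_idr, obj, 0, &id);
	} while (ret == -EAGAIN);
	if (ret)
		return ret;

	WARN_ON(idr_find(&my_idr, id) != obj);	/* id -> pointer */
	idr_remove(&my_idr, id);		/* release the id */
	return 0;
}
#endif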

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct kmem_cache *idr_layer_cache;
static DEFINE_SPINLOCK(simple_ida_lock);

static struct idr_layer *get_from_free_list(struct idr *idp)
	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
	spin_unlock_irqrestore(&idp->lock, flags);

static void idr_layer_rcu_free(struct rcu_head *head)
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);

static inline void free_layer(struct idr_layer *p)
	call_rcu(&p->rcu_head, idr_layer_rcu_free);

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
	p->ary[0] = idp->id_free;

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);

static void idr_mark_full(struct idr_layer **pa, int id)
	struct idr_layer *p = pa[0];

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		__set_bit((id & IDR_MASK), &p->bitmap);

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinlocks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		move_to_free_list(idp, new);
EXPORT_SYMBOL(idr_pre_get);

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
	struct idr_layer *p, *new;

	/*
	 * We run around this while loop until we reach the leaf node...
	 */
	n = (id >> (IDR_BITS*l)) & IDR_MASK;
	m = find_next_bit(&bm, IDR_SIZE, n);

	/* no space available, go back to the previous layer. */
	id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

	/* if already at the top layer, we need to grow */
	if (id >= 1 << (idp->layers * IDR_BITS)) {

	/* If we need to go up one layer, continue the
	 * loop; otherwise, restart from the top.
	 */
	sh = IDR_BITS * (l + 1);
	if (oid >> sh == id >> sh)

	id = ((id >> sh) ^ n ^ m) << sh;
	if ((id >= MAX_IDR_BIT) || (id < 0))

	/*
	 * Create the layer below if it is missing.
	 */
	new = get_from_free_list(idp);
	rcu_assign_pointer(p->ary[m], new);

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
	struct idr_layer *p, *new;

	layers = idp->layers;
	if (!(p = get_from_free_list(idp)))

	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		/* special case: if the tree is currently empty,
		 * then we grow the tree by moving the top node
		 * upwards.
		 */
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure, tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			spin_unlock_irqrestore(&idp->lock, flags);
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(void *ptr, int id, struct idr_layer **pa)
	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	idr_mark_full(pa, id);

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
	struct idr_layer *pa[MAX_IDR_LEVEL];

	rv = idr_get_empty_slot(idp, starting_id, pa);
	if (rv < 0)
		return rv == -ENOMEM ? -EAGAIN : rv;

	idr_fill_slot(ptr, rv, pa);
EXPORT_SYMBOL(idr_get_new_above);
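
/*
 * Allocation sketch under a caller-provided lock (illustrative only;
 * "my_lock" and "example_alloc_locked" are hypothetical names).
 * idr_pre_get() may sleep, so call it unlocked and retry on -EAGAIN:
 */
#if 0
static int example_alloc_locked(struct idr *idp, void *ptr,
				spinlock_t *my_lock)
{
	int id, ret;

again:
	if (!idr_pre_get(idp, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(my_lock);
	ret = idr_get_new_above(idp, ptr, 1, &id);
	spin_unlock(my_lock);
	if (ret == -EAGAIN)
		goto again;
	return ret ? ret : id;
}
#endif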

static void idr_remove_warning(int id)
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);

static void sub_remove(struct idr *idp, int shift, int id)
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);

	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		while (*paa && !--((**paa)->count)) {

		idr_remove_warning(id);

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		to_free->bitmap = to_free->count = 0;

	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
EXPORT_SYMBOL(idr_remove);

void __idr_remove_all(struct idr *idp)
	struct idr_layer *pa[MAX_IDR_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	rcu_assign_pointer(idp->top, NULL);

	while (n > IDR_BITS && p) {
		p = p->ary[(id >> n) & IDR_MASK];

	/* Get the highest bit that the above add changed from 0->1. */
	while (n < fls(id ^ bt_mask)) {
EXPORT_SYMBOL(__idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idp_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * this call.
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
EXPORT_SYMBOL(idr_destroy);
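
/*
 * Clean-up sketch of the sequence described above (illustrative only;
 * "free_one" and "example_teardown" are hypothetical names, and the
 * objects are assumed to have been kmalloc'ed):
 */
#if 0
static int free_one(int id, void *p, void *data)
{
	kfree(p);		/* free the stored object */
	return 0;
}

static void example_teardown(struct idr *idp)
{
	idr_for_each(idp, free_one, NULL);	/* free all objects */
	idr_destroy(idp);	/* free id mappings and cached layers */
}
#endif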

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
	p = rcu_dereference_raw(idp->top);

	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */

	BUG_ON(n != p->layer*IDR_BITS);
	p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
EXPORT_SYMBOL(idr_find);
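
/*
 * RCU lookup sketch (illustrative only; "example_lookup" is a
 * hypothetical name).  Readers need only rcu_read_lock() as long as
 * the stored object's lifetime covers the read-side critical section:
 */
#if 0
static void example_lookup(struct idr *idp, int id)
{
	void *p;

	rcu_read_lock();
	p = idr_find(idp, id);
	if (p)
		pr_info("found %p for id %d\n", p, id);
	rcu_read_unlock();
}
#endif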

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
	int n, id, max, error = 0;
	struct idr_layer *pa[MAX_IDR_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);

		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);

		error = fn(id, (void *)p, data);

		while (n < fls(id)) {
EXPORT_SYMBOL(idr_for_each);
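
/*
 * Callback sketch (illustrative only; "count_one" is a hypothetical
 * name).  The callback sees every (id, pointer) pair; a non-zero
 * return stops the walk and is passed back to the caller:
 */
#if 0
static int count_one(int id, void *p, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* keep iterating */
}

/* usage:  int n = 0;  idr_for_each(idp, count_one, &n); */
#endif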

/**
 * idr_get_next - look up the next object for an id >= the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that
 * is greater than or equal to *@nextidp.  On success, *@nextidp is
 * updated to that id, ready for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
	struct idr_layer *p, *pa[MAX_IDR_LEVEL];
	struct idr_layer **paa = &pa[0];

	p = rcu_dereference_raw(idp->top);

	n = (p->layer + 1) * IDR_BITS;

		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
EXPORT_SYMBOL(idr_get_next);
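
/*
 * Cursor-style walk sketch (illustrative only; "example_walk" is a
 * hypothetical name).  Unlike idr_for_each(), the cursor lives in the
 * caller, so the body may drop locks between steps:
 */
#if 0
static void example_walk(struct idr *idp)
{
	void *p;
	int id = 0;

	while ((p = idr_get_next(idp, &id)) != NULL) {
		pr_info("id %d -> %p\n", id, p);
		id++;	/* advance past the entry just returned */
	}
}
#endif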

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
	struct idr_layer *p, *old_p;

		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

		return ERR_PTR(-EINVAL);

	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];

	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	rcu_assign_pointer(p->ary[n], ptr);
EXPORT_SYMBOL(idr_replace);
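
/*
 * Replace sketch (illustrative only; "example_replace" is a
 * hypothetical name, and the old object is assumed to have been
 * kmalloc'ed).  The return value is the old pointer or an ERR_PTR,
 * so check it with IS_ERR() rather than against NULL:
 */
#if 0
static int example_replace(struct idr *idp, int id, void *new_obj)
{
	void *old = idr_replace(idp, new_obj, id);

	if (IS_ERR(old))
		return PTR_ERR(old);	/* -ENOENT or -EINVAL */
	kfree(old);
	return 0;
}
#endif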

void __init idr_init_cache(void)
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
EXPORT_SYMBOL(idr_init);
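
/*
 * Initialization sketch (illustrative only; "my_idr" and
 * "example_init" are hypothetical names).  An idr can be set up at
 * run time with idr_init() or statically with DEFINE_IDR():
 */
#if 0
static struct idr my_idr;

static int __init example_init(void)
{
	idr_init(&my_idr);
	return 0;
}

/* equivalent static form:  static DEFINE_IDR(my_idr); */
#endif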

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);

		free_bitmap(ida, bitmap);
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
	struct idr_layer *pa[MAX_IDR_LEVEL];
	struct ida_bitmap *bitmap;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;

	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);

	/* look for an empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
			kmem_cache_free(idr_layer_cache, p);
EXPORT_SYMBOL(ida_get_new_above);
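
/*
 * ida allocation sketch (illustrative only; "my_lock" and
 * "example_ida_alloc" are hypothetical names).  The retry protocol
 * mirrors the idr one: preallocate unlocked, allocate locked, retry
 * on -EAGAIN:
 */
#if 0
static int example_ida_alloc(struct ida *ida, spinlock_t *my_lock)
{
	int id, ret;

again:
	if (!ida_pre_get(ida, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(my_lock);
	ret = ida_get_new_above(ida, 0, &id);
	spin_unlock(my_lock);
	if (ret == -EAGAIN)
		goto again;
	return ret ? ret : id;
}
#endif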

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);

	printk(KERN_WARNING
		"ida_remove called for id=%d which is not allocated.\n", id);
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000, or 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (!ida_pre_get(ida, gfp_mask))

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);

	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
EXPORT_SYMBOL(ida_simple_get);
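
/*
 * Simple-API sketch (illustrative only; "my_ida" and "example_simple"
 * are hypothetical names).  ida_simple_get() handles preallocation,
 * retry and locking internally, so no caller-side loop is needed:
 */
#if 0
static DEFINE_IDA(my_ida);

static int example_simple(void)
{
	int nr = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);

	if (nr < 0)
		return nr;	/* -ENOMEM or -ENOSPC */
	pr_info("got id %d\n", nr);
	ida_simple_remove(&my_ida, nr);
	return 0;
}
#endif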

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
	spin_lock_irqsave(&simple_ida_lock, flags);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
	memset(ida, 0, sizeof(struct ida));
EXPORT_SYMBOL(ida_init);