/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 *	find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE layers) in a local pool,
 * so we don't need to go to the memory "store" during an id allocation
 * and you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
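/*
 * A minimal usage sketch (not part of the original file) of the typical
 * allocate / look up / release cycle, using the idr_alloc()/idr_find()/
 * idr_remove() interface defined below.  "my_idr", "my_lock" and
 * "struct my_obj" are illustrative names only:
 *
 *	static DEFINE_IDR(my_idr);
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	int register_obj(struct my_obj *obj)
 *	{
 *		int id;
 *
 *		idr_preload(GFP_KERNEL);
 *		spin_lock(&my_lock);
 *		id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
 *		spin_unlock(&my_lock);
 *		idr_preload_end();
 *		return id;			// new id, or a negative errno
 *	}
 *
 *	struct my_obj *lookup_obj(int id)
 *	{
 *		struct my_obj *obj;
 *
 *		rcu_read_lock();
 *		obj = idr_find(&my_idr, id);	// NULL if id is not in use
 *		rcu_read_unlock();
 *		return obj;
 *	}
 *
 *	void unregister_obj(int id)
 *	{
 *		spin_lock(&my_lock);
 *		idr_remove(&my_idr, id);
 *		spin_unlock(&my_lock);
 *	}
 *
 * The object itself must not be freed until an RCU grace period after
 * idr_remove(), see idr_alloc() below.
 */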
#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of idr_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
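/*
 * Worked example (not part of the original file), assuming IDR_BITS is 8
 * as defined in <linux/idr.h> at the time of writing: MAX_IDR_SHIFT is 31
 * for 32-bit ints, so MAX_IDR_LEVEL = (31 + 8 - 1) / 8 = 4 layers suffice
 * to cover every non-negative int, and MAX_IDR_FREE = 4 * 2 = 8 idr_layers
 * are kept on the per-idr free list.
 */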
static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);
/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
        int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

        return (1 << bits) - 1;

static struct idr_layer *get_from_free_list(struct idr *idp)
        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
        spin_unlock_irqrestore(&idp->lock, flags);
/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @layer_idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with the per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
        struct idr_layer *new;

        /* this is the old path, bypass to get_from_free_list() */
        if (layer_idr)
                return get_from_free_list(layer_idr);
        /* try to allocate directly from kmem_cache */
        new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);

        /*
         * Try to fetch one from the per-cpu preload buffer if in process
         * context.  See idr_preload() for details.
         */
        new = __this_cpu_read(idr_preload_head);
        __this_cpu_write(idr_preload_head, new->ary[0]);
        __this_cpu_dec(idr_preload_cnt);
static void idr_layer_rcu_free(struct rcu_head *head)
        struct idr_layer *layer;

        layer = container_of(head, struct idr_layer, rcu_head);
        kmem_cache_free(idr_layer_cache, layer);

static inline void free_layer(struct idr_layer *p)
        call_rcu(&p->rcu_head, idr_layer_rcu_free);

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
        p->ary[0] = idp->id_free;

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
static void idr_mark_full(struct idr_layer **pa, int id)
        struct idr_layer *p = pa[0];

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full, mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                __set_bit((id & IDR_MASK), &p->bitmap);
/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinlocks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
        while (idp->id_free_cnt < MAX_IDR_FREE) {
                struct idr_layer *new;
                new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
                move_to_free_list(idp, new);
EXPORT_SYMBOL(idr_pre_get);
/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
                     gfp_t gfp_mask, struct idr *layer_idr)
        struct idr_layer *p, *new;

        /*
         * We run around this while until we reach the leaf node...
         */
        n = (id >> (IDR_BITS*l)) & IDR_MASK;
        m = find_next_bit(&bm, IDR_SIZE, n);

        /* no space available, go back to the previous layer */
        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

        /* if already at the top layer, we need to grow */
        if (id >= 1 << (idp->layers * IDR_BITS)) {

        /*
         * If we need to go up one layer, continue the
         * loop; otherwise, restart from the top.
         */
        sh = IDR_BITS * (l + 1);
        if (oid >> sh == id >> sh)

        id = ((id >> sh) ^ n ^ m) << sh;
        if ((id >= MAX_IDR_BIT) || (id < 0))

        /*
         * Create the layer below if it is missing.
         */
        new = idr_layer_alloc(gfp_mask, layer_idr);
        rcu_assign_pointer(p->ary[m], new);
static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa, gfp_t gfp_mask,
                              struct idr *layer_idr)
        struct idr_layer *p, *new;

        layers = idp->layers;
        if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))

        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while (id > idr_max(layers)) {
                /* special case: if the tree is currently empty,
                 * then we grow the tree by moving the top node
                 * upwards.
                 */
                if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure, tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                new->bitmap = new->count = 0;
                                __move_to_free_list(idp, new);
                        spin_unlock_irqrestore(&idp->lock, flags);
                new->layer = layers - 1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(void *ptr, int id, struct idr_layer **pa)
        rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
        idr_mark_full(pa, id);
/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full, idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];

        rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
        if (rv < 0)
                return rv == -ENOMEM ? -EAGAIN : rv;

        idr_fill_slot(ptr, rv, pa);
EXPORT_SYMBOL(idr_get_new_above);
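/*
 * Sketch (not from the original file) of the retry loop the comment above
 * describes for the legacy interface; "my_idr", "my_lock" and "obj" are
 * illustrative names:
 *
 *	int id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new_above(&my_idr, obj, 1, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *
 *	return err ? err : id;
 */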
/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
        /*
         * Consuming preload buffer from non-process context breaks preload
         * allocation guarantee.  Disallow usage from those contexts.
         */
        WARN_ON_ONCE(in_interrupt());
        might_sleep_if(gfp_mask & __GFP_WAIT);

        /*
         * idr_alloc() is likely to succeed without a full idr_layer buffer
         * and the return value from idr_alloc() needs to be checked for
         * failure anyway.  Silently give up if allocation fails.  The caller
         * can treat failures from idr_alloc() as if idr_alloc() were called
         * with @gfp_mask, which should be enough.
         */
        while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
                struct idr_layer *new;

                new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);

                /* link the new one to the per-cpu preload list */
                new->ary[0] = __this_cpu_read(idr_preload_head);
                __this_cpu_write(idr_preload_head, new);
                __this_cpu_inc(idr_preload_cnt);
EXPORT_SYMBOL(idr_preload);
/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in an RCU-safe way after removal from the idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
        int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];

        might_sleep_if(gfp_mask & __GFP_WAIT);

        if (WARN_ON_ONCE(start < 0))
        if (unlikely(max < start))

        id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
        if (unlikely(id < 0))
        if (unlikely(id > max))

        idr_fill_slot(ptr, id, pa);
EXPORT_SYMBOL_GPL(idr_alloc);
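/*
 * Sketch (not from the original file) of the RCU-safe removal the comment
 * above requires: a reader may still hold the pointer it obtained from
 * idr_find() under rcu_read_lock(), so the object is freed only after a
 * grace period.  "my_idr", "my_lock" and "struct my_obj" (assumed to carry
 * its id and to embed a struct rcu_head called "rcu") are illustrative:
 *
 *	void destroy_obj(struct my_obj *obj)
 *	{
 *		spin_lock(&my_lock);
 *		idr_remove(&my_idr, obj->id);
 *		spin_unlock(&my_lock);
 *		kfree_rcu(obj, rcu);	// freed after an RCU grace period
 *	}
 */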
static void idr_remove_warning(int id)
        printk(KERN_WARNING
               "idr_remove called for id=%d which is not allocated.\n", id);

static void sub_remove(struct idr *idp, int shift, int id)
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_IDR_LEVEL + 1];
        struct idr_layer ***paa = &pa[0];
        struct idr_layer *to_free;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);

        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                rcu_assign_pointer(p->ary[n], NULL);
                while (*paa && !--((**paa)->count)) {
        idr_remove_warning(id);
/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
        struct idr_layer *to_free;

        if (WARN_ON_ONCE(id < 0))

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
                /*
                 * Single child at leftmost slot: we can shrink the tree.
                 * This level is not needed anymore since when layers are
                 * inserted, they are inserted at the top of the existing
                 * tree.
                 */
                p = idp->top->ary[0];
                rcu_assign_pointer(idp->top, p);
                to_free->bitmap = to_free->count = 0;
        while (idp->id_free_cnt >= MAX_IDR_FREE) {
                p = get_from_free_list(idp);
                /*
                 * Note: we don't call the rcu callback here, since the only
                 * layers that fall into the freelist are those that have been
                 * preallocated.
                 */
                kmem_cache_free(idr_layer_cache, p);
EXPORT_SYMBOL(idr_remove);
void __idr_remove_all(struct idr *idp)
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        rcu_assign_pointer(idp->top, NULL);
        max = idr_max(idp->layers);

        while (id >= 0 && id <= max) {
                while (n > IDR_BITS && p) {
                        p = p->ary[(id >> n) & IDR_MASK];

                /* Get the highest bit that the above add changed from 0->1. */
                while (n < fls(id ^ bt_mask)) {
EXPORT_SYMBOL(__idr_remove_all);
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
        __idr_remove_all(idp);

        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
EXPORT_SYMBOL(idr_destroy);
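/*
 * Sketch (not from the original file) of the clean-up sequence described
 * in the comment above; "my_idr" and the callback are illustrative names:
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);	// or whatever destructor the objects need
 *		return 0;	// keep iterating
 *	}
 *
 *	void my_teardown(void)
 *	{
 *		idr_for_each(&my_idr, free_one, NULL);
 *		idr_destroy(&my_idr);
 *	}
 */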
/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
        if (WARN_ON_ONCE(id < 0))

        p = rcu_dereference_raw(idp->top);

        n = (p->layer + 1) * IDR_BITS;

        if (id > idr_max(p->layer + 1))

        BUG_ON(n != p->layer * IDR_BITS);
        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
EXPORT_SYMBOL(idr_find);
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
        int n, id, max, error = 0;
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = rcu_dereference_raw(idp->top);
        max = idr_max(idp->layers);

        while (id >= 0 && id <= max) {
                p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);

                error = fn(id, (void *)p, data);

                while (n < fls(id)) {
EXPORT_SYMBOL(idr_for_each);
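/*
 * Sketch (not from the original file) showing how a non-zero return value
 * from the callback stops the walk early, here used as a membership test;
 * "my_idr" and the helpers are illustrative names:
 *
 *	static int has_ptr(int id, void *p, void *data)
 *	{
 *		return p == data;	// non-zero return stops the walk
 *	}
 *
 *	bool obj_is_registered(void *obj)
 *	{
 *		return idr_for_each(&my_idr, has_ptr, obj) != 0;
 *	}
 */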
/**
 * idr_get_next - look up the next object at or above the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * greater than or equal to *@nextidp.  After being looked up, *@nextidp
 * will be updated for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
        struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];

        p = rcu_dereference_raw(idp->top);

        n = (p->layer + 1) * IDR_BITS;
        max = idr_max(p->layer + 1);

        while (id >= 0 && id <= max) {
                p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);

                /*
                 * Proceed to the next layer at the current level.  Unlike
                 * idr_for_each(), @id isn't guaranteed to be aligned to
                 * layer boundary at this point and adding 1 << n may
                 * incorrectly skip IDs.  Make sure we jump to the
                 * beginning of the next layer using round_up().
                 */
                id = round_up(id + 1, 1 << n);
                while (n < fls(id)) {
EXPORT_SYMBOL(idr_get_next);
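/*
 * Sketch (not from the original file) of a plain iteration loop built on
 * idr_get_next(); "my_idr" and "use()" are illustrative:
 *
 *	struct my_obj *obj;
 *	int id = 0;
 *
 *	while ((obj = idr_get_next(&my_idr, &id)) != NULL) {
 *		use(obj);
 *		id++;		// advance past the id just returned
 *	}
 */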
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
        struct idr_layer *p, *old_p;

        if (WARN_ON_ONCE(id < 0))
                return ERR_PTR(-EINVAL);

                return ERR_PTR(-EINVAL);

        n = (p->layer + 1) * IDR_BITS;

                return ERR_PTR(-EINVAL);

        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];

        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        rcu_assign_pointer(p->ary[n], ptr);
EXPORT_SYMBOL(idr_replace);
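/*
 * Sketch (not from the original file): idr_replace() returns the old
 * pointer on success and an ERR_PTR() value on failure, so callers check
 * the result with IS_ERR(); names are illustrative:
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// -EINVAL or -ENOENT
 *	kfree_rcu(old, rcu);		// dispose of the old object RCU-safely
 */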
void __init idr_init_cache(void)
        idr_layer_cache = kmem_cache_create("idr_layer_cache",
                                sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
EXPORT_SYMBOL(idr_init);
/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                spin_unlock_irqrestore(&ida->idr.lock, flags);
/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function (ida_get_new_above()).  It preallocates enough
 * memory to satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);

                free_bitmap(ida, bitmap);
EXPORT_SYMBOL(ida_pre_get);
/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct ida_bitmap *bitmap;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;

        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
                return t == -ENOMEM ? -EAGAIN : t;

        if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],

        /* look for an empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_IDR_BIT)

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                kmem_cache_free(idr_layer_cache, p);
EXPORT_SYMBOL(ida_get_new_above);
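/*
 * Sketch (not from the original file) of the unlock-and-retry loop the
 * comment above describes; "my_ida" and "my_lock" are illustrative names:
 *
 *	int id, err;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *
 *	return err ? err : id;
 */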
/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);	/* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);

        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
EXPORT_SYMBOL(ida_remove);
/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
EXPORT_SYMBOL(ida_destroy);
/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask)
        unsigned long flags;

        BUG_ON((int)start < 0);
        BUG_ON((int)end < 0);

        BUG_ON(end < start);

        if (!ida_pre_get(ida, gfp_mask))

        spin_lock_irqsave(&simple_ida_lock, flags);
        ret = ida_get_new_above(ida, start, &id);
                ida_remove(ida, id);
        spin_unlock_irqrestore(&simple_ida_lock, flags);

        if (unlikely(ret == -EAGAIN))
EXPORT_SYMBOL(ida_simple_get);
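/*
 * Sketch (not from the original file) of the simple interface, which does
 * its own locking and preallocation; "my_ida" is an illustrative name:
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;		// -ENOSPC or -ENOMEM
 *	...
 *	ida_simple_remove(&my_ida, id);
 */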
/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
        unsigned long flags;

        BUG_ON((int)id < 0);
        spin_lock_irqsave(&simple_ida_lock, flags);
        ida_remove(ida, id);
        spin_unlock_irqrestore(&simple_ida_lock, flags);
EXPORT_SYMBOL(ida_simple_remove);
/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
EXPORT_SYMBOL(ida_init);