/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return p;
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * a 1 is returned in all other cases.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return 0;
                move_to_free_list(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
 restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return IDR_NOMORE_SPACE;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = get_from_free_list(idp);
                        if (!new)
                                return -1;
                        p->ary[m] = new;
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = get_from_free_list(idp)))
                        return -1;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count)
                        continue;
                if (!(new = get_from_free_list(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        idp->top = p;
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == IDR_NEED_TO_GROW)
                goto build_up;
        return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp:	idr handle
 * @ptr:	pointer you want associated with the id
 * @starting_id: id to start search at
 * @id:		pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                if (rv == -1)
                        return -EAGAIN;
                else /* Will be -3 */
                        return -ENOSPC;
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp:	idr handle
 * @ptr:	pointer you want associated with the id
 * @id:		pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                if (rv == -1)
                        return -EAGAIN;
                else /* Will be -3 */
                        return -ENOSPC;
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);
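
/*
 * A minimal usage sketch (not part of the original file): as the comment
 * above describes, the caller preallocates outside its own lock and
 * retries on -EAGAIN.  The lock and object here are hypothetical
 * caller-side state, assumed to be supplied by the user of the API.
 */
static int __maybe_unused example_alloc_id(struct idr *idr,
                                           spinlock_t *lock, void *obj)
{
        int id, err;

        do {
                if (!idr_pre_get(idr, GFP_KERNEL))
                        return -ENOMEM;         /* really out of memory */
                spin_lock(lock);
                err = idr_get_new(idr, obj, &id);
                spin_unlock(lock);
        } while (err == -EAGAIN);

        return err ? err : id;  /* >= 0 is the allocated id */
}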

static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
                "idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                p->ary[n] = NULL;
                while (*paa && !--((**paa)->count)) {
                        move_to_free_list(idp, **paa);
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp:	idr handle
 * @id:		unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {  // We can drop a layer
                p = idp->top->ary[0];
                idp->top->bitmap = idp->top->count = 0;
                move_to_free_list(idp, idp->top);
                idp->top = p;
                --idp->layers;
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_remove);
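
/*
 * Removal sketch (not part of the original file): idr_remove() only
 * detaches the id; freeing the stored object stays with the caller.
 * The lock is hypothetical caller-side state.
 */
static void __maybe_unused example_release_id(struct idr *idr,
                                              spinlock_t *lock, int id)
{
        void *obj;

        spin_lock(lock);
        obj = idr_find(idr, id);        /* grab the pointer before the id dies */
        idr_remove(idr, id);
        spin_unlock(lock);
        kfree(obj);                     /* assumes the object was kmalloc'ed */
}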

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp:	idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                id += 1 << n;
                while (n < fls(id)) {
                        if (p) {
                                memset(p, 0, sizeof *p);
                                move_to_free_list(idp, p);
                        }
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->top = NULL;
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
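
/*
 * A sketch of the clean-up sequence described above (not part of the
 * original file).  The callback and element type are hypothetical;
 * any object-freeing routine would do in its place.
 */
static int __maybe_unused example_free_one(int id, void *p, void *data)
{
        kfree(p);       /* each stored pointer came from kmalloc */
        return 0;       /* keep iterating */
}

static void __maybe_unused example_teardown(struct idr *idr)
{
        idr_for_each(idr, example_free_one, NULL);      /* free the objects */
        idr_remove_all(idr);                            /* drop all id mappings */
        idr_destroy(idr);                               /* free cached idr_layers */
}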

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp:	idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp:	idr handle
 * @id:		lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;

        while (n > 0 && p) {
                n -= IDR_BITS;
                p = p->ary[(id >> n) & IDR_MASK];
        }
        return (void *)p;
}
EXPORT_SYMBOL(idr_find);
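
/*
 * Lookup sketch (not part of the original file): as noted above, the
 * caller must provide its own serialization against idr_get_new() and
 * idr_remove(); the lock here is assumed caller-side state.
 */
static void __maybe_unused *example_lookup(struct idr *idr,
                                           spinlock_t *lock, int id)
{
        void *obj;

        spin_lock(lock);
        obj = idr_find(idr, id);        /* NULL if id is not allocated */
        spin_unlock(lock);
        return obj;
}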

/**
 * idr_for_each - iterate through all stored pointers
 * @idp:	idr handle
 * @fn:		function to be called for each pointer
 * @data:	data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);
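
/*
 * Iteration sketch (not part of the original file): the @data pointer
 * is threaded through to the callback, and a nonzero return would stop
 * the walk early.  example_count_one is a hypothetical callback.
 */
static int __maybe_unused example_count_one(int id, void *p, void *data)
{
        (*(int *)data)++;
        return 0;               /* nonzero here would abort the walk */
}

static int __maybe_unused example_count(struct idr *idr)
{
        int count = 0;

        idr_for_each(idr, example_count_one, &count);
        return count;
}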

/**
 * idr_replace - replace pointer for given id
 * @idp:	idr handle
 * @ptr:	pointer you want associated with the id
 * @id:		lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        p->ary[n] = ptr;

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
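
/*
 * Replacement sketch (not part of the original file): unlike idr_find(),
 * errors are reported with ERR_PTR() values, so the result must be
 * checked with IS_ERR() before use.
 */
static int __maybe_unused example_swap(struct idr *idr, int id, void *new,
                                       void **old)
{
        void *p = idr_replace(idr, new, id);

        if (IS_ERR(p))
                return PTR_ERR(p);      /* -ENOENT or -EINVAL */
        *old = p;
        return 0;
}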

static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
{
        memset(idr_layer, 0, sizeof(struct idr_layer));
}

void __init idr_init_cache(void)
{
        idr_layer_cache = kmem_cache_create("idr_layer_cache",
                                sizeof(struct idr_layer), 0, SLAB_PANIC,
                                idr_cache_ctor);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
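
/*
 * Initialization sketch (not part of the original file): an idr can be
 * set up at run time with idr_init(), or statically with the
 * DEFINE_IDR() macro from <linux/idr.h>.  example_idr is hypothetical.
 */
static DEFINE_IDR(example_idr);         /* statically initialized */

static void __maybe_unused example_init(struct idr *dynamic_idr)
{
        idr_init(dynamic_idr);          /* run-time initialization */
}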

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * a 1 is returned in all other cases.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

 restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0) {
                if (t == -1)
                        return -EAGAIN;
                else /* will be -3 */
                        return -ENOSPC;
        }

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
                pa[0]->count++;
        }

        /* lookup for empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
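
/*
 * ida usage sketch (not part of the original file), mirroring the idr
 * pattern above: preallocate, then retry on -EAGAIN.  The lock is
 * hypothetical caller-side state.
 */
static int __maybe_unused example_alloc_ida_id(struct ida *ida,
                                               spinlock_t *lock)
{
        int id, err;

        do {
                if (!ida_pre_get(ida, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock(lock);
                err = ida_get_new(ida, &id);
                spin_unlock(lock);
        } while (err == -EAGAIN);

        return err ? err : id;
}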

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);       /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

 err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
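
/*
 * Lifecycle sketch (not part of the original file): ida_init() pairs
 * with ida_destroy(), which releases the cached layers and the spare
 * bitmap once the allocator is no longer needed.
 */
static void __maybe_unused example_ida_lifecycle(struct ida *ida)
{
        ida_init(ida);          /* ready for ida_pre_get()/ida_get_new() */
        ida_destroy(ida);       /* free cached layers and spare bitmap */
}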