/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever; we treat it as a (void *)) with that id.  You can pass
 * this id to a user for him to pass back at a later time.  You then
 * pass that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most
 * of the memory is returned (we keep IDR_FREE_MAX layers in a local
 * pool) so we don't need to go to the memory "store" during an id
 * allocation; it also means you don't need to be too concerned about
 * locking conflicts with the slab allocator.
 */

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
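
/*
 * A hypothetical usage sketch (not part of the kernel source): the
 * canonical idr_pre_get()/idr_get_new_above() retry loop described
 * above.  "example_idr", "example_lock" and "example_alloc_id" are
 * illustrative names only.
 */
#if 0
static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_alloc_id(void *obj)
{
	int id, ret;

	do {
		if (!idr_pre_get(&example_idr, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&example_lock);
		ret = idr_get_new_above(&example_idr, obj, 1, &id);
		spin_unlock(&example_lock);
	} while (ret == -EAGAIN);

	return ret ? ret : id;	/* -ENOSPC on a full idr, else the new id */
}
#endif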

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	rcu_assign_pointer(idp->top, NULL);
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
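
/*
 * A hypothetical clean-up sketch (not part of the kernel source),
 * following the sequence described above.  It assumes the stored
 * pointers were obtained from kmalloc(); "example_free_one" and
 * "example_teardown" are illustrative names only.
 */
#if 0
static int example_free_one(int id, void *p, void *data)
{
	kfree(p);	/* free the object stored under this id */
	return 0;	/* a non-zero return would abort the iteration */
}

static void example_teardown(struct idr *idp)
{
	idr_for_each(idp, example_free_one, NULL);	/* free all objects */
	idr_remove_all(idp);				/* drop all id mappings */
	idr_destroy(idp);				/* free cached idr_layers */
}
#endif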

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
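
/*
 * A hypothetical lookup sketch (not part of the kernel source):
 * idr_find() under rcu_read_lock(), as permitted above.  "struct
 * example_obj" is an illustrative type; a real caller must ensure the
 * object cannot be freed while it is still in use, e.g. by taking a
 * reference inside the read-side critical section.
 */
#if 0
static struct example_obj *example_lookup(struct idr *idp, int id)
{
	struct example_obj *obj;

	rcu_read_lock();
	obj = idr_find(idp, id);
	rcu_read_unlock();

	return obj;	/* NULL if @id was not allocated */
}
#endif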

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
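
/*
 * A hypothetical replace sketch (not part of the kernel source):
 * swapping the pointer stored at an id while holding a writer-side
 * lock, then checking the error-pointer encodings documented above.
 * "example_lock" and "example_swap" are illustrative names only.
 */
#if 0
static void *example_swap(struct idr *idp, spinlock_t *example_lock,
			  int id, void *new)
{
	void *old;

	spin_lock(example_lock);
	old = idr_replace(idp, new, id);
	spin_unlock(example_lock);

	return IS_ERR(old) ? NULL : old;	/* -ENOENT/-EINVAL -> NULL */
}
#endif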

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* look up an empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
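
/*
 * A hypothetical usage sketch (not part of the kernel source): the
 * matching ida_pre_get()/ida_get_new() retry loop.  "example_ida" and
 * "example_ida_lock" are illustrative names only.
 */
#if 0
static DEFINE_IDA(example_ida);
static DEFINE_SPINLOCK(example_ida_lock);

static int example_alloc(void)
{
	int id, ret;

	do {
		if (!ida_pre_get(&example_ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&example_ida_lock);
		ret = ida_get_new(&example_ida, &id);
		spin_unlock(&example_ida_lock);
	} while (ret == -EAGAIN);

	return ret ? ret : id;	/* -ENOSPC on a full ida, else the new id */
}
#endif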

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);