/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX free layers) in a local
 * pool so we don't need to go to the memory "store" during an id
 * allocate, and so you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */
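
/*
 * Basic usage, as a minimal sketch ("my_objects" and "obj" are
 * hypothetical; the full -EAGAIN retry convention is shown below,
 * after idr_pre_get()):
 *
 *	static DEFINE_IDR(my_objects);
 *	int id, ret;
 *
 *	if (!idr_pre_get(&my_objects, GFP_KERNEL))
 *		return -ENOMEM;
 *	ret = idr_get_new(&my_objects, obj, &id);
 *	if (ret)
 *		return ret;
 *
 *	obj = idr_find(&my_objects, id);
 *	...
 *	idr_remove(&my_objects, id);
 */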

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct kmem_cache *idr_layer_cache;
static DEFINE_SPINLOCK(simple_ida_lock);

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}
/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinlocks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
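
/*
 * The idr_pre_get()/idr_get_new*() calling convention, as a sketch
 * ("my_lock", "my_idr" and "obj" are hypothetical).  idr_pre_get()
 * refills the per-idr free list without the caller's lock held, and
 * the allocation proper is retried on -EAGAIN:
 *
 *   again:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	ret = idr_get_new(&my_idr, obj, &id);
 *	spin_unlock(&my_lock);
 *	if (ret == -EAGAIN)
 *		goto again;
 *	else if (ret)
 *		return ret;
 */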

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * If allocation from IDR's private freelist fails, idr_get_new() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new() call.
 *
 * If the idr is full idr_get_new() will return %-ENOSPC.
 *
 * @id returns a value in the range %0 ... %0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
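
/*
 * The clean-up sequence described above, as a sketch ("free_one" is a
 * hypothetical callback that frees a single stored object):
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *	...
 *	idr_for_each(&my_idr, free_one, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */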

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
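
/*
 * RCU lookup sketch, assuming the stored objects are themselves freed
 * via RCU so the returned pointer stays valid for the duration of the
 * read-side critical section ("do_something" is hypothetical):
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		do_something(obj);
 *	rcu_read_unlock();
 */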

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
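
/*
 * Example callback, a sketch: count the registered pointers through
 * the opaque @data argument ("count_one" is hypothetical):
 *
 *	static int count_one(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *	...
 *	int count = 0;
 *	idr_for_each(&my_idr, count_one, &count);
 */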

/**
 * idr_get_next - lookup next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * equal to or greater than *@nextidp.  On success, *@nextidp is updated to
 * that id, ready for the next iteration.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
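
/*
 * Iteration sketch: visit every registered pointer in increasing id
 * order.  The manual increment makes the next call start past the
 * entry that was just returned.
 *
 *	int id = 0;
 *	void *p;
 *
 *	while ((p = idr_get_next(&my_idr, &id)) != NULL) {
 *		do_something(p);
 *		id++;
 *	}
 */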
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
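
/*
 * Replacement sketch: on success the old pointer comes back so the
 * caller can dispose of it; IS_ERR() distinguishes the error returns
 * ("new_obj" is hypothetical):
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	kfree(old);
 */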

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * ida_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range %0 ... %0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
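
/*
 * ida allocation sketch, using the same -EAGAIN retry convention as
 * the idr calls ("my_ida" and "my_lock" are hypothetical):
 *
 *   again:
 *	if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	ret = ida_get_new(&my_ida, &id);
 *	spin_unlock(&my_lock);
 *	if (ret == -EAGAIN)
 *		goto again;
 */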

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
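
/*
 * The simple interface hides the pre_get/retry dance and the locking;
 * a sketch:
 *
 *	int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);
 */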

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);