/*
 * A Remote Heap.  Remote means that we don't touch the memory that the
 * heap points to. Normal heap implementations use the memory they manage
 * to place their list. We cannot do that because the memory we manage may
 * have special properties, for example it is uncacheable or of different
 * endianness.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
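/*
 * For illustration, a typical lifecycle using the entry points defined
 * below (a sketch only; the region address, sizes, and owner string are
 * made up, and error checks are elided):
 *
 *	rh_info_t *heap = rh_create(16);	// 16-byte alignment
 *	rh_attach_region(heap, (void *)0x80000000, 0x10000);
 *	void *p = rh_alloc(heap, 512, "my-driver");
 *	...
 *	rh_free(heap, p);
 *	rh_destroy(heap);
 */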
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/rheap.h>

/*
 * Fixup a list_head, needed when copying lists.  If the pointers fall
 * between s and e, apply the delta.  This assumes that
 * sizeof(struct list_head *) == sizeof(unsigned long *).
 */
static inline void fixup(unsigned long s, unsigned long e, int d,
			 struct list_head *l)
{
	unsigned long *pp;

	pp = (unsigned long *)&l->next;
	if (*pp >= s && *pp < e)
		*pp += d;

	pp = (unsigned long *)&l->prev;
	if (*pp >= s && *pp < e)
		*pp += d;
}
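/*
 * Worked example (illustrative addresses): if the old block array spans
 * [0x1000, 0x1100) and its copy starts at 0x2000, then d = +0x1000, and
 * any ->next/->prev pointer whose value lands inside [0x1000, 0x1100)
 * is shifted by d so it points at the corresponding node in the new
 * copy.  Pointer values outside that range are left alone.
 */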

/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new blocks to the empty list; max_blocks was already
	 * updated above, so the new slots begin at
	 * block + max_blocks - new_blocks */
	for (i = 0, blk = block + info->max_blocks - new_blocks;
	     i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}

/*
 * Ensure at least the required number of empty slots.  If this function
 * causes the block area to grow, all pointers kept into the block area
 * are invalidated!
 */
static int assure_empty(rh_info_t * info, int slots)
{
	int max_blocks;

	/* This function is not meant to be used to grow uncontrollably */
	if (slots >= 4)
		return -EINVAL;

	/* Enough space */
	if (info->empty_slots >= slots)
		return 0;

	/* Round the new total up to the next multiple of 16 */
	max_blocks = ((info->max_blocks + slots) + 15) & ~15;

	return grow(info, max_blocks);
}
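/*
 * Example of the rounding above: with max_blocks = 16 and slots = 2,
 * (16 + 2 + 15) & ~15 = 33 & ~15 = 32, so the block area grows from 16
 * to 32 descriptors in one step rather than a slot at a time.
 */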

static rh_block_t *get_slot(rh_info_t * info)
{
	rh_block_t *blk;

	/* If no empty slot is left, the caller failed to call
	 * assure_empty() beforehand; there is no way to recover here. */
	if (info->empty_slots == 0) {
		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
		return NULL;
	}

	/* Get empty slot to use */
	blk = list_entry(info->empty_list.next, rh_block_t, list);
	list_del_init(&blk->list);
	info->empty_slots--;

	/* Initialize */
	blk->start = NULL;
	blk->size = 0;
	blk->owner = NULL;

	return blk;
}

static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
	list_add(&blk->list, &info->empty_list);
	info->empty_slots++;
}

static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = (unsigned long)blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = (unsigned long)blk->start;
		be = bs + blk->size;

		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before != NULL && s != (unsigned long)before->start + before->size)
		before = NULL;

	if (after != NULL && e != (unsigned long)after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start = (int8_t *)after->start - size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}
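/*
 * Coalescing example (illustrative addresses): freeing [0x200, 0x300)
 * while [0x100, 0x200) and [0x300, 0x400) are already on the free list
 * takes the last branch above: the "before" block grows to cover
 * [0x100, 0x400), and both the new descriptor and the "after"
 * descriptor go back on the empty list.
 */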

static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	struct list_head *l;

	/* Insert before the first block that starts after the given one,
	 * keeping the taken list sorted by start address */
	list_for_each(l, &info->taken_list) {
		blk = list_entry(l, rh_block_t, list);
		if (blk->start > blkn->start) {
			list_add_tail(&blkn->list, &blk->list);
			return;
		}
	}

	list_add_tail(&blkn->list, &info->taken_list);
}

/*
 * Create a remote heap dynamically.  Note that no memory for the blocks
 * is allocated here; that happens upon the first allocation.
 */
rh_info_t *rh_create(unsigned int alignment)
{
	rh_info_t *info;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return ERR_PTR(-EINVAL);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = NULL;
	info->max_blocks = 0;
	info->empty_slots = 0;
	info->flags = 0;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	return info;
}
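/*
 * rh_create() reports failure through ERR_PTR(), not NULL, so callers
 * are expected to check with IS_ERR(), e.g. (illustrative):
 *
 *	rh_info_t *heap = rh_create(8);
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 */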

/*
 * Destroy a dynamically created remote heap.  Deallocate only if the
 * areas are not static.
 */
void rh_destroy(rh_info_t * info)
{
	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
		kfree(info->block);

	if ((info->flags & RHIF_STATIC_INFO) == 0)
		kfree(info);
}

/*
 * Initialize in place a remote heap info block.  This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
	     rh_block_t * block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all blocks to the empty list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}
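/*
 * Early-boot usage sketch (illustrative; the names, alignment, and
 * array size are made up): both structures live in static storage, so
 * no allocator is needed.
 *
 *	static rh_block_t early_blocks[16];
 *	static rh_info_t early_heap;
 *
 *	rh_init(&early_heap, 4, ARRAY_SIZE(early_blocks), early_blocks);
 */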

/* Attach a free memory region, coalescing it with adjacent regions */
int rh_attach_region(rh_info_t * info, void *start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* Take final values */
	start = (void *)s;
	size = (int)(e - s);

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
	if (r < 0)
		return r;

	blk = get_slot(info);
	blk->start = start;
	blk->size = size;
	blk->owner = NULL;

	attach_free_block(info, blk);

	return 0;
}
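/*
 * Rounding example (illustrative): attaching [0x1003, 0x1103) to a heap
 * with 16-byte alignment rounds the start up to 0x1010 and the end down
 * to 0x1100, so only [0x1010, 0x1100) (240 bytes) becomes available.
 */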

/* Detach the given address range, splitting a free block if needed. */
void *rh_detach_region(rh_info_t * info, void *start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = (unsigned long)blk->start;
		be = (unsigned long)blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return (void *)s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start = (int8_t *)blk->start + size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* the back free fragment */
		newblk = get_slot(info);
		newblk->start = (void *)e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return (void *)s;
}
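/*
 * Split example (illustrative): detaching [0x2000, 0x2100) from a free
 * block covering [0x1000, 0x3000) takes the "else" branch above; the
 * original descriptor shrinks to the front fragment [0x1000, 0x2000),
 * and a fresh slot describes the back fragment [0x2100, 0x3000).
 */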

void *rh_alloc(rh_info_t * info, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	void *start;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	if (assure_empty(info, 1) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;
		start = blk->start;

		attach_taken_block(info, blk);

		return start;
	}

	newblk = get_slot(info);
	newblk->start = blk->start;
	newblk->size = size;
	newblk->owner = owner;

	/* blk still in free list, with updated start, size */
	blk->start = (int8_t *)blk->start + size;
	blk->size -= size;

	start = newblk->start;

	attach_taken_block(info, newblk);

	return start;
}
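/*
 * The search above is first fit: the first free block at least as large
 * as the alignment-rounded request wins.  As with rh_create(), failure
 * is reported through ERR_PTR(), so check the result with IS_ERR()
 * rather than against NULL, e.g. (illustrative):
 *
 *	void *buf = rh_alloc(heap, 256, "example-owner");
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 */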

/* Allocate at precisely the given address */
void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 2) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = (unsigned long)blk->start;
		be = (unsigned long)blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		/* reset so a failed search is detected below, as in
		 * rh_detach_region() */
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start = (int8_t *)blk->start + size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = (void *)e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
	newblk1->start = (void *)s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}

int rh_free(rh_info_t * info, void *start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}

int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
	rh_block_t *blk;
	struct list_head *l;
	struct list_head *h;
	int nr;

	switch (what) {

	case RHGS_FREE:
		h = &info->free_list;
		break;

	case RHGS_TAKEN:
		h = &info->taken_list;
		break;

	default:
		return -EINVAL;
	}

	/* Walk the list, filling in up to max_stats entries */
	nr = 0;
	list_for_each(l, h) {
		blk = list_entry(l, rh_block_t, list);
		if (stats != NULL && nr < max_stats) {
			stats->start = blk->start;
			stats->size = blk->size;
			stats->owner = blk->owner;
			stats++;
		}
		nr++;
	}

	return nr;
}
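/*
 * Note that the return value is the total number of blocks on the list,
 * which may exceed max_stats; only min(nr, max_stats) entries are
 * actually written.  An illustrative two-step pattern:
 *
 *	int nr = rh_get_stats(heap, RHGS_TAKEN, 0, NULL);  // count only
 *	// ...allocate nr entries in stats, then fetch them...
 *	nr = rh_get_stats(heap, RHGS_TAKEN, nr, stats);
 */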

int rh_set_owner(rh_info_t * info, void *start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	blk->owner = owner;
	size = blk->size;

	return size;
}

void rh_dump(rh_info_t * info)
{
	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
	int maxnr;
	int i, nr;

	maxnr = ARRAY_SIZE(st);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO "  Free:\n");
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%p-0x%p (%u)\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO "  Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%p-0x%p (%u) %s\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}

void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%p-0x%p (%u)\n",
	       blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
}