Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame^] | 1 | /* Copyright (c) 2010, Code Aurora Forum. All rights reserved. |
| 2 | * |
| 3 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. |
| 6 | * |
| 7 | * This program is distributed in the hope that it will be useful, |
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 10 | * GNU General Public License for more details. |
| 11 | */ |
| 12 | |
| 13 | #include <linux/kernel.h> |
| 14 | #include <linux/slab.h> |
| 15 | #include <linux/module.h> |
| 16 | #include <linux/vcm.h> |
| 17 | #include <linux/vcm_alloc.h> |
| 18 | #include <linux/string.h> |
| 19 | #include <asm/sizes.h> |
| 20 | |
/* Nonzero once vcm_alloc_init() has completed successfully; cleared by
 * vcm_alloc_destroy().  Every public entry point checks it first.
 */
int basicalloc_init;

/* Error logger that prefixes the current function name and line number. */
#define vcm_alloc_err(a, ...) \
	pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__)
| 25 | |
/* Head of a pool's chunk list.  Note: the list holds ALL chunks carved
 * from the pool (allocated chunks are linked onto a caller's list via
 * their separate 'allocated' node, not removed from here); 'num' counts
 * only the chunks currently available.
 */
struct phys_chunk_head {
	struct list_head head;	/* every phys_chunk carved from this pool */
	int num;		/* chunks currently free for allocation */
};

/* One board-defined physical memory region, split into equal chunks. */
struct phys_pool {
	int size;		/* total region size, bytes */
	int chunk_size;		/* size of each chunk, bytes */
	struct phys_chunk_head head;	/* chunk list for this region */
};
| 36 | |
static int vcm_num_phys_pools;	/* number of entries in vcm_phys_pool[] */
static int vcm_num_memtypes;	/* number of entries in memtype_map[] */
static struct phys_pool *vcm_phys_pool;	/* pools built from board data */
static struct vcm_memtype_map *memtype_map;	/* memtype -> pool priority map */
| 41 | |
| 42 | static int num_pools(enum memtype_t memtype) |
| 43 | { |
| 44 | if (memtype >= vcm_num_memtypes) { |
| 45 | vcm_alloc_err("Bad memtype: %d\n", memtype); |
| 46 | return -EINVAL; |
| 47 | } |
| 48 | return memtype_map[memtype].num_pools; |
| 49 | } |
| 50 | |
| 51 | static int pool_chunk_size(enum memtype_t memtype, int prio_idx) |
| 52 | { |
| 53 | int pool_idx; |
| 54 | if (memtype >= vcm_num_memtypes) { |
| 55 | vcm_alloc_err("Bad memtype: %d\n", memtype); |
| 56 | return -EINVAL; |
| 57 | } |
| 58 | |
| 59 | if (prio_idx >= num_pools(memtype)) { |
| 60 | vcm_alloc_err("Bad prio index: %d, max=%d, mt=%d\n", prio_idx, |
| 61 | num_pools(memtype), memtype); |
| 62 | return -EINVAL; |
| 63 | } |
| 64 | |
| 65 | pool_idx = memtype_map[memtype].pool_id[prio_idx]; |
| 66 | return vcm_phys_pool[pool_idx].chunk_size; |
| 67 | } |
| 68 | |
| 69 | int vcm_alloc_pool_idx_to_size(int pool_idx) |
| 70 | { |
| 71 | if (pool_idx >= vcm_num_phys_pools) { |
| 72 | vcm_alloc_err("Bad pool index: %d\n, max=%d\n", pool_idx, |
| 73 | vcm_num_phys_pools); |
| 74 | return -EINVAL; |
| 75 | } |
| 76 | return vcm_phys_pool[pool_idx].chunk_size; |
| 77 | } |
| 78 | |
| 79 | static struct phys_chunk_head *get_chunk_list(enum memtype_t memtype, |
| 80 | int prio_idx) |
| 81 | { |
| 82 | unsigned int pool_idx; |
| 83 | |
| 84 | if (memtype >= vcm_num_memtypes) { |
| 85 | vcm_alloc_err("Bad memtype: %d\n", memtype); |
| 86 | return NULL; |
| 87 | } |
| 88 | |
| 89 | if (prio_idx >= num_pools(memtype)) { |
| 90 | vcm_alloc_err("bad chunk size: mt=%d, prioidx=%d, np=%d\n", |
| 91 | memtype, prio_idx, num_pools(memtype)); |
| 92 | BUG(); |
| 93 | return NULL; |
| 94 | } |
| 95 | |
| 96 | if (!vcm_phys_pool) { |
| 97 | vcm_alloc_err("phys_pool is null\n"); |
| 98 | return NULL; |
| 99 | } |
| 100 | |
| 101 | /* We don't have a "pool count" anywhere but this is coming |
| 102 | * strictly from data in a board file |
| 103 | */ |
| 104 | pool_idx = memtype_map[memtype].pool_id[prio_idx]; |
| 105 | |
| 106 | return &vcm_phys_pool[pool_idx].head; |
| 107 | } |
| 108 | |
| 109 | static int is_allocated(struct list_head *allocated) |
| 110 | { |
| 111 | /* This should not happen under normal conditions */ |
| 112 | if (!allocated) { |
| 113 | vcm_alloc_err("no allocated\n"); |
| 114 | return 0; |
| 115 | } |
| 116 | |
| 117 | if (!basicalloc_init) { |
| 118 | vcm_alloc_err("no basicalloc_init\n"); |
| 119 | return 0; |
| 120 | } |
| 121 | return !list_empty(allocated); |
| 122 | } |
| 123 | |
| 124 | static int count_allocated_size(enum memtype_t memtype, int idx) |
| 125 | { |
| 126 | int cnt = 0; |
| 127 | struct phys_chunk *chunk, *tmp; |
| 128 | struct phys_chunk_head *pch; |
| 129 | |
| 130 | if (!basicalloc_init) { |
| 131 | vcm_alloc_err("no basicalloc_init\n"); |
| 132 | return 0; |
| 133 | } |
| 134 | |
| 135 | pch = get_chunk_list(memtype, idx); |
| 136 | if (!pch) { |
| 137 | vcm_alloc_err("null pch\n"); |
| 138 | return -EINVAL; |
| 139 | } |
| 140 | |
| 141 | list_for_each_entry_safe(chunk, tmp, &pch->head, list) { |
| 142 | if (is_allocated(&chunk->allocated)) |
| 143 | cnt++; |
| 144 | } |
| 145 | |
| 146 | return cnt; |
| 147 | } |
| 148 | |
| 149 | |
| 150 | int vcm_alloc_get_mem_size(void) |
| 151 | { |
| 152 | if (!vcm_phys_pool) { |
| 153 | vcm_alloc_err("No physical pool set up!\n"); |
| 154 | return -ENODEV; |
| 155 | } |
| 156 | return vcm_phys_pool[0].size; |
| 157 | } |
| 158 | EXPORT_SYMBOL(vcm_alloc_get_mem_size); |
| 159 | |
| 160 | void vcm_alloc_print_list(enum memtype_t memtype, int just_allocated) |
| 161 | { |
| 162 | int i; |
| 163 | struct phys_chunk *chunk, *tmp; |
| 164 | struct phys_chunk_head *pch; |
| 165 | |
| 166 | if (!basicalloc_init) { |
| 167 | vcm_alloc_err("no basicalloc_init\n"); |
| 168 | return; |
| 169 | } |
| 170 | |
| 171 | for (i = 0; i < num_pools(memtype); ++i) { |
| 172 | pch = get_chunk_list(memtype, i); |
| 173 | |
| 174 | if (!pch) { |
| 175 | vcm_alloc_err("pch is null\n"); |
| 176 | return; |
| 177 | } |
| 178 | |
| 179 | if (list_empty(&pch->head)) |
| 180 | continue; |
| 181 | |
| 182 | list_for_each_entry_safe(chunk, tmp, &pch->head, list) { |
| 183 | if (just_allocated && !is_allocated(&chunk->allocated)) |
| 184 | continue; |
| 185 | |
| 186 | printk(KERN_INFO "pa = %#x, size = %#x\n", |
| 187 | chunk->pa, vcm_phys_pool[chunk->pool_idx].chunk_size); |
| 188 | } |
| 189 | } |
| 190 | } |
| 191 | EXPORT_SYMBOL(vcm_alloc_print_list); |
| 192 | |
| 193 | int vcm_alloc_blocks_avail(enum memtype_t memtype, int idx) |
| 194 | { |
| 195 | struct phys_chunk_head *pch; |
| 196 | if (!basicalloc_init) { |
| 197 | vcm_alloc_err("no basicalloc_init\n"); |
| 198 | return 0; |
| 199 | } |
| 200 | pch = get_chunk_list(memtype, idx); |
| 201 | |
| 202 | if (!pch) { |
| 203 | vcm_alloc_err("pch is null\n"); |
| 204 | return 0; |
| 205 | } |
| 206 | return pch->num; |
| 207 | } |
| 208 | EXPORT_SYMBOL(vcm_alloc_blocks_avail); |
| 209 | |
| 210 | |
/*
 * vcm_alloc_get_num_chunks() - number of chunk SIZES for @memtype.
 *
 * Despite the name, this returns the number of pools (distinct chunk
 * sizes) configured for @memtype, not a count of chunks; -EINVAL on a
 * bad @memtype (propagated from num_pools()).
 */
int vcm_alloc_get_num_chunks(enum memtype_t memtype)
{
	return num_pools(memtype);
}
| 216 | |
| 217 | |
| 218 | int vcm_alloc_all_blocks_avail(enum memtarget_t memtype) |
| 219 | { |
| 220 | int i; |
| 221 | int cnt = 0; |
| 222 | |
| 223 | if (!basicalloc_init) { |
| 224 | vcm_alloc_err("no basicalloc_init\n"); |
| 225 | return 0; |
| 226 | } |
| 227 | |
| 228 | for (i = 0; i < num_pools(memtype); ++i) |
| 229 | cnt += vcm_alloc_blocks_avail(memtype, i); |
| 230 | return cnt; |
| 231 | } |
| 232 | EXPORT_SYMBOL(vcm_alloc_all_blocks_avail); |
| 233 | |
| 234 | |
| 235 | int vcm_alloc_count_allocated(enum memtype_t memtype) |
| 236 | { |
| 237 | int i; |
| 238 | int cnt = 0; |
| 239 | |
| 240 | if (!basicalloc_init) { |
| 241 | vcm_alloc_err("no basicalloc_init\n"); |
| 242 | return 0; |
| 243 | } |
| 244 | |
| 245 | for (i = 0; i < num_pools(memtype); ++i) |
| 246 | cnt += count_allocated_size(memtype, i); |
| 247 | return cnt; |
| 248 | } |
| 249 | EXPORT_SYMBOL(vcm_alloc_count_allocated); |
| 250 | |
| 251 | int vcm_alloc_destroy(void) |
| 252 | { |
| 253 | int i, mt; |
| 254 | struct phys_chunk *chunk, *tmp; |
| 255 | |
| 256 | if (!basicalloc_init) { |
| 257 | vcm_alloc_err("no basicalloc_init\n"); |
| 258 | return -ENODEV; |
| 259 | } |
| 260 | |
| 261 | /* can't destroy a space that has allocations */ |
| 262 | for (mt = 0; mt < vcm_num_memtypes; mt++) |
| 263 | if (vcm_alloc_count_allocated(mt)) { |
| 264 | vcm_alloc_err("allocations still present\n"); |
| 265 | return -EBUSY; |
| 266 | } |
| 267 | |
| 268 | for (i = 0; i < vcm_num_phys_pools; i++) { |
| 269 | struct phys_chunk_head *pch = &vcm_phys_pool[i].head; |
| 270 | |
| 271 | if (list_empty(&pch->head)) |
| 272 | continue; |
| 273 | list_for_each_entry_safe(chunk, tmp, &pch->head, list) { |
| 274 | list_del(&chunk->list); |
| 275 | memset(chunk, 0, sizeof(*chunk)); |
| 276 | kfree(chunk); |
| 277 | } |
| 278 | vcm_phys_pool[i].head.num = 0; |
| 279 | } |
| 280 | |
| 281 | kfree(vcm_phys_pool); |
| 282 | kfree(memtype_map); |
| 283 | |
| 284 | vcm_phys_pool = NULL; |
| 285 | memtype_map = NULL; |
| 286 | basicalloc_init = 0; |
| 287 | vcm_num_phys_pools = 0; |
| 288 | return 0; |
| 289 | } |
| 290 | EXPORT_SYMBOL(vcm_alloc_destroy); |
| 291 | |
| 292 | |
| 293 | int vcm_alloc_init(struct physmem_region *mem, int n_regions, |
| 294 | struct vcm_memtype_map *mt_map, int n_mt) |
| 295 | { |
| 296 | int i = 0, j = 0, r = 0, num_chunks; |
| 297 | struct phys_chunk *chunk; |
| 298 | struct phys_chunk_head *pch = NULL; |
| 299 | unsigned long pa; |
| 300 | |
| 301 | /* no double inits */ |
| 302 | if (basicalloc_init) { |
| 303 | vcm_alloc_err("double basicalloc_init\n"); |
| 304 | BUG(); |
| 305 | goto fail; |
| 306 | } |
| 307 | memtype_map = kzalloc(sizeof(*mt_map) * n_mt, GFP_KERNEL); |
| 308 | if (!memtype_map) { |
| 309 | vcm_alloc_err("Could not copy memtype map\n"); |
| 310 | goto fail; |
| 311 | } |
| 312 | memcpy(memtype_map, mt_map, sizeof(*mt_map) * n_mt); |
| 313 | |
| 314 | vcm_phys_pool = kzalloc(sizeof(*vcm_phys_pool) * n_regions, GFP_KERNEL); |
| 315 | vcm_num_phys_pools = n_regions; |
| 316 | vcm_num_memtypes = n_mt; |
| 317 | |
| 318 | if (!vcm_phys_pool) { |
| 319 | vcm_alloc_err("Could not allocate physical pool structure\n"); |
| 320 | goto fail; |
| 321 | } |
| 322 | |
| 323 | /* separate out to ensure good cleanup */ |
| 324 | for (i = 0; i < n_regions; i++) { |
| 325 | pch = &vcm_phys_pool[i].head; |
| 326 | INIT_LIST_HEAD(&pch->head); |
| 327 | pch->num = 0; |
| 328 | } |
| 329 | |
| 330 | for (r = 0; r < n_regions; r++) { |
| 331 | pa = mem[r].addr; |
| 332 | vcm_phys_pool[r].size = mem[r].size; |
| 333 | vcm_phys_pool[r].chunk_size = mem[r].chunk_size; |
| 334 | pch = &vcm_phys_pool[r].head; |
| 335 | |
| 336 | num_chunks = mem[r].size / mem[r].chunk_size; |
| 337 | |
| 338 | printk(KERN_INFO "VCM Init: region %d, chunk size=%d, " |
| 339 | "num=%d, pa=%p\n", r, mem[r].chunk_size, num_chunks, |
| 340 | (void *)pa); |
| 341 | |
| 342 | for (j = 0; j < num_chunks; ++j) { |
| 343 | chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); |
| 344 | if (!chunk) { |
| 345 | vcm_alloc_err("null chunk\n"); |
| 346 | goto fail; |
| 347 | } |
| 348 | chunk->pa = pa; |
| 349 | chunk->size = mem[r].chunk_size; |
| 350 | pa += mem[r].chunk_size; |
| 351 | chunk->pool_idx = r; |
| 352 | INIT_LIST_HEAD(&chunk->allocated); |
| 353 | list_add_tail(&chunk->list, &pch->head); |
| 354 | pch->num++; |
| 355 | } |
| 356 | } |
| 357 | |
| 358 | basicalloc_init = 1; |
| 359 | return 0; |
| 360 | fail: |
| 361 | vcm_alloc_destroy(); |
| 362 | return -EINVAL; |
| 363 | } |
| 364 | EXPORT_SYMBOL(vcm_alloc_init); |
| 365 | |
| 366 | |
| 367 | int vcm_alloc_free_blocks(enum memtype_t memtype, struct phys_chunk *alloc_head) |
| 368 | { |
| 369 | struct phys_chunk *chunk, *tmp; |
| 370 | struct phys_chunk_head *pch = NULL; |
| 371 | |
| 372 | if (!basicalloc_init) { |
| 373 | vcm_alloc_err("no basicalloc_init\n"); |
| 374 | goto fail; |
| 375 | } |
| 376 | |
| 377 | if (!alloc_head) { |
| 378 | vcm_alloc_err("no alloc_head\n"); |
| 379 | goto fail; |
| 380 | } |
| 381 | |
| 382 | list_for_each_entry_safe(chunk, tmp, &alloc_head->allocated, |
| 383 | allocated) { |
| 384 | list_del_init(&chunk->allocated); |
| 385 | pch = &vcm_phys_pool[chunk->pool_idx].head; |
| 386 | |
| 387 | if (!pch) { |
| 388 | vcm_alloc_err("null pch\n"); |
| 389 | goto fail; |
| 390 | } |
| 391 | pch->num++; |
| 392 | } |
| 393 | |
| 394 | return 0; |
| 395 | fail: |
| 396 | return -ENODEV; |
| 397 | } |
| 398 | EXPORT_SYMBOL(vcm_alloc_free_blocks); |
| 399 | |
| 400 | |
| 401 | int vcm_alloc_num_blocks(int num, enum memtype_t memtype, int idx, |
| 402 | struct phys_chunk *alloc_head) |
| 403 | { |
| 404 | struct phys_chunk *chunk; |
| 405 | struct phys_chunk_head *pch = NULL; |
| 406 | int num_allocated = 0; |
| 407 | |
| 408 | if (!basicalloc_init) { |
| 409 | vcm_alloc_err("no basicalloc_init\n"); |
| 410 | goto fail; |
| 411 | } |
| 412 | |
| 413 | if (!alloc_head) { |
| 414 | vcm_alloc_err("no alloc_head\n"); |
| 415 | goto fail; |
| 416 | } |
| 417 | |
| 418 | pch = get_chunk_list(memtype, idx); |
| 419 | |
| 420 | if (!pch) { |
| 421 | vcm_alloc_err("null pch\n"); |
| 422 | goto fail; |
| 423 | } |
| 424 | if (list_empty(&pch->head)) { |
| 425 | vcm_alloc_err("list is empty\n"); |
| 426 | goto fail; |
| 427 | } |
| 428 | |
| 429 | if (vcm_alloc_blocks_avail(memtype, idx) < num) { |
| 430 | vcm_alloc_err("not enough blocks? num=%d\n", num); |
| 431 | goto fail; |
| 432 | } |
| 433 | |
| 434 | list_for_each_entry(chunk, &pch->head, list) { |
| 435 | if (num_allocated == num) |
| 436 | break; |
| 437 | if (is_allocated(&chunk->allocated)) |
| 438 | continue; |
| 439 | |
| 440 | list_add_tail(&chunk->allocated, &alloc_head->allocated); |
| 441 | pch->num--; |
| 442 | num_allocated++; |
| 443 | } |
| 444 | return num_allocated; |
| 445 | fail: |
| 446 | return 0; |
| 447 | } |
| 448 | EXPORT_SYMBOL(vcm_alloc_num_blocks); |
| 449 | |
| 450 | |
| 451 | int vcm_alloc_max_munch(int len, enum memtype_t memtype, |
| 452 | struct phys_chunk *alloc_head) |
| 453 | { |
| 454 | int i; |
| 455 | |
| 456 | int blocks_req = 0; |
| 457 | int block_residual = 0; |
| 458 | int blocks_allocated = 0; |
| 459 | int cur_chunk_size = 0; |
| 460 | int ba = 0; |
| 461 | |
| 462 | if (!basicalloc_init) { |
| 463 | vcm_alloc_err("basicalloc_init is 0\n"); |
| 464 | goto fail; |
| 465 | } |
| 466 | |
| 467 | if (!alloc_head) { |
| 468 | vcm_alloc_err("alloc_head is NULL\n"); |
| 469 | goto fail; |
| 470 | } |
| 471 | |
| 472 | if (num_pools(memtype) <= 0) { |
| 473 | vcm_alloc_err("Memtype %d has improper mempool configuration\n", |
| 474 | memtype); |
| 475 | goto fail; |
| 476 | } |
| 477 | |
| 478 | for (i = 0; i < num_pools(memtype); ++i) { |
| 479 | cur_chunk_size = pool_chunk_size(memtype, i); |
| 480 | if (cur_chunk_size <= 0) { |
| 481 | vcm_alloc_err("Bad chunk size: %d\n", cur_chunk_size); |
| 482 | goto fail; |
| 483 | } |
| 484 | |
| 485 | blocks_req = len / cur_chunk_size; |
| 486 | block_residual = len % cur_chunk_size; |
| 487 | |
| 488 | len = block_residual; /* len left */ |
| 489 | if (blocks_req) { |
| 490 | int blocks_available = 0; |
| 491 | int blocks_diff = 0; |
| 492 | int bytes_diff = 0; |
| 493 | |
| 494 | blocks_available = vcm_alloc_blocks_avail(memtype, i); |
| 495 | if (blocks_available < blocks_req) { |
| 496 | blocks_diff = |
| 497 | (blocks_req - blocks_available); |
| 498 | bytes_diff = |
| 499 | blocks_diff * cur_chunk_size; |
| 500 | |
| 501 | /* add back in the rest */ |
| 502 | len += bytes_diff; |
| 503 | } else { |
| 504 | /* got all the blocks I need */ |
| 505 | blocks_available = |
| 506 | (blocks_available > blocks_req) |
| 507 | ? blocks_req : blocks_available; |
| 508 | } |
| 509 | |
| 510 | ba = vcm_alloc_num_blocks(blocks_available, memtype, i, |
| 511 | alloc_head); |
| 512 | |
| 513 | if (ba != blocks_available) { |
| 514 | vcm_alloc_err("blocks allocated (%i) !=" |
| 515 | " blocks_available (%i):" |
| 516 | " chunk size = %#x," |
| 517 | " alloc_head = %p\n", |
| 518 | ba, blocks_available, |
| 519 | i, (void *) alloc_head); |
| 520 | goto fail; |
| 521 | } |
| 522 | blocks_allocated += blocks_available; |
| 523 | } |
| 524 | } |
| 525 | |
| 526 | if (len) { |
| 527 | int blocks_available = 0; |
| 528 | int last_sz = num_pools(memtype) - 1; |
| 529 | blocks_available = vcm_alloc_blocks_avail(memtype, last_sz); |
| 530 | |
| 531 | if (blocks_available > 0) { |
| 532 | ba = vcm_alloc_num_blocks(1, memtype, last_sz, |
| 533 | alloc_head); |
| 534 | if (ba != 1) { |
| 535 | vcm_alloc_err("blocks allocated (%i) !=" |
| 536 | " blocks_available (%i):" |
| 537 | " chunk size = %#x," |
| 538 | " alloc_head = %p\n", |
| 539 | ba, 1, |
| 540 | last_sz, |
| 541 | (void *) alloc_head); |
| 542 | goto fail; |
| 543 | } |
| 544 | blocks_allocated += 1; |
| 545 | } else { |
| 546 | vcm_alloc_err("blocks_available (%#x) <= 1\n", |
| 547 | blocks_available); |
| 548 | goto fail; |
| 549 | } |
| 550 | } |
| 551 | |
| 552 | return blocks_allocated; |
| 553 | fail: |
| 554 | vcm_alloc_free_blocks(memtype, alloc_head); |
| 555 | return 0; |
| 556 | } |
| 557 | EXPORT_SYMBOL(vcm_alloc_max_munch); |