| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 1 | /* | 
|  | 2 | * Procedures for maintaining information about logical memory blocks. | 
|  | 3 | * | 
|  | 4 | * Peter Bergner, IBM Corp.	June 2001. | 
|  | 5 | * Copyright (C) 2001 Peter Bergner. | 
| David S. Miller | d9b2b2a | 2008-02-13 16:56:49 -0800 | [diff] [blame] | 6 | * | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 7 | *      This program is free software; you can redistribute it and/or | 
|  | 8 | *      modify it under the terms of the GNU General Public License | 
|  | 9 | *      as published by the Free Software Foundation; either version | 
|  | 10 | *      2 of the License, or (at your option) any later version. | 
|  | 11 | */ | 
|  | 12 |  | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 13 | #include <linux/kernel.h> | 
|  | 14 | #include <linux/init.h> | 
|  | 15 | #include <linux/bitops.h> | 
| David S. Miller | d9b2b2a | 2008-02-13 16:56:49 -0800 | [diff] [blame] | 16 | #include <linux/lmb.h> | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 17 |  | 
| Michael Ellerman | 3b9331d | 2006-01-25 21:31:30 +1300 | [diff] [blame] | 18 | #define LMB_ALLOC_ANYWHERE	0 | 
|  | 19 |  | 
| Michael Ellerman | eb48189 | 2005-11-15 14:49:22 +1100 | [diff] [blame] | 20 | struct lmb lmb; | 
|  | 21 |  | 
| David S. Miller | faa6cfd | 2008-05-12 17:21:55 -0700 | [diff] [blame] | 22 | static int lmb_debug; | 
|  | 23 |  | 
|  | 24 | static int __init early_lmb(char *p) | 
|  | 25 | { | 
|  | 26 | if (p && strstr(p, "debug")) | 
|  | 27 | lmb_debug = 1; | 
|  | 28 | return 0; | 
|  | 29 | } | 
|  | 30 | early_param("lmb", early_lmb); | 
|  | 31 |  | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 32 | void lmb_dump_all(void) | 
|  | 33 | { | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 34 | unsigned long i; | 
|  | 35 |  | 
| David S. Miller | faa6cfd | 2008-05-12 17:21:55 -0700 | [diff] [blame] | 36 | if (!lmb_debug) | 
|  | 37 | return; | 
|  | 38 |  | 
|  | 39 | pr_info("lmb_dump_all:\n"); | 
|  | 40 | pr_info("    memory.cnt		  = 0x%lx\n", lmb.memory.cnt); | 
|  | 41 | pr_info("    memory.size		  = 0x%llx\n", | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 42 | (unsigned long long)lmb.memory.size); | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 43 | for (i=0; i < lmb.memory.cnt ;i++) { | 
| David S. Miller | faa6cfd | 2008-05-12 17:21:55 -0700 | [diff] [blame] | 44 | pr_info("    memory.region[0x%lx].base       = 0x%llx\n", | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 45 | i, (unsigned long long)lmb.memory.region[i].base); | 
| David S. Miller | faa6cfd | 2008-05-12 17:21:55 -0700 | [diff] [blame] | 46 | pr_info("		      .size     = 0x%llx\n", | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 47 | (unsigned long long)lmb.memory.region[i].size); | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 48 | } | 
|  | 49 |  | 
| David S. Miller | faa6cfd | 2008-05-12 17:21:55 -0700 | [diff] [blame] | 50 | pr_info("    reserved.cnt	  = 0x%lx\n", lmb.reserved.cnt); | 
| Kumar Gala | f9ebcd9 | 2008-05-18 13:18:01 -0500 | [diff] [blame] | 51 | pr_info("    reserved.size	  = 0x%llx\n", | 
|  | 52 | (unsigned long long)lmb.memory.size); | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 53 | for (i=0; i < lmb.reserved.cnt ;i++) { | 
| David S. Miller | faa6cfd | 2008-05-12 17:21:55 -0700 | [diff] [blame] | 54 | pr_info("    reserved.region[0x%lx].base       = 0x%llx\n", | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 55 | i, (unsigned long long)lmb.reserved.region[i].base); | 
| David S. Miller | faa6cfd | 2008-05-12 17:21:55 -0700 | [diff] [blame] | 56 | pr_info("		      .size     = 0x%llx\n", | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 57 | (unsigned long long)lmb.reserved.region[i].size); | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 58 | } | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 59 | } | 
|  | 60 |  | 
| Badari Pulavarty | 98d5c21 | 2008-04-18 13:33:52 -0700 | [diff] [blame] | 61 | static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, | 
|  | 62 | u64 size2) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 63 | { | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 64 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 65 | } | 
|  | 66 |  | 
| Badari Pulavarty | 98d5c21 | 2008-04-18 13:33:52 -0700 | [diff] [blame] | 67 | static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 68 | { | 
|  | 69 | if (base2 == base1 + size1) | 
|  | 70 | return 1; | 
|  | 71 | else if (base1 == base2 + size2) | 
|  | 72 | return -1; | 
|  | 73 |  | 
|  | 74 | return 0; | 
|  | 75 | } | 
|  | 76 |  | 
| Badari Pulavarty | 98d5c21 | 2008-04-18 13:33:52 -0700 | [diff] [blame] | 77 | static long lmb_regions_adjacent(struct lmb_region *rgn, | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 78 | unsigned long r1, unsigned long r2) | 
|  | 79 | { | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 80 | u64 base1 = rgn->region[r1].base; | 
|  | 81 | u64 size1 = rgn->region[r1].size; | 
|  | 82 | u64 base2 = rgn->region[r2].base; | 
|  | 83 | u64 size2 = rgn->region[r2].size; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 84 |  | 
|  | 85 | return lmb_addrs_adjacent(base1, size1, base2, size2); | 
|  | 86 | } | 
|  | 87 |  | 
| Badari Pulavarty | 98d5c21 | 2008-04-18 13:33:52 -0700 | [diff] [blame] | 88 | static void lmb_remove_region(struct lmb_region *rgn, unsigned long r) | 
| Michael Ellerman | 2babf5c | 2006-05-17 18:00:46 +1000 | [diff] [blame] | 89 | { | 
|  | 90 | unsigned long i; | 
|  | 91 |  | 
|  | 92 | for (i = r; i < rgn->cnt - 1; i++) { | 
|  | 93 | rgn->region[i].base = rgn->region[i + 1].base; | 
|  | 94 | rgn->region[i].size = rgn->region[i + 1].size; | 
|  | 95 | } | 
|  | 96 | rgn->cnt--; | 
|  | 97 | } | 
|  | 98 |  | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 99 | /* Assumption: base addr of region 1 < base addr of region 2 */ | 
| Badari Pulavarty | 98d5c21 | 2008-04-18 13:33:52 -0700 | [diff] [blame] | 100 | static void lmb_coalesce_regions(struct lmb_region *rgn, | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 101 | unsigned long r1, unsigned long r2) | 
|  | 102 | { | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 103 | rgn->region[r1].size += rgn->region[r2].size; | 
| Michael Ellerman | 2babf5c | 2006-05-17 18:00:46 +1000 | [diff] [blame] | 104 | lmb_remove_region(rgn, r2); | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 105 | } | 
|  | 106 |  | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 107 | void __init lmb_init(void) | 
|  | 108 | { | 
|  | 109 | /* Create a dummy zero size LMB which will get coalesced away later. | 
|  | 110 | * This simplifies the lmb_add() code below... | 
|  | 111 | */ | 
|  | 112 | lmb.memory.region[0].base = 0; | 
|  | 113 | lmb.memory.region[0].size = 0; | 
|  | 114 | lmb.memory.cnt = 1; | 
|  | 115 |  | 
|  | 116 | /* Ditto. */ | 
|  | 117 | lmb.reserved.region[0].base = 0; | 
|  | 118 | lmb.reserved.region[0].size = 0; | 
|  | 119 | lmb.reserved.cnt = 1; | 
|  | 120 | } | 
|  | 121 |  | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 122 | void __init lmb_analyze(void) | 
|  | 123 | { | 
|  | 124 | int i; | 
|  | 125 |  | 
|  | 126 | lmb.memory.size = 0; | 
|  | 127 |  | 
|  | 128 | for (i = 0; i < lmb.memory.cnt; i++) | 
|  | 129 | lmb.memory.size += lmb.memory.region[i].size; | 
|  | 130 | } | 
|  | 131 |  | 
| Badari Pulavarty | 98d5c21 | 2008-04-18 13:33:52 -0700 | [diff] [blame] | 132 | static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 133 | { | 
| Manish Ahuja | 56d6d1a | 2007-07-10 05:03:45 +1000 | [diff] [blame] | 134 | unsigned long coalesced = 0; | 
|  | 135 | long adjacent, i; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 136 |  | 
| Kumar Gala | 27e6672 | 2008-02-13 16:58:11 -0800 | [diff] [blame] | 137 | if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { | 
|  | 138 | rgn->region[0].base = base; | 
|  | 139 | rgn->region[0].size = size; | 
|  | 140 | return 0; | 
|  | 141 | } | 
|  | 142 |  | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 143 | /* First try and coalesce this LMB with another. */ | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 144 | for (i = 0; i < rgn->cnt; i++) { | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 145 | u64 rgnbase = rgn->region[i].base; | 
|  | 146 | u64 rgnsize = rgn->region[i].size; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 147 |  | 
| David Gibson | eb6de28 | 2007-02-28 14:12:29 +1100 | [diff] [blame] | 148 | if ((rgnbase == base) && (rgnsize == size)) | 
|  | 149 | /* Already have this region, so we're done */ | 
|  | 150 | return 0; | 
|  | 151 |  | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 152 | adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize); | 
|  | 153 | if (adjacent > 0) { | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 154 | rgn->region[i].base -= size; | 
|  | 155 | rgn->region[i].size += size; | 
|  | 156 | coalesced++; | 
|  | 157 | break; | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 158 | } else if (adjacent < 0) { | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 159 | rgn->region[i].size += size; | 
|  | 160 | coalesced++; | 
|  | 161 | break; | 
|  | 162 | } | 
|  | 163 | } | 
|  | 164 |  | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 165 | if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) { | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 166 | lmb_coalesce_regions(rgn, i, i+1); | 
|  | 167 | coalesced++; | 
|  | 168 | } | 
|  | 169 |  | 
|  | 170 | if (coalesced) | 
|  | 171 | return coalesced; | 
|  | 172 | if (rgn->cnt >= MAX_LMB_REGIONS) | 
|  | 173 | return -1; | 
|  | 174 |  | 
|  | 175 | /* Couldn't coalesce the LMB, so add it to the sorted table. */ | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 176 | for (i = rgn->cnt - 1; i >= 0; i--) { | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 177 | if (base < rgn->region[i].base) { | 
|  | 178 | rgn->region[i+1].base = rgn->region[i].base; | 
|  | 179 | rgn->region[i+1].size = rgn->region[i].size; | 
|  | 180 | } else { | 
|  | 181 | rgn->region[i+1].base = base; | 
|  | 182 | rgn->region[i+1].size = size; | 
|  | 183 | break; | 
|  | 184 | } | 
|  | 185 | } | 
| Kumar Gala | 74b20da | 2008-02-19 21:28:18 -0800 | [diff] [blame] | 186 |  | 
|  | 187 | if (base < rgn->region[0].base) { | 
|  | 188 | rgn->region[0].base = base; | 
|  | 189 | rgn->region[0].size = size; | 
|  | 190 | } | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 191 | rgn->cnt++; | 
|  | 192 |  | 
|  | 193 | return 0; | 
|  | 194 | } | 
|  | 195 |  | 
| Badari Pulavarty | 98d5c21 | 2008-04-18 13:33:52 -0700 | [diff] [blame] | 196 | long lmb_add(u64 base, u64 size) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 197 | { | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 198 | struct lmb_region *_rgn = &lmb.memory; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 199 |  | 
|  | 200 | /* On pSeries LPAR systems, the first LMB is our RMO region. */ | 
|  | 201 | if (base == 0) | 
|  | 202 | lmb.rmo_size = size; | 
|  | 203 |  | 
|  | 204 | return lmb_add_region(_rgn, base, size); | 
|  | 205 |  | 
|  | 206 | } | 
|  | 207 |  | 
| Badari Pulavarty | 98d5c21 | 2008-04-18 13:33:52 -0700 | [diff] [blame] | 208 | long lmb_remove(u64 base, u64 size) | 
|  | 209 | { | 
|  | 210 | struct lmb_region *rgn = &(lmb.memory); | 
|  | 211 | u64 rgnbegin, rgnend; | 
|  | 212 | u64 end = base + size; | 
|  | 213 | int i; | 
|  | 214 |  | 
|  | 215 | rgnbegin = rgnend = 0; /* supress gcc warnings */ | 
|  | 216 |  | 
|  | 217 | /* Find the region where (base, size) belongs to */ | 
|  | 218 | for (i=0; i < rgn->cnt; i++) { | 
|  | 219 | rgnbegin = rgn->region[i].base; | 
|  | 220 | rgnend = rgnbegin + rgn->region[i].size; | 
|  | 221 |  | 
|  | 222 | if ((rgnbegin <= base) && (end <= rgnend)) | 
|  | 223 | break; | 
|  | 224 | } | 
|  | 225 |  | 
|  | 226 | /* Didn't find the region */ | 
|  | 227 | if (i == rgn->cnt) | 
|  | 228 | return -1; | 
|  | 229 |  | 
|  | 230 | /* Check to see if we are removing entire region */ | 
|  | 231 | if ((rgnbegin == base) && (rgnend == end)) { | 
|  | 232 | lmb_remove_region(rgn, i); | 
|  | 233 | return 0; | 
|  | 234 | } | 
|  | 235 |  | 
|  | 236 | /* Check to see if region is matching at the front */ | 
|  | 237 | if (rgnbegin == base) { | 
|  | 238 | rgn->region[i].base = end; | 
|  | 239 | rgn->region[i].size -= size; | 
|  | 240 | return 0; | 
|  | 241 | } | 
|  | 242 |  | 
|  | 243 | /* Check to see if the region is matching at the end */ | 
|  | 244 | if (rgnend == end) { | 
|  | 245 | rgn->region[i].size -= size; | 
|  | 246 | return 0; | 
|  | 247 | } | 
|  | 248 |  | 
|  | 249 | /* | 
|  | 250 | * We need to split the entry -  adjust the current one to the | 
|  | 251 | * beginging of the hole and add the region after hole. | 
|  | 252 | */ | 
|  | 253 | rgn->region[i].size = base - rgn->region[i].base; | 
|  | 254 | return lmb_add_region(rgn, end, rgnend - end); | 
|  | 255 | } | 
|  | 256 |  | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 257 | long __init lmb_reserve(u64 base, u64 size) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 258 | { | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 259 | struct lmb_region *_rgn = &lmb.reserved; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 260 |  | 
| Michael Ellerman | 8c20faf | 2006-01-25 21:31:26 +1300 | [diff] [blame] | 261 | BUG_ON(0 == size); | 
|  | 262 |  | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 263 | return lmb_add_region(_rgn, base, size); | 
|  | 264 | } | 
|  | 265 |  | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 266 | long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 267 | { | 
|  | 268 | unsigned long i; | 
|  | 269 |  | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 270 | for (i = 0; i < rgn->cnt; i++) { | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 271 | u64 rgnbase = rgn->region[i].base; | 
|  | 272 | u64 rgnsize = rgn->region[i].size; | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 273 | if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 274 | break; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 275 | } | 
|  | 276 |  | 
|  | 277 | return (i < rgn->cnt) ? i : -1; | 
|  | 278 | } | 
|  | 279 |  | 
| David S. Miller | c50f68c | 2008-03-24 20:50:48 +1100 | [diff] [blame] | 280 | static u64 lmb_align_down(u64 addr, u64 size) | 
|  | 281 | { | 
|  | 282 | return addr & ~(size - 1); | 
|  | 283 | } | 
|  | 284 |  | 
|  | 285 | static u64 lmb_align_up(u64 addr, u64 size) | 
|  | 286 | { | 
|  | 287 | return (addr + (size - 1)) & ~(size - 1); | 
|  | 288 | } | 
|  | 289 |  | 
/* Try to place a @size-byte, @align-aligned allocation inside
 * [start, end), scanning downward from the top and stepping over
 * already-reserved ranges.  On success the range is recorded in
 * lmb.reserved and its base returned; ~(u64)0 means failure. */
static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
					   u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	/* Highest aligned candidate that still fits below end. */
	base = lmb_align_down((end - size), align);
	while (start <= base) {
		j = lmb_overlaps_region(&lmb.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (lmb_add_region(&lmb.reserved, base, size) < 0)
				base = ~(u64)0;	/* reserved table full */
			return base;
		}
		/* Candidate overlaps reservation j: retry just below it. */
		res_base = lmb.reserved.region[j].base;
		if (res_base < size)
			break;
		base = lmb_align_down(res_base - size, align);
	}

	return ~(u64)0;
}
|  | 313 |  | 
|  | 314 | static u64 __init lmb_alloc_nid_region(struct lmb_property *mp, | 
|  | 315 | u64 (*nid_range)(u64, u64, int *), | 
|  | 316 | u64 size, u64 align, int nid) | 
|  | 317 | { | 
|  | 318 | u64 start, end; | 
|  | 319 |  | 
|  | 320 | start = mp->base; | 
|  | 321 | end = start + mp->size; | 
|  | 322 |  | 
|  | 323 | start = lmb_align_up(start, align); | 
|  | 324 | while (start < end) { | 
|  | 325 | u64 this_end; | 
|  | 326 | int this_nid; | 
|  | 327 |  | 
|  | 328 | this_end = nid_range(start, end, &this_nid); | 
|  | 329 | if (this_nid == nid) { | 
|  | 330 | u64 ret = lmb_alloc_nid_unreserved(start, this_end, | 
|  | 331 | size, align); | 
|  | 332 | if (ret != ~(u64)0) | 
|  | 333 | return ret; | 
|  | 334 | } | 
|  | 335 | start = this_end; | 
|  | 336 | } | 
|  | 337 |  | 
|  | 338 | return ~(u64)0; | 
|  | 339 | } | 
|  | 340 |  | 
|  | 341 | u64 __init lmb_alloc_nid(u64 size, u64 align, int nid, | 
|  | 342 | u64 (*nid_range)(u64 start, u64 end, int *nid)) | 
|  | 343 | { | 
|  | 344 | struct lmb_region *mem = &lmb.memory; | 
|  | 345 | int i; | 
|  | 346 |  | 
| David S. Miller | 4978db5 | 2008-05-12 16:51:15 -0700 | [diff] [blame] | 347 | BUG_ON(0 == size); | 
|  | 348 |  | 
|  | 349 | size = lmb_align_up(size, align); | 
|  | 350 |  | 
| David S. Miller | c50f68c | 2008-03-24 20:50:48 +1100 | [diff] [blame] | 351 | for (i = 0; i < mem->cnt; i++) { | 
|  | 352 | u64 ret = lmb_alloc_nid_region(&mem->region[i], | 
|  | 353 | nid_range, | 
|  | 354 | size, align, nid); | 
|  | 355 | if (ret != ~(u64)0) | 
|  | 356 | return ret; | 
|  | 357 | } | 
|  | 358 |  | 
|  | 359 | return lmb_alloc(size, align); | 
|  | 360 | } | 
|  | 361 |  | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 362 | u64 __init lmb_alloc(u64 size, u64 align) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 363 | { | 
|  | 364 | return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); | 
|  | 365 | } | 
|  | 366 |  | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 367 | u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 368 | { | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 369 | u64 alloc; | 
| Michael Ellerman | d7a5b2f | 2006-01-25 21:31:28 +1300 | [diff] [blame] | 370 |  | 
|  | 371 | alloc = __lmb_alloc_base(size, align, max_addr); | 
|  | 372 |  | 
| Michael Ellerman | 2c27660 | 2006-03-16 14:47:20 +1100 | [diff] [blame] | 373 | if (alloc == 0) | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 374 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", | 
|  | 375 | (unsigned long long) size, (unsigned long long) max_addr); | 
| Michael Ellerman | d7a5b2f | 2006-01-25 21:31:28 +1300 | [diff] [blame] | 376 |  | 
|  | 377 | return alloc; | 
|  | 378 | } | 
|  | 379 |  | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 380 | u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr) | 
| Michael Ellerman | d7a5b2f | 2006-01-25 21:31:28 +1300 | [diff] [blame] | 381 | { | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 382 | long i, j; | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 383 | u64 base = 0; | 
| Paul Mackerras | d9024df | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 384 | u64 res_base; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 385 |  | 
| Michael Ellerman | 8c20faf | 2006-01-25 21:31:26 +1300 | [diff] [blame] | 386 | BUG_ON(0 == size); | 
|  | 387 |  | 
| David S. Miller | 4978db5 | 2008-05-12 16:51:15 -0700 | [diff] [blame] | 388 | size = lmb_align_up(size, align); | 
|  | 389 |  | 
| David S. Miller | d9b2b2a | 2008-02-13 16:56:49 -0800 | [diff] [blame] | 390 | /* On some platforms, make sure we allocate lowmem */ | 
| Paul Mackerras | d9024df | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 391 | /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */ | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 392 | if (max_addr == LMB_ALLOC_ANYWHERE) | 
| David S. Miller | d9b2b2a | 2008-02-13 16:56:49 -0800 | [diff] [blame] | 393 | max_addr = LMB_REAL_LIMIT; | 
|  | 394 |  | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 395 | for (i = lmb.memory.cnt - 1; i >= 0; i--) { | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 396 | u64 lmbbase = lmb.memory.region[i].base; | 
|  | 397 | u64 lmbsize = lmb.memory.region[i].size; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 398 |  | 
| Paul Mackerras | d9024df | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 399 | if (lmbsize < size) | 
|  | 400 | continue; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 401 | if (max_addr == LMB_ALLOC_ANYWHERE) | 
| David S. Miller | d9b2b2a | 2008-02-13 16:56:49 -0800 | [diff] [blame] | 402 | base = lmb_align_down(lmbbase + lmbsize - size, align); | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 403 | else if (lmbbase < max_addr) { | 
|  | 404 | base = min(lmbbase + lmbsize, max_addr); | 
| David S. Miller | d9b2b2a | 2008-02-13 16:56:49 -0800 | [diff] [blame] | 405 | base = lmb_align_down(base - size, align); | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 406 | } else | 
|  | 407 | continue; | 
|  | 408 |  | 
| Paul Mackerras | d9024df | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 409 | while (base && lmbbase <= base) { | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 410 | j = lmb_overlaps_region(&lmb.reserved, base, size); | 
| Paul Mackerras | d9024df | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 411 | if (j < 0) { | 
|  | 412 | /* this area isn't reserved, take it */ | 
| David S. Miller | 4978db5 | 2008-05-12 16:51:15 -0700 | [diff] [blame] | 413 | if (lmb_add_region(&lmb.reserved, base, size) < 0) | 
| Paul Mackerras | d9024df | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 414 | return 0; | 
|  | 415 | return base; | 
|  | 416 | } | 
|  | 417 | res_base = lmb.reserved.region[j].base; | 
|  | 418 | if (res_base < size) | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 419 | break; | 
| Paul Mackerras | d9024df | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 420 | base = lmb_align_down(res_base - size, align); | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 421 | } | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 422 | } | 
| Paul Mackerras | d9024df | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 423 | return 0; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 424 | } | 
|  | 425 |  | 
/* You must call lmb_analyze() before this. */
/* Total bytes of known physical memory, as summed by lmb_analyze(). */
u64 __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}
|  | 431 |  | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 432 | u64 __init lmb_end_of_DRAM(void) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 433 | { | 
|  | 434 | int idx = lmb.memory.cnt - 1; | 
|  | 435 |  | 
|  | 436 | return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); | 
|  | 437 | } | 
|  | 438 |  | 
| Michael Ellerman | 2babf5c | 2006-05-17 18:00:46 +1000 | [diff] [blame] | 439 | /* You must call lmb_analyze() after this. */ | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 440 | void __init lmb_enforce_memory_limit(u64 memory_limit) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 441 | { | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 442 | unsigned long i; | 
|  | 443 | u64 limit; | 
| Michael Ellerman | 2babf5c | 2006-05-17 18:00:46 +1000 | [diff] [blame] | 444 | struct lmb_property *p; | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 445 |  | 
| Paul Mackerras | 300613e | 2008-04-12 15:20:59 +1000 | [diff] [blame] | 446 | if (!memory_limit) | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 447 | return; | 
|  | 448 |  | 
| Michael Ellerman | 2babf5c | 2006-05-17 18:00:46 +1000 | [diff] [blame] | 449 | /* Truncate the lmb regions to satisfy the memory limit. */ | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 450 | limit = memory_limit; | 
|  | 451 | for (i = 0; i < lmb.memory.cnt; i++) { | 
|  | 452 | if (limit > lmb.memory.region[i].size) { | 
|  | 453 | limit -= lmb.memory.region[i].size; | 
|  | 454 | continue; | 
|  | 455 | } | 
|  | 456 |  | 
|  | 457 | lmb.memory.region[i].size = limit; | 
|  | 458 | lmb.memory.cnt = i + 1; | 
|  | 459 | break; | 
|  | 460 | } | 
| Michael Ellerman | 2babf5c | 2006-05-17 18:00:46 +1000 | [diff] [blame] | 461 |  | 
| Michael Ellerman | 30f30e1 | 2006-07-04 17:13:23 +1000 | [diff] [blame] | 462 | if (lmb.memory.region[0].size < lmb.rmo_size) | 
|  | 463 | lmb.rmo_size = lmb.memory.region[0].size; | 
| Michael Ellerman | 2babf5c | 2006-05-17 18:00:46 +1000 | [diff] [blame] | 464 |  | 
| David S. Miller | ebb1951 | 2008-08-15 19:57:57 -0700 | [diff] [blame] | 465 | memory_limit = lmb_end_of_DRAM(); | 
|  | 466 |  | 
| Michael Ellerman | 2babf5c | 2006-05-17 18:00:46 +1000 | [diff] [blame] | 467 | /* And truncate any reserves above the limit also. */ | 
|  | 468 | for (i = 0; i < lmb.reserved.cnt; i++) { | 
|  | 469 | p = &lmb.reserved.region[i]; | 
|  | 470 |  | 
|  | 471 | if (p->base > memory_limit) | 
|  | 472 | p->size = 0; | 
|  | 473 | else if ((p->base + p->size) > memory_limit) | 
|  | 474 | p->size = memory_limit - p->base; | 
|  | 475 |  | 
|  | 476 | if (p->size == 0) { | 
|  | 477 | lmb_remove_region(&lmb.reserved, i); | 
|  | 478 | i--; | 
|  | 479 | } | 
|  | 480 | } | 
| Paul Mackerras | 7c8c6b9 | 2005-10-06 12:23:33 +1000 | [diff] [blame] | 481 | } | 
| Kumar Gala | f98eeb4 | 2008-01-09 11:27:23 -0600 | [diff] [blame] | 482 |  | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 483 | int __init lmb_is_reserved(u64 addr) | 
| Kumar Gala | f98eeb4 | 2008-01-09 11:27:23 -0600 | [diff] [blame] | 484 | { | 
|  | 485 | int i; | 
|  | 486 |  | 
|  | 487 | for (i = 0; i < lmb.reserved.cnt; i++) { | 
| Becky Bruce | e5f2709 | 2008-02-13 16:58:39 -0800 | [diff] [blame] | 488 | u64 upper = lmb.reserved.region[i].base + | 
|  | 489 | lmb.reserved.region[i].size - 1; | 
| Kumar Gala | f98eeb4 | 2008-01-09 11:27:23 -0600 | [diff] [blame] | 490 | if ((addr >= lmb.reserved.region[i].base) && (addr <= upper)) | 
|  | 491 | return 1; | 
|  | 492 | } | 
|  | 493 | return 0; | 
|  | 494 | } | 
| Badari Pulavarty | 9d88a2e | 2008-04-18 13:33:53 -0700 | [diff] [blame] | 495 |  | 
|  | 496 | /* | 
|  | 497 | * Given a <base, len>, find which memory regions belong to this range. | 
|  | 498 | * Adjust the request and return a contiguous chunk. | 
|  | 499 | */ | 
|  | 500 | int lmb_find(struct lmb_property *res) | 
|  | 501 | { | 
|  | 502 | int i; | 
|  | 503 | u64 rstart, rend; | 
|  | 504 |  | 
|  | 505 | rstart = res->base; | 
|  | 506 | rend = rstart + res->size - 1; | 
|  | 507 |  | 
|  | 508 | for (i = 0; i < lmb.memory.cnt; i++) { | 
|  | 509 | u64 start = lmb.memory.region[i].base; | 
|  | 510 | u64 end = start + lmb.memory.region[i].size - 1; | 
|  | 511 |  | 
|  | 512 | if (start > rend) | 
|  | 513 | return -1; | 
|  | 514 |  | 
|  | 515 | if ((end >= rstart) && (start < rend)) { | 
|  | 516 | /* adjust the request */ | 
|  | 517 | if (rstart < start) | 
|  | 518 | rstart = start; | 
|  | 519 | if (rend > end) | 
|  | 520 | rend = end; | 
|  | 521 | res->base = rstart; | 
|  | 522 | res->size = rend - rstart + 1; | 
|  | 523 | return 0; | 
|  | 524 | } | 
|  | 525 | } | 
|  | 526 | return -1; | 
|  | 527 | } |