/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#ifdef CONFIG_PPC32
#include "mmu_decl.h"		/* for __max_low_memory */
#endif

#undef DEBUG

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

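/*
 * The one global LMB descriptor.  lmb.memory tracks the physical
 * memory ranges reported by the firmware; lmb.reserved tracks ranges
 * set aside during early boot.  Both arrays are kept sorted by base
 * address.
 */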
struct lmb lmb;

void lmb_dump_all(void)
{
#ifdef DEBUG
	unsigned long i;

	DBG("lmb_dump_all:\n");
	DBG("    memory.cnt		  = 0x%lx\n", lmb.memory.cnt);
	DBG("    memory.size		  = 0x%lx\n", lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		DBG("    memory.region[0x%lx].base       = 0x%lx\n",
			i, lmb.memory.region[i].base);
		DBG("		      .size     = 0x%lx\n",
			lmb.memory.region[i].size);
	}

	DBG("\n    reserved.cnt	  = 0x%lx\n", lmb.reserved.cnt);
	DBG("    reserved.size	  = 0x%lx\n", lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		DBG("    reserved.region[0x%lx].base       = 0x%lx\n",
			i, lmb.reserved.region[i].base);
		DBG("		      .size     = 0x%lx\n",
			lmb.reserved.region[i].size);
	}
#endif /* DEBUG */
}

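/*
 * Two ranges [base1, base1+size1) and [base2, base2+size2) overlap
 * iff each one starts below the other's end.
 */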
static unsigned long __init lmb_addrs_overlap(unsigned long base1,
		unsigned long size1, unsigned long base2, unsigned long size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

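/*
 * Return 1 if the second range immediately follows the first, -1 if
 * the first immediately follows the second, and 0 otherwise.
 */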
static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
		unsigned long base2, unsigned long size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

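/* As above, but for two existing regions r1 and r2 of @rgn. */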
static long __init lmb_regions_adjacent(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	unsigned long base1 = rgn->region[r1].base;
	unsigned long size1 = rgn->region[r1].size;
	unsigned long base2 = rgn->region[r2].base;
	unsigned long size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	unsigned long i;

	rgn->region[r1].size += rgn->region[r2].size;
	for (i = r2; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i+1].base;
		rgn->region[i].size = rgn->region[i+1].size;
	}
	rgn->cnt--;
}

/* This routine is called with relocation disabled. */
void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}

/* This routine may be called with relocation disabled. */
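/* Recompute lmb.memory.size as the sum of all memory region sizes. */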
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}

/* This routine is called with relocation disabled. */
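/*
 * Add [base, base+size) to @rgn: first try to merge it into an
 * adjacent existing region (extending that region downwards or
 * upwards), then merge the two neighbouring regions if the new range
 * bridged them; otherwise insert it at its sorted position, shifting
 * higher regions up.  Returns -1 if the table is full.
 */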
static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
		unsigned long size)
{
	unsigned long coalesced = 0;
	long adjacent, i;	/* i must be signed: the insertion loop
				 * below counts down past zero */

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
		lmb_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].size = rgn->region[i].size;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].size = size;
			break;
		}
	}
	rgn->cnt++;

	return 0;
}

/* This routine may be called with relocation disabled. */
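/*
 * Register a range of physical memory; a range starting at zero also
 * records the RMO size (see below).
 */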
long __init lmb_add(unsigned long base, unsigned long size)
{
	struct lmb_region *_rgn = &(lmb.memory);

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}

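/* Reserve a range so the boot-time allocator will not hand it out. */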
long __init lmb_reserve(unsigned long base, unsigned long size)
{
	struct lmb_region *_rgn = &(lmb.reserved);

	return lmb_add_region(_rgn, base, size);
}

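/*
 * Return the index of the first region of @rgn that overlaps
 * [base, base+size), or -1 if there is no overlap.
 */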
long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
		unsigned long size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

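/* Allocate @size bytes anywhere in memory, at the given alignment. */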
unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

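/*
 * Allocate @size bytes below @max_addr: walk the memory regions from
 * the top down, take the highest aligned address that fits, and slide
 * the candidate down below any reserved region it overlaps.  The
 * winning range is added to lmb.reserved.  Returns 0 on failure.
 */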
unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
		unsigned long max_addr)
{
	long i, j;
	unsigned long base = 0;

#ifdef CONFIG_PPC32
	/* On 32-bit, make sure we allocate lowmem */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = __max_low_memory;
#endif
	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		unsigned long lmbbase = lmb.memory.region[i].base;
		unsigned long lmbsize = lmb.memory.region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = _ALIGN_DOWN(base - size, align);
		} else
			continue;

		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
			base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
					   align);

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	lmb_add_region(&lmb.reserved, base, size);

	return base;
}

/* You must call lmb_analyze() before this. */
unsigned long __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

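/* The regions are kept sorted, so the last one ends at the top of DRAM. */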
unsigned long __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/*
 * Truncate the lmb list to memory_limit if it's set.
 * You must call lmb_analyze() after this.
 */
void __init lmb_enforce_memory_limit(unsigned long memory_limit)
{
	unsigned long i, limit;

	if (!memory_limit)
		return;

	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		/* The limit lands inside this region: clip it and
		 * drop all higher regions. */
		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}
}