/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/efi.h>

#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca.h>

#define EFI_DEBUG	0

extern efi_status_t efi_call_phys (void *, ...);

struct efi efi;
EXPORT_SYMBOL(efi);
static efi_runtime_services_t *runtime;
static unsigned long mem_limit = ~0UL, max_addr = ~0UL;

#define efi_call_virt(f, args...)	(*(f))(args)

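/*
 * The STUB_* macros below generate two wrappers for each EFI runtime
 * service: a "phys" variant that converts pointer arguments to physical
 * addresses (via phys_ptr) and calls the service through efi_call_phys,
 * and a "virt" variant that passes arguments through unchanged (via id)
 * once EFI has been switched into virtual mode.  Optional pointer
 * arguments are only converted when non-NULL.  Both variants save and
 * restore the scratch floating-point registers around the firmware
 * call, since the call may clobber them.
 */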
#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_cap_t *atc = NULL; \
	efi_status_t ret; \
\
	if (tc) \
		atc = adjust_arg(tc); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
\
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
\
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
				adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_t *atm = NULL; \
	efi_status_t ret; \
\
	if (tm) \
		atm = adjust_arg(tm); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
				enabled, atm); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
		       unsigned long *data_size, void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	u32 *aattr = NULL; \
	efi_status_t ret; \
\
	if (attr) \
		aattr = adjust_arg(attr); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
				adjust_arg(name), adjust_arg(vendor), aattr, \
				adjust_arg(data_size), adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
\
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
				adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
		       unsigned long data_size, void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
\
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
				adjust_arg(name), adjust_arg(vendor), attr, data_size, \
				adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u32 *count) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
\
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
				__va(runtime->get_next_high_mono_count), adjust_arg(count)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
		       unsigned long data_size, efi_char16_t *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_char16_t *adata = NULL; \
\
	if (data) \
		adata = adjust_arg(data); \
\
	ia64_save_scratch_fpregs(fr); \
	efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
			  reset_type, status, data_size, adata); \
	/* should not return, but just in case... */ \
	ia64_load_scratch_fpregs(fr); \
}

#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))

STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)

#define id(arg)	arg

STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)

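/*
 * Fill *ts from the EFI real-time clock; *ts is left zeroed if the
 * firmware call fails.
 */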
void
efi_gettimeofday (struct timespec *ts)
{
	efi_time_t tm;

	memset(ts, 0, sizeof(*ts));
	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
		return;

	ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
	ts->tv_nsec = tm.nanosecond;
}

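/*
 * Return 1 if the descriptor describes cacheable (WB) memory of a type
 * that the OS may use as ordinary RAM, 0 otherwise.
 */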
static int
is_available_memory (efi_memory_desc_t *md)
{
	if (!(md->attribute & EFI_MEMORY_WB))
		return 0;

	switch (md->type) {
	case EFI_LOADER_CODE:
	case EFI_LOADER_DATA:
	case EFI_BOOT_SERVICES_CODE:
	case EFI_BOOT_SERVICES_DATA:
	case EFI_CONVENTIONAL_MEMORY:
		return 1;
	}
	return 0;
}

/*
 * Trim descriptor MD so it starts at address START_ADDR.  If the descriptor covers
 * memory that is normally available to the kernel, issue a warning that some memory
 * is being ignored.
 */
static void
trim_bottom (efi_memory_desc_t *md, u64 start_addr)
{
	u64 num_skipped_pages;

	if (md->phys_addr >= start_addr || !md->num_pages)
		return;

	num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
	if (num_skipped_pages > md->num_pages)
		num_skipped_pages = md->num_pages;

	if (is_available_memory(md))
		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
		       "at 0x%lx\n", __FUNCTION__,
		       (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
		       md->phys_addr, start_addr - IA64_GRANULE_SIZE);
	/*
	 * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
	 * descriptor list to become unsorted.  In such a case, md->num_pages will be
	 * zero, so the Right Thing will happen.
	 */
	md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
	md->num_pages -= num_skipped_pages;
}

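/*
 * Trim descriptor MD so it ends at address END_ADDR.  If the descriptor covers
 * memory that is normally available to the kernel, issue a warning that some memory
 * is being ignored.
 */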
static void
trim_top (efi_memory_desc_t *md, u64 end_addr)
{
	u64 num_dropped_pages, md_end_addr;

	md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

	if (md_end_addr <= end_addr || !md->num_pages)
		return;

	num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
	if (num_dropped_pages > md->num_pages)
		num_dropped_pages = md->num_pages;

	if (is_available_memory(md))
		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
		       "at 0x%lx\n", __FUNCTION__,
		       (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
		       md->phys_addr, end_addr);
	md->num_pages -= num_dropped_pages;
}

/*
 * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
 * has memory that is available for OS use.
 */
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
	int prev_valid = 0;
	struct range {
		u64 start;
		u64 end;
	} prev, curr;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *check_md;
	u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
	unsigned long total_mem = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		/* skip over non-WB memory descriptors; that's all we're interested in... */
		if (!(md->attribute & EFI_MEMORY_WB))
			continue;

		/*
		 * granule_addr is the base of md's first granule.
		 * [granule_addr - first_non_wb_addr) is guaranteed to
		 * be contiguous WB memory.
		 */
		granule_addr = GRANULEROUNDDOWN(md->phys_addr);
		first_non_wb_addr = max(first_non_wb_addr, granule_addr);

		if (first_non_wb_addr < md->phys_addr) {
			trim_bottom(md, granule_addr + IA64_GRANULE_SIZE);
			granule_addr = GRANULEROUNDDOWN(md->phys_addr);
			first_non_wb_addr = max(first_non_wb_addr, granule_addr);
		}

		for (q = p; q < efi_map_end; q += efi_desc_size) {
			check_md = q;

			if ((check_md->attribute & EFI_MEMORY_WB) &&
			    (check_md->phys_addr == first_non_wb_addr))
				first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
			else
				break;		/* non-WB or hole */
		}

		last_granule_addr = GRANULEROUNDDOWN(first_non_wb_addr);
		if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT))
			trim_top(md, last_granule_addr);

		if (is_available_memory(md)) {
			if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) >= max_addr) {
				if (md->phys_addr >= max_addr)
					continue;
				md->num_pages = (max_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
				first_non_wb_addr = max_addr;
			}

			if (total_mem >= mem_limit)
				continue;

			if (total_mem + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
				unsigned long limit_addr = md->phys_addr;

				limit_addr += mem_limit - total_mem;
				limit_addr = GRANULEROUNDDOWN(limit_addr);

				if (md->phys_addr > limit_addr)
					continue;

				md->num_pages = (limit_addr - md->phys_addr) >>
						EFI_PAGE_SHIFT;
				first_non_wb_addr = max_addr = md->phys_addr +
						(md->num_pages << EFI_PAGE_SHIFT);
			}
			total_mem += (md->num_pages << EFI_PAGE_SHIFT);

			if (md->num_pages == 0)
				continue;

			curr.start = PAGE_OFFSET + md->phys_addr;
			curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);

			if (!prev_valid) {
				prev = curr;
				prev_valid = 1;
			} else {
				if (curr.start < prev.start)
					printk(KERN_ERR "Oops: EFI memory table not ordered!\n");

				if (prev.end == curr.start) {
					/* merge two consecutive memory ranges */
					prev.end = curr.end;
				} else {
					start = PAGE_ALIGN(prev.start);
					end = prev.end & PAGE_MASK;
					if ((end > start) && (*callback)(start, end, arg) < 0)
						return;
					prev = curr;
				}
			}
		}
	}
	if (prev_valid) {
		start = PAGE_ALIGN(prev.start);
		end = prev.end & PAGE_MASK;
		if (end > start)
			(*callback)(start, end, arg);
	}
}

/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
 * Abstraction Layer chapter 11 in ADAG.
 */

void *
efi_get_pal_addr (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	int pal_code_count = 0;
	u64 vaddr, mask;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type != EFI_PAL_CODE)
			continue;

		if (++pal_code_count > 1) {
			printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
			       md->phys_addr);
			continue;
		}
		/*
		 * The only ITLB entry in region 7 that is used is the one installed by
		 * __start().  That entry covers a 64MB range.
		 */
		mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
		vaddr = PAGE_OFFSET + md->phys_addr;

		/*
		 * We must check that the PAL mapping won't overlap with the kernel
		 * mapping.
		 *
		 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
		 * 256KB, and only one ITR is needed to map it.  This implies that the
		 * PAL code is always aligned on its size, i.e., the closest matching page
		 * size supported by the TLB.  Therefore PAL code is guaranteed never to
		 * cross a 64MB boundary unless it is bigger than 64MB (very unlikely!).
		 * So for now the following test is enough to determine whether or not we
		 * need a dedicated ITR for the PAL code.
		 */
		if ((vaddr & mask) == (KERNEL_START & mask)) {
			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
			       __FUNCTION__);
			continue;
		}

		if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
			panic("Woah! PAL code size bigger than a granule!");

#if EFI_DEBUG
		mask = ~((1 << IA64_GRANULE_SHIFT) - 1);

		printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
		       smp_processor_id(), md->phys_addr,
		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
		return __va(md->phys_addr);
	}
	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
	       __FUNCTION__);
	return NULL;
}

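/*
 * Install a translation register entry covering the granule that holds
 * the PAL code, so PAL can be called safely in virtual mode.
 */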
void
efi_map_pal_code (void)
{
	void *pal_vaddr = efi_get_pal_addr ();
	u64 psr;

	if (!pal_vaddr)
		return;

	/*
	 * Cannot write to CRx with PSR.ic=1
	 */
	psr = ia64_clear_ic();
	ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
		 IA64_GRANULE_SHIFT);
	ia64_set_psr(psr);		/* restore psr */
	ia64_srlz_i();
}

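/*
 * Early boot setup: parse "mem=" and "max_addr=" from the command line,
 * locate and sanity-check the EFI system table, record the configuration
 * tables we care about, install the physical-mode runtime service
 * wrappers, map the PAL code, and switch EFI into virtual mode.
 */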
void __init
efi_init (void)
{
	void *efi_map_start, *efi_map_end;
	efi_config_table_t *config_tables;
	efi_char16_t *c16;
	u64 efi_desc_size;
	char *cp, *end, vendor[100] = "unknown";
	extern char saved_command_line[];
	int i;

	/* it's too early to be able to use the standard kernel command line support... */
	for (cp = saved_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			mem_limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else if (memcmp(cp, "max_addr=", 9) == 0) {
			cp += 9;
			max_addr = GRANULEROUNDDOWN(memparse(cp, &end));
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}
	if (max_addr != ~0UL)
		printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);

	efi.systab = __va(ia64_boot_param->efi_systab);

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab == NULL)
		panic("Woah! Can't find EFI system table.\n");
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
		panic("Woah! EFI system table signature incorrect\n");
	if ((efi.systab->hdr.revision ^ EFI_SYSTEM_TABLE_REVISION) >> 16 != 0)
		printk(KERN_WARNING "Warning: EFI system table major version mismatch: "
		       "got %d.%02d, expected %d.%02d\n",
		       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff,
		       EFI_SYSTEM_TABLE_REVISION >> 16, EFI_SYSTEM_TABLE_REVISION & 0xffff);

	config_tables = __va(efi.systab->tables);

	/* Show what we know for posterity */
	c16 = __va(efi.systab->fw_vendor);
	if (c16) {
		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
			vendor[i] = *c16++;
		vendor[i] = '\0';
	}

	printk(KERN_INFO "EFI v%u.%.02u by %s:",
	       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);

	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
			efi.mps = __va(config_tables[i].table);
			printk(" MPS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
			efi.acpi20 = __va(config_tables[i].table);
			printk(" ACPI 2.0=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
			efi.acpi = __va(config_tables[i].table);
			printk(" ACPI=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
			efi.smbios = __va(config_tables[i].table);
			printk(" SMBIOS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
			efi.sal_systab = __va(config_tables[i].table);
			printk(" SALsystab=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
			efi.hcdp = __va(config_tables[i].table);
			printk(" HCDP=0x%lx", config_tables[i].table);
		}
	}
	printk("\n");

	runtime = __va(efi.systab->runtime);
	efi.get_time = phys_get_time;
	efi.set_time = phys_set_time;
	efi.get_wakeup_time = phys_get_wakeup_time;
	efi.set_wakeup_time = phys_set_wakeup_time;
	efi.get_variable = phys_get_variable;
	efi.get_next_variable = phys_get_next_variable;
	efi.set_variable = phys_set_variable;
	efi.get_next_high_mono_count = phys_get_next_high_mono_count;
	efi.reset_system = phys_reset_system;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

#if EFI_DEBUG
	/* print EFI memory map: */
	{
		efi_memory_desc_t *md;
		void *p;

		for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
			md = p;
			printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
			       i, md->type, md->attribute, md->phys_addr,
			       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
			       md->num_pages >> (20 - EFI_PAGE_SHIFT));
		}
	}
#endif

	efi_map_pal_code();
	efi_enter_virtual_mode();
}

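/*
 * Assign a kernel virtual address to every EFI_MEMORY_RUNTIME region,
 * hand the updated memory map to the firmware via SetVirtualAddressMap(),
 * and, on success, switch the efi.* entry points over to the
 * virtual-mode wrappers.
 */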
void
efi_enter_virtual_mode (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	efi_status_t status;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			/*
			 * Some descriptors have multiple bits set, so the order of
			 * the tests is relevant.
			 */
			if (md->attribute & EFI_MEMORY_WB) {
				md->virt_addr = (u64) __va(md->phys_addr);
			} else if (md->attribute & EFI_MEMORY_UC) {
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WC) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
									   | _PAGE_D
									   | _PAGE_MA_WC
									   | _PAGE_PL_0
									   | _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			} else if (md->attribute & EFI_MEMORY_WT) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
									   | _PAGE_D | _PAGE_MA_WT
									   | _PAGE_PL_0
									   | _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			}
		}
	}

	status = efi_call_phys(__va(runtime->set_virtual_address_map),
			       ia64_boot_param->efi_memmap_size,
			       efi_desc_size, ia64_boot_param->efi_memdesc_version,
			       ia64_boot_param->efi_memmap);
	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
		       "(status=%lu)\n", status);
		return;
	}

	/*
	 * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
	 */
	efi.get_time = virt_get_time;
	efi.set_time = virt_set_time;
	efi.get_wakeup_time = virt_get_wakeup_time;
	efi.set_wakeup_time = virt_set_wakeup_time;
	efi.get_variable = virt_get_variable;
	efi.get_next_variable = virt_get_next_variable;
	efi.set_variable = virt_set_variable;
	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
	efi.reset_system = virt_reset_system;
}

/*
 * Walk the EFI memory map looking for the I/O port range.  There can only be one entry
 * of this type; other I/O port ranges should be described via ACPI.
 */
u64
efi_get_iobase (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
			if (md->attribute & EFI_MEMORY_UC)
				return md->phys_addr;
		}
	}
	return 0;
}

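/*
 * Return the EFI memory type of the descriptor covering phys_addr,
 * or 0 if no descriptor covers it.
 */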
u32
efi_mem_type (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
			return md->type;
	}
	return 0;
}

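/*
 * Return the EFI memory attributes of the descriptor covering phys_addr,
 * or 0 if no descriptor covers it.
 */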
u64
efi_mem_attributes (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
			return md->attribute;
	}
	return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);

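/*
 * Return 1 if phys_addr lies in a WB memory descriptor, clamping *size
 * so the range does not extend past the end of that descriptor;
 * return 0 otherwise.
 */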
int
valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
			if (!(md->attribute & EFI_MEMORY_WB))
				return 0;

			if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
				*size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
			return 1;
		}
	}
	return 0;
}

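/*
 * Parse the EFI "ConOut" variable and return 1 if every console device
 * path in it contains a UART node, 0 otherwise (or on any parse error).
 */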
int __init
efi_uart_console_only(void)
{
	efi_status_t status;
	char *s, name[] = "ConOut";
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	efi_char16_t *utf16, name_utf16[32];
	unsigned char data[1024];
	unsigned long size = sizeof(data);
	struct efi_generic_dev_path *hdr, *end_addr;
	int uart = 0;

	/* Convert to UTF-16 */
	utf16 = name_utf16;
	s = name;
	while (*s)
		*utf16++ = *s++ & 0x7f;
	*utf16 = 0;

	status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
	if (status != EFI_SUCCESS) {
		printk(KERN_ERR "No EFI %s variable?\n", name);
		return 0;
	}

	hdr = (struct efi_generic_dev_path *) data;
	end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
	while (hdr < end_addr) {
		if (hdr->type == EFI_DEV_MSG &&
		    hdr->sub_type == EFI_DEV_MSG_UART)
			uart = 1;
		else if (hdr->type == EFI_DEV_END_PATH ||
			 hdr->type == EFI_DEV_END_PATH2) {
			if (!uart)
				return 0;
			if (hdr->sub_type == EFI_DEV_END_ENTIRE)
				return 1;
			uart = 0;
		}
		hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
	}
	printk(KERN_ERR "Malformed %s value\n", name);
	return 0;
}