/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 0.9, April 30, 1999
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
 * Not all EFI Runtime Services are implemented yet, as EFI only
 * supports physical mode addressing on SoftSDV.  This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/efi.h>

#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca.h>

#define EFI_DEBUG	0

extern efi_status_t efi_call_phys (void *, ...);

struct efi efi;
EXPORT_SYMBOL(efi);
static efi_runtime_services_t *runtime;
static unsigned long mem_limit = ~0UL, max_addr = ~0UL;

#define efi_call_virt(f, args...)	(*(f))(args)

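/*
 * Each STUB_* macro below generates a thin wrapper around one EFI runtime
 * service.  The wrapper saves and restores the scratch floating-point
 * registers around the firmware call (EFI code may clobber them) and runs
 * pointer arguments through adjust_arg().  For the physical-mode stubs,
 * adjust_arg() translates kernel virtual addresses to physical addresses;
 * for the virtual-mode stubs it is the identity.  As an illustration,
 * STUB_GET_TIME(phys, phys_ptr) expands into a function phys_get_time()
 * that invokes the firmware's GetTime service via efi_call_phys().
 */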
#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_cap_t *atc = NULL; \
	efi_status_t ret; \
 \
	if (tc) \
		atc = adjust_arg(tc); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
				adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_t *atm = NULL; \
	efi_status_t ret; \
 \
	if (tm) \
		atm = adjust_arg(tm); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
				enabled, atm); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
		       unsigned long *data_size, void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	u32 *aattr = NULL; \
	efi_status_t ret; \
 \
	if (attr) \
		aattr = adjust_arg(attr); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
				adjust_arg(name), adjust_arg(vendor), aattr, \
				adjust_arg(data_size), adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
				adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
		       unsigned long data_size, void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
				adjust_arg(name), adjust_arg(vendor), attr, data_size, \
				adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u32 *count) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
				__va(runtime->get_next_high_mono_count), adjust_arg(count)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
		       unsigned long data_size, efi_char16_t *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_char16_t *adata = NULL; \
 \
	if (data) \
		adata = adjust_arg(data); \
 \
	ia64_save_scratch_fpregs(fr); \
	efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
			  reset_type, status, data_size, adata); \
	/* should not return, but just in case... */ \
	ia64_load_scratch_fpregs(fr); \
}

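/*
 * Two sets of stubs are generated: the phys_* variants call the firmware in
 * physical mode (pointer arguments are translated with ia64_tpa() via
 * phys_ptr) and are used from efi_init() until SetVirtualAddressMap has been
 * called; the virt_* variants pass pointers through unchanged and are
 * installed by efi_enter_virtual_mode().
 */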
#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))

STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)

#define id(arg)	arg

STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)

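/*
 * Read the current time from the EFI real-time clock and convert it to a
 * struct timespec.  On failure, *ts is left zeroed.
 */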
void
efi_gettimeofday (struct timespec *ts)
{
	efi_time_t tm;

	memset(ts, 0, sizeof(*ts));
	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
		return;

	ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
	ts->tv_nsec = tm.nanosecond;
}

static int
is_available_memory (efi_memory_desc_t *md)
{
	if (!(md->attribute & EFI_MEMORY_WB))
		return 0;

	switch (md->type) {
	case EFI_LOADER_CODE:
	case EFI_LOADER_DATA:
	case EFI_BOOT_SERVICES_CODE:
	case EFI_BOOT_SERVICES_DATA:
	case EFI_CONVENTIONAL_MEMORY:
		return 1;
	}
	return 0;
}

/*
 * Trim descriptor MD so it starts at address START_ADDR.  If the descriptor covers
 * memory that is normally available to the kernel, issue a warning that some memory
 * is being ignored.
 */
static void
trim_bottom (efi_memory_desc_t *md, u64 start_addr)
{
	u64 num_skipped_pages;

	if (md->phys_addr >= start_addr || !md->num_pages)
		return;

	num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
	if (num_skipped_pages > md->num_pages)
		num_skipped_pages = md->num_pages;

	if (is_available_memory(md))
		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
		       "at 0x%lx\n", __FUNCTION__,
		       (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
		       md->phys_addr, start_addr - IA64_GRANULE_SIZE);
	/*
	 * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
	 * descriptor list to become unsorted.  In such a case, md->num_pages will be
	 * zero, so the Right Thing will happen.
	 */
	md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
	md->num_pages -= num_skipped_pages;
}

static void
trim_top (efi_memory_desc_t *md, u64 end_addr)
{
	u64 num_dropped_pages, md_end_addr;

	md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

	if (md_end_addr <= end_addr || !md->num_pages)
		return;

	num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
	if (num_dropped_pages > md->num_pages)
		num_dropped_pages = md->num_pages;

	if (is_available_memory(md))
		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
		       "at 0x%lx\n", __FUNCTION__,
		       (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
		       md->phys_addr, end_addr);
	md->num_pages -= num_dropped_pages;
}

/*
 * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
 * has memory that is available for OS use.
 */
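/*
 * Only memory that lies within complete granules of contiguous WB memory is
 * reported: trim_bottom()/trim_top() cut away partial granules at either end
 * of a descriptor, and adjacent ranges are merged before the callback runs,
 * so the callback only ever sees page-aligned, granule-contained ranges.
 */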
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
	int prev_valid = 0;
	struct range {
		u64 start;
		u64 end;
	} prev, curr;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *check_md;
	u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
	unsigned long total_mem = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		/* skip over non-WB memory descriptors; that's all we're interested in... */
		if (!(md->attribute & EFI_MEMORY_WB))
			continue;

		/*
		 * granule_addr is the base of md's first granule.
		 * [granule_addr, first_non_wb_addr) is guaranteed to
		 * be contiguous WB memory.
		 */
		granule_addr = GRANULEROUNDDOWN(md->phys_addr);
		first_non_wb_addr = max(first_non_wb_addr, granule_addr);

		if (first_non_wb_addr < md->phys_addr) {
			trim_bottom(md, granule_addr + IA64_GRANULE_SIZE);
			granule_addr = GRANULEROUNDDOWN(md->phys_addr);
			first_non_wb_addr = max(first_non_wb_addr, granule_addr);
		}

		for (q = p; q < efi_map_end; q += efi_desc_size) {
			check_md = q;

			if ((check_md->attribute & EFI_MEMORY_WB) &&
			    (check_md->phys_addr == first_non_wb_addr))
				first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
			else
				break;		/* non-WB or hole */
		}

		last_granule_addr = GRANULEROUNDDOWN(first_non_wb_addr);
		if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT))
			trim_top(md, last_granule_addr);

		if (is_available_memory(md)) {
			if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) >= max_addr) {
				if (md->phys_addr >= max_addr)
					continue;
				md->num_pages = (max_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
				first_non_wb_addr = max_addr;
			}

			if (total_mem >= mem_limit)
				continue;

			if (total_mem + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
				unsigned long limit_addr = md->phys_addr;

				limit_addr += mem_limit - total_mem;
				limit_addr = GRANULEROUNDDOWN(limit_addr);

				if (md->phys_addr > limit_addr)
					continue;

				md->num_pages = (limit_addr - md->phys_addr) >>
						EFI_PAGE_SHIFT;
				first_non_wb_addr = max_addr = md->phys_addr +
						(md->num_pages << EFI_PAGE_SHIFT);
			}
			total_mem += (md->num_pages << EFI_PAGE_SHIFT);

			if (md->num_pages == 0)
				continue;

			curr.start = PAGE_OFFSET + md->phys_addr;
			curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);

			if (!prev_valid) {
				prev = curr;
				prev_valid = 1;
			} else {
				if (curr.start < prev.start)
					printk(KERN_ERR "Oops: EFI memory table not ordered!\n");

				if (prev.end == curr.start) {
					/* merge two consecutive memory ranges */
					prev.end = curr.end;
				} else {
					start = PAGE_ALIGN(prev.start);
					end = prev.end & PAGE_MASK;
					if ((end > start) && (*callback)(start, end, arg) < 0)
						return;
					prev = curr;
				}
			}
		}
	}
	if (prev_valid) {
		start = PAGE_ALIGN(prev.start);
		end = prev.end & PAGE_MASK;
		if (end > start)
			(*callback)(start, end, arg);
	}
}

/*
 * Walk the EFI memory map to pull out leftover pages in the lower
 * memory regions which do not end up in the regular memory map and
 * stick them into the uncached allocator.
 *
 * The regular walk function is significantly more complex than the
 * uncached walk, which means it really doesn't make sense to try and
 * merge the two.
 */
void __init
efi_memmap_walk_uc (efi_freemem_callback_t callback)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size, start, end;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->attribute == EFI_MEMORY_UC) {
			start = PAGE_ALIGN(md->phys_addr);
			end = PAGE_ALIGN((md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK);
			if ((*callback)(start, end, NULL) < 0)
				return;
		}
	}
}

/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
 * Abstraction Layer chapter 11 in ADAG.
 */

void *
efi_get_pal_addr (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	int pal_code_count = 0;
	u64 vaddr, mask;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type != EFI_PAL_CODE)
			continue;

		if (++pal_code_count > 1) {
			printk(KERN_ERR "Too many EFI PAL code memory ranges, dropped @ %lx\n",
			       md->phys_addr);
			continue;
		}
		/*
		 * The only ITLB entry in region 7 that is used is the one installed by
		 * __start().  That entry covers a 64MB range.
		 */
		mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
		vaddr = PAGE_OFFSET + md->phys_addr;

		/*
		 * We must check that the PAL mapping won't overlap with the kernel
		 * mapping.
		 *
		 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
		 * 256KB, and only one ITR is needed to map it.  This implies that the
		 * PAL code is always aligned on its size, i.e., the closest matching page
		 * size supported by the TLB.  Therefore PAL code is guaranteed never to
		 * cross a 64MB boundary unless it is bigger than 64MB (very unlikely!).
		 * So for now the following test is enough to determine whether or not we
		 * need a dedicated ITR for the PAL code.
		 */
		if ((vaddr & mask) == (KERNEL_START & mask)) {
			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
			       __FUNCTION__);
			continue;
		}

		if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
			panic("Woah!  PAL code size bigger than a granule!");

#if EFI_DEBUG
		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);

		printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
		       smp_processor_id(), md->phys_addr,
		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
		return __va(md->phys_addr);
	}
	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
	       __FUNCTION__);
	return NULL;
}

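/*
 * Pin the granule containing the PAL code into the instruction TLB with a
 * translation register (IA64_TR_PALCODE) so that PAL procedures can be
 * called in virtual mode without taking TLB faults.
 */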
void
efi_map_pal_code (void)
{
	void *pal_vaddr = efi_get_pal_addr ();
	u64 psr;

	if (!pal_vaddr)
		return;

	/*
	 * Cannot write to CRx with PSR.ic=1
	 */
	psr = ia64_clear_ic();
	ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
		 IA64_GRANULE_SHIFT);
	ia64_set_psr(psr);		/* restore psr */
	ia64_srlz_i();
}

void __init
efi_init (void)
{
	void *efi_map_start, *efi_map_end;
	efi_config_table_t *config_tables;
	efi_char16_t *c16;
	u64 efi_desc_size;
	char *cp, *end, vendor[100] = "unknown";
	extern char saved_command_line[];
	int i;

	/* it's too early to be able to use the standard kernel command line support... */
	for (cp = saved_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			mem_limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else if (memcmp(cp, "max_addr=", 9) == 0) {
			cp += 9;
			max_addr = GRANULEROUNDDOWN(memparse(cp, &end));
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}
	if (max_addr != ~0UL)
		printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);

	efi.systab = __va(ia64_boot_param->efi_systab);

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab == NULL)
		panic("Woah! Can't find EFI system table.\n");
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
		panic("Woah! EFI system table signature incorrect\n");
	if ((efi.systab->hdr.revision ^ EFI_SYSTEM_TABLE_REVISION) >> 16 != 0)
		printk(KERN_WARNING "Warning: EFI system table major version mismatch: "
		       "got %d.%02d, expected %d.%02d\n",
		       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff,
		       EFI_SYSTEM_TABLE_REVISION >> 16, EFI_SYSTEM_TABLE_REVISION & 0xffff);

	config_tables = __va(efi.systab->tables);

	/* Show what we know for posterity */
	c16 = __va(efi.systab->fw_vendor);
	if (c16) {
		for (i = 0; i < (int) sizeof(vendor) && *c16; ++i)
			vendor[i] = *c16++;
		vendor[i] = '\0';
	}

	printk(KERN_INFO "EFI v%u.%.02u by %s:",
	       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);

	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
			efi.mps = __va(config_tables[i].table);
			printk(" MPS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
			efi.acpi20 = __va(config_tables[i].table);
			printk(" ACPI 2.0=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
			efi.acpi = __va(config_tables[i].table);
			printk(" ACPI=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
			efi.smbios = __va(config_tables[i].table);
			printk(" SMBIOS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
			efi.sal_systab = __va(config_tables[i].table);
			printk(" SALsystab=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
			efi.hcdp = __va(config_tables[i].table);
			printk(" HCDP=0x%lx", config_tables[i].table);
		}
	}
	printk("\n");

	runtime = __va(efi.systab->runtime);
	efi.get_time = phys_get_time;
	efi.set_time = phys_set_time;
	efi.get_wakeup_time = phys_get_wakeup_time;
	efi.set_wakeup_time = phys_set_wakeup_time;
	efi.get_variable = phys_get_variable;
	efi.get_next_variable = phys_get_next_variable;
	efi.set_variable = phys_set_variable;
	efi.get_next_high_mono_count = phys_get_next_high_mono_count;
	efi.reset_system = phys_reset_system;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

#if EFI_DEBUG
	/* print EFI memory map: */
	{
		efi_memory_desc_t *md;
		void *p;

		for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
			md = p;
			printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
			       i, md->type, md->attribute, md->phys_addr,
			       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
			       md->num_pages >> (20 - EFI_PAGE_SHIFT));
		}
	}
#endif

	efi_map_pal_code();
	efi_enter_virtual_mode();
}

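/*
 * Assign a virtual address to every RUNTIME memory descriptor, tell the
 * firmware about the new mappings via SetVirtualAddressMap, and switch the
 * efi.* entry points over to the virt_* stubs so that runtime services are
 * called with virtual addresses from here on.
 */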
void
efi_enter_virtual_mode (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	efi_status_t status;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			/*
			 * Some descriptors have multiple bits set, so the order of
			 * the tests is relevant.
			 */
			if (md->attribute & EFI_MEMORY_WB) {
				md->virt_addr = (u64) __va(md->phys_addr);
			} else if (md->attribute & EFI_MEMORY_UC) {
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WC) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
									   | _PAGE_D
									   | _PAGE_MA_WC
									   | _PAGE_PL_0
									   | _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			} else if (md->attribute & EFI_MEMORY_WT) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
									   | _PAGE_D | _PAGE_MA_WT
									   | _PAGE_PL_0
									   | _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			}
		}
	}

	status = efi_call_phys(__va(runtime->set_virtual_address_map),
			       ia64_boot_param->efi_memmap_size,
			       efi_desc_size, ia64_boot_param->efi_memdesc_version,
			       ia64_boot_param->efi_memmap);
	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
		       "(status=%lu)\n", status);
		return;
	}

	/*
	 * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
	 */
	efi.get_time = virt_get_time;
	efi.set_time = virt_set_time;
	efi.get_wakeup_time = virt_get_wakeup_time;
	efi.set_wakeup_time = virt_set_wakeup_time;
	efi.get_variable = virt_get_variable;
	efi.get_next_variable = virt_get_next_variable;
	efi.set_variable = virt_set_variable;
	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
	efi.reset_system = virt_reset_system;
}

/*
 * Walk the EFI memory map looking for the I/O port range.  There can be only one entry
 * of this type; other I/O port ranges should be described via ACPI.
 */
u64
efi_get_iobase (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
			if (md->attribute & EFI_MEMORY_UC)
				return md->phys_addr;
		}
	}
	return 0;
}

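/*
 * Return the EFI memory type of the descriptor covering phys_addr, or 0 if
 * the address is not covered by the EFI memory map.
 */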
u32
efi_mem_type (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
			return md->type;
	}
	return 0;
}

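/*
 * Return the EFI attribute bits of the descriptor covering phys_addr, or 0
 * if the address is not covered by the EFI memory map.
 */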
u64
efi_mem_attributes (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
			return md->attribute;
	}
	return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);

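/*
 * Check whether the physical range starting at phys_addr is cacheable (WB)
 * memory according to the EFI memory map, and clamp *size so that the range
 * does not extend past the end of the descriptor covering phys_addr.
 */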
int
valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
			if (!(md->attribute & EFI_MEMORY_WB))
				return 0;

			if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
				*size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
			return 1;
		}
	}
	return 0;
}

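/*
 * Parse the EFI ConOut variable (a list of device paths) and return 1 if
 * every console output path goes through a UART, i.e. the firmware console
 * is serial-only; return 0 otherwise or if the variable cannot be read.
 */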
int __init
efi_uart_console_only(void)
{
	efi_status_t status;
	char *s, name[] = "ConOut";
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	efi_char16_t *utf16, name_utf16[32];
	unsigned char data[1024];
	unsigned long size = sizeof(data);
	struct efi_generic_dev_path *hdr, *end_addr;
	int uart = 0;

	/* Convert to UTF-16 */
	utf16 = name_utf16;
	s = name;
	while (*s)
		*utf16++ = *s++ & 0x7f;
	*utf16 = 0;

	status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
	if (status != EFI_SUCCESS) {
		printk(KERN_ERR "No EFI %s variable?\n", name);
		return 0;
	}

	hdr = (struct efi_generic_dev_path *) data;
	end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
	while (hdr < end_addr) {
		if (hdr->type == EFI_DEV_MSG &&
		    hdr->sub_type == EFI_DEV_MSG_UART)
			uart = 1;
		else if (hdr->type == EFI_DEV_END_PATH ||
			 hdr->type == EFI_DEV_END_PATH2) {
			if (!uart)
				return 0;
			if (hdr->sub_type == EFI_DEV_END_ENTIRE)
				return 1;
			uart = 0;
		}
		hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
	}
	printk(KERN_ERR "Malformed %s value\n", name);
	return 0;
}