#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>

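/* Virtual address of the low-memory copy of the real-mode blob. */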
struct real_mode_header *real_mode_header;
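/*
 * Pointer into the trampoline header's CR4 image, exported so the CR4
 * value the APs will load can be kept in sync with the boot CPU.
 */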
u32 *trampoline_cr4_features;

void __init setup_real_mode(void)
{
	phys_addr_t mem;
	u16 real_mode_seg;
	u32 *rel;
	u32 count;
	u32 *ptr;
	u16 *seg;
	int i;
	unsigned char *base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
#endif

	/* Has to be in very low memory so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem)
		panic("Cannot allocate trampoline\n");

	base = __va(mem);
	memblock_reserve(mem, size);
	real_mode_header = (struct real_mode_header *) base;
	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
	       base, (unsigned long long)mem, size);

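	/* Copy the blob into place; the fixups below patch it for this load address. */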
	memcpy(base, real_mode_blob, size);

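	/*
	 * real_mode_relocs holds two tables: a count of 16-bit segment
	 * fixups followed by their offsets into the blob, then a count of
	 * 32-bit linear fixups followed by their offsets.
	 */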
	real_mode_seg = __pa(base) >> 4;
	rel = (u32 *) real_mode_relocs;

	/* 16-bit segment relocations. */
	count = rel[0];
	rel = &rel[1];
	for (i = 0; i < count; i++) {
		seg = (u16 *) (base + rel[i]);
		*seg = real_mode_seg;
	}

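	/*
	 * After the 16-bit pass, rel[i] is the 32-bit fixup count; each
	 * 32-bit fixup converts a blob-relative offset into a physical
	 * address by adding the load address.
	 */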
	/* 32-bit linear relocations. */
	count = rel[i];
	rel = &rel[i + 1];
	for (i = 0; i < count; i++) {
		ptr = (u32 *) (base + rel[i]);
		*ptr += __pa(base);
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
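	/* 32-bit APs enter at startup_32_smp through the temporary boot GDT. */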
	trampoline_header->start = __pa(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

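	/* 64-bit APs enter at secondary_startup_64; record the CR4 value they should load. */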
	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = read_cr4();

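	/*
	 * The trampoline page table reuses the kernel's direct-mapping PGD
	 * entry in slot 0, giving an identity mapping of low physical
	 * memory, and carries the kernel mappings in the top slot.
	 */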
	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
	trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
	trampoline_pgd[511] = init_level4_pgt[511].pgd;
#endif
}

/*
 * setup_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. The trampoline code is also executed by APs, so it must be
 * marked executable no later than do_pre_smp_initcalls(); hence this
 * runs as an early_initcall().
 */
static int __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

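	/*
	 * Everything from the start of the blob up to ro_end is read-only;
	 * the [text_start, ro_end) slice inside it is the executable text.
	 */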
	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);

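	/*
	 * Strip execute permission from the whole blob, make everything up
	 * to ro_end (header, read-only data, text) read-only, then restore
	 * execute permission on the text.
	 */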
	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);

	return 0;
}
early_initcall(set_real_mode_permissions);