/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/system.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))

#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define L2_ATTR (_PAGE_PRESENT)

#define LEVEL0_SIZE (1UL << 12UL)
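
/*
 * Worked example (illustrative only; 0x00341000 is a made-up physical
 * address): _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY is
 * 0x1 | 0x2 | 0x20 | 0x40 = 0x63, so an L0/L1 entry for that page is
 *
 *	0x00341000 | L0_ATTR  ==  0x00341063
 *
 * Pre-setting the accessed and dirty bits spares the hardware from
 * having to update the entry on first use.
 */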

#ifndef CONFIG_X86_PAE
#define LEVEL1_SIZE (1UL << 22UL)
static u32 pgtable_level1[1024] PAGE_ALIGNED;

static void identity_map_page(unsigned long address)
{
	unsigned long level1_index, level2_index;
	u32 *pgtable_level2;

	/* Find the current page table */
	pgtable_level2 = __va(read_cr3());

	/* Find the indexes of the physical address to identity map */
	level1_index = (address % LEVEL1_SIZE) / LEVEL0_SIZE;
	level2_index = address / LEVEL1_SIZE;

	/* Identity map the page table entry */
	pgtable_level1[level1_index] = address | L0_ATTR;
	pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;

	/* Flush the tlb so the new mapping takes effect.
	 * Global tlb entries are not flushed but that is not an issue.
	 */
	load_cr3(pgtable_level2);
}
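
/*
 * Worked example (illustrative only; the address is made up): with 4KB
 * pages (LEVEL0_SIZE) and 4MB covered per page table (LEVEL1_SIZE),
 * identity mapping address 0x00341000 gives
 *
 *	level1_index = (0x00341000 % 0x400000) / 0x1000 = 0x341
 *	level2_index =  0x00341000 / 0x400000           = 0x0
 *
 * so pgtable_level2[0] is pointed at pgtable_level1 and
 * pgtable_level1[0x341] maps the page onto itself.
 */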

#else
#define LEVEL1_SIZE (1UL << 21UL)
#define LEVEL2_SIZE (1UL << 30UL)
static u64 pgtable_level1[512] PAGE_ALIGNED;
static u64 pgtable_level2[512] PAGE_ALIGNED;

static void identity_map_page(unsigned long address)
{
	unsigned long level1_index, level2_index, level3_index;
	u64 *pgtable_level3;

	/* Find the current page table */
	pgtable_level3 = __va(read_cr3());

	/* Find the indexes of the physical address to identity map */
	level1_index = (address % LEVEL1_SIZE) / LEVEL0_SIZE;
	level2_index = (address % LEVEL2_SIZE) / LEVEL1_SIZE;
	level3_index = address / LEVEL2_SIZE;

	/* Identity map the page table entry */
	pgtable_level1[level1_index] = address | L0_ATTR;
	pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
	set_64bit(&pgtable_level3[level3_index],
					__pa(pgtable_level2) | L2_ATTR);

	/* Flush the tlb so the new mapping takes effect.
	 * Global tlb entries are not flushed but that is not an issue.
	 */
	load_cr3(pgtable_level3);
}
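
/*
 * Worked example (illustrative only; the address is made up): under PAE
 * a page table covers 2MB (LEVEL1_SIZE) and a page directory covers 1GB
 * (LEVEL2_SIZE), so identity mapping address 0x00341000 gives
 *
 *	level1_index = (0x00341000 % 0x200000) / 0x1000     = 0x141
 *	level2_index = (0x00341000 % 0x40000000) / 0x200000 = 0x1
 *	level3_index =  0x00341000 / 0x40000000             = 0x0
 *
 * The level 3 (PDPTE) entry is written with set_64bit() so the 8-byte
 * store is atomic and the hardware never observes a half-written entry.
 */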
#endif

static void set_idt(void *newidt, __u16 limit)
{
	struct Xgt_desc_struct curidt;

	/* ia32 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	load_idt(&curidt);
}

static void set_gdt(void *newgdt, __u16 limit)
{
	struct Xgt_desc_struct curgdt;

	/* ia32 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	load_gdt(&curgdt);
}
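
/*
 * Both loads go through the same 6-byte pseudo-descriptor that the
 * lgdt/lidt instructions consume: a 16-bit limit followed by a 32-bit
 * linear base.  A sketch of the layout (see <asm/desc.h> for the real
 * Xgt_desc_struct definition):
 *
 *	bits  0..15      bits 16..47
 *	+-------------+----------------------+
 *	|    limit    |     linear base      |
 *	+-------------+----------------------+
 */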

static void load_segments(void)
{
#define __STR(X) #X
#define STR(X) __STR(X)
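	/*
	 * This is the standard two-level stringification idiom: __STR()
	 * stringifies its argument literally, while the extra STR()
	 * layer forces the argument to be macro-expanded first.  A
	 * minimal sketch (FOO is a made-up example macro):
	 *
	 *	#define FOO 42
	 *	__STR(FOO)  ->  "FOO"
	 *	STR(FOO)    ->  "42"
	 */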

	__asm__ __volatile__ (
		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
		"\t1:\n"
		"\tmovl $"STR(__KERNEL_DS)",%%eax\n"
		"\tmovl %%eax,%%ds\n"
		"\tmovl %%eax,%%es\n"
		"\tmovl %%eax,%%fs\n"
		"\tmovl %%eax,%%gs\n"
		"\tmovl %%eax,%%ss\n"
		::: "eax", "memory");
#undef STR
#undef __STR
}

typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
					unsigned long indirection_page,
					unsigned long reboot_code_buffer,
					unsigned long start_address,
					unsigned int has_pae) ATTRIB_NORET;

extern const unsigned char relocate_new_kernel[];
extern void relocate_new_kernel_end(void);
extern const unsigned int relocate_new_kernel_size;

/*
 * An architecture hook called to validate the
 * proposed image and prepare the control pages
 * as needed.  The pages for KEXEC_CONTROL_CODE_SIZE
 * have been allocated, but the segments have yet
 * to be copied into the kernel.
 *
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 *
 * Currently nothing.
 */
int machine_kexec_prepare(struct kimage *image)
{
	return 0;
}

/*
 * Undo anything left over by machine_kexec_prepare
 * when an image is freed.
 */
void machine_kexec_cleanup(struct kimage *image)
{
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer;
	relocate_new_kernel_t rnk;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Compute some offsets */
	reboot_code_buffer = page_to_pfn(image->control_code_page)
							<< PAGE_SHIFT;
	page_list = image->head;

	/* Set up an identity mapping for the reboot_code_buffer */
	identity_map_page(reboot_code_buffer);

	/* copy it out */
	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
						relocate_new_kernel_size);

	/* The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a descriptor table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	rnk = (relocate_new_kernel_t) reboot_code_buffer;
	(*rnk)(page_list, reboot_code_buffer, image->start, cpu_has_pae);
}