#ifndef _ASM_X86_ALTERNATIVE_H
#define _ASM_X86_ALTERNATIVE_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/asm.h>

/*
 * Alternative inline assembly for SMP.
 *
 * The LOCK_PREFIX macro defined here replaces the LOCK and
 * LOCK_PREFIX macros used everywhere in the source tree.
 *
 * SMP alternatives use the same data structures as the other
 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
 * UP system running an SMP kernel.  The existing apply_alternatives()
 * works fine for patching an SMP kernel for UP.
 *
 * The SMP alternative tables can be kept after boot and contain both
 * UP and SMP versions of the instructions to allow switching back to
 * SMP at runtime, when hotplugging in a new CPU, which is especially
 * useful in virtualized environments.
 *
 * The very common lock prefix is handled as a special case in a
 * separate table that is a pure address list without replacement
 * pointer and size information.  That keeps the table sizes small.
 */

#ifdef CONFIG_SMP
#define LOCK_PREFIX \
		".section .smp_locks,\"a\"\n"	\
		_ASM_ALIGN "\n"			\
		_ASM_PTR "661f\n" /* address */	\
		".previous\n"			\
		"661:\n\tlock; "

#else /* ! CONFIG_SMP */
#define LOCK_PREFIX ""
#endif
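
/*
 * Illustrative use (a sketch modeled on the x86 atomic helpers, not a
 * definition provided by this header): LOCK_PREFIX is prepended to a
 * locked read-modify-write instruction, e.g.
 *
 *	asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));
 *
 * On SMP this emits "lock; incl" and records the address of the lock
 * prefix in .smp_locks; on UP kernels it expands to a plain "incl".
 */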

/* This must be included *after* the definition of LOCK_PREFIX */
#include <asm/cpufeature.h>

struct alt_instr {
	u8 *instr;		/* original instruction */
	u8 *replacement;
	u8  cpuid;		/* cpuid bit set for replacement */
	u8  instrlen;		/* length of original instruction */
	u8  replacementlen;	/* length of new instruction, <= instrlen */
	u8  pad1;
#ifdef CONFIG_X86_64
	u32 pad2;
#endif
};
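
/*
 * Each alternative*() macro below emits one of these records into the
 * .altinstructions section; apply_alternatives() walks them at boot
 * and patches the kernel text.
 */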

extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);

struct module;

#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
					void *locks, void *locks_end,
					void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
					       void *locks, void *locks_end,
					       void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif	/* CONFIG_SMP */

const unsigned char *const *find_nop_table(void);

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows generic binary kernels to use optimized instructions
 * where the CPU supports them.
 *
 * oldinstr must be at least as long as newinstr; it can be padded
 * with nops as needed.
 *
 * For non-barrier-like inlines, please define new variants without
 * the volatile qualifier and the memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      _ASM_ALIGN "\n"					\
		      _ASM_PTR "661b\n"		/* label */		\
		      _ASM_PTR "663f\n"		/* new instruction */	\
		      "	 .byte %c0\n"		/* feature bit */	\
		      "	 .byte 662b-661b\n"	/* sourcelen */		\
		      "	 .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"		\
		      "663:\n\t" newinstr "\n664:\n"  /* replacement */	\
		      ".previous" :: "i" (feature) : "memory")
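
/*
 * Example use, a sketch modeled on the 32-bit memory-barrier
 * definitions (illustrative; the actual definitions live elsewhere):
 *
 *	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *			         X86_FEATURE_XMM2)
 *
 * On CPUs with SSE2 the lock-prefixed add is patched to an mfence at
 * boot; older CPUs keep the original instruction.
 */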

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * It is best to use constraints that are fixed size (like "(%1)" ... "r").
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      _ASM_ALIGN "\n"					\
		      _ASM_PTR "661b\n"		/* label */		\
		      _ASM_PTR "663f\n"		/* new instruction */	\
		      "	 .byte %c0\n"		/* feature bit */	\
		      "	 .byte 662b-661b\n"	/* sourcelen */		\
		      "	 .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"		\
		      "663:\n\t" newinstr "\n664:\n"  /* replacement */	\
		      ".previous" :: "i" (feature), ##input)
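
/*
 * Example use, a sketch modeled on the x86 prefetch() helper
 * (illustrative; ASM_NOP4 from <asm/nops.h> stands in for a suitably
 * padded old instruction):
 *
 *	alternative_input(ASM_NOP4,
 *			  "prefetchnta (%1)",
 *			  X86_FEATURE_XMM,
 *			  "r" (addr));
 *
 * %0 is the feature constraint, so the first caller-supplied input is %1.
 */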

/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      _ASM_ALIGN "\n"					\
		      _ASM_PTR "661b\n"		/* label */		\
		      _ASM_PTR "663f\n"		/* new instruction */	\
		      "	 .byte %c[feat]\n"	/* feature bit */	\
		      "	 .byte 662b-661b\n"	/* sourcelen */		\
		      "	 .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"		\
		      "663:\n\t" newinstr "\n664:\n"  /* replacement */	\
		      ".previous" : output : [feat] "i" (feature), ##input)

/*
 * Use this macro if you need more than one output parameter in
 * alternative_io.
 */
#define ASM_OUTPUT2(a, b) a, b
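
/*
 * Example with two outputs (hypothetical helpers and feature bit, for
 * illustration only).  The comma in the output list must be wrapped in
 * ASM_OUTPUT2() so the preprocessor passes it as one macro argument:
 *
 *	alternative_io("call old_helper", "call new_helper",
 *		       X86_FEATURE_FOO,
 *		       ASM_OUTPUT2("=a" (lo), "=d" (hi)),
 *		       "D" (arg));
 */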

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/* Fill a buffer of the given length with nop instructions. */
extern void add_nops(void *insns, unsigned int len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side effect: any interrupt handler running between save and restore will
 * have the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped
 * and no thread can be preempted in the instructions being modified (no iret
 * to an invalid instruction possible) or if the instructions are changed from
 * a consistent state to another consistent state atomically.
 * More care must be taken when modifying code in the SMP case because of
 * Intel's errata.
 * On the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 * The _early version expects the memory to already be RW.
 */

extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_early(void *addr, const void *opcode, size_t len);
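
/*
 * Illustrative use (a sketch; the kprobes code does something similar
 * when arming a probe): overwrite the first byte of an instruction
 * with an int3 breakpoint:
 *
 *	unsigned char bkpt = 0xcc;
 *	text_poke(addr, &bkpt, 1);
 */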

#endif /* _ASM_X86_ALTERNATIVE_H */