blob: 426f59b0106b65ae5c9fe7d574354c92ff9a8a32 [file] [log] [blame]
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -08001#include <linux/module.h>
Al Virof6a57032006-10-18 01:47:25 -04002#include <linux/sched.h>
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -08003#include <linux/spinlock.h>
4#include <linux/list.h>
5#include <asm/alternative.h>
6#include <asm/sections.h>
7
/* When set, SMP alternatives are applied exactly once at boot and the
 * tables are freed afterwards - no UP<->SMP switching on CPU hotplug.
 * Enabled by the "smp-alt-boot" command line option. */
static int smp_alt_once = 0;
/* Enables DPRINTK() output; set by "debug-alternative". */
static int debug_alternative = 0;
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -080010
/* "smp-alt-boot" command line handler: patch alternatives only once at
 * boot time.  The argument string is unused; returns 1 (option consumed). */
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
/* "debug-alternative" command line handler: turn on DPRINTK() debug
 * output.  The argument string is unused; returns 1 (option consumed). */
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
21
/* Register the command line options handled above. */
__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);
24
/* Debug printk, active only when "debug-alternative" was given.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement: the original bare "if" form breaks inside an unbraced
 * if/else at the call site (dangling-else hazard). */
#define DPRINTK(fmt, args...) do {				\
	if (debug_alternative)					\
		printk(KERN_DEBUG fmt, args);			\
} while (0)
27
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
/* intel_nops[n] points at an n-byte nop sequence (1 <= n <= ASM_NOP_MAX):
   the blob above concatenates the 1,2,...,8 byte nops, so entry n starts
   at offset 1+2+...+(n-1). */
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
48
#ifdef K8_NOP1
/* Same layout as intel_nops above, but using the K8 nop encodings:
   k8_nops[n] is an n-byte nop sequence. */
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
66
#ifdef K7_NOP1
/* Same layout as intel_nops above, but using the K7 nop encodings:
   k7_nops[n] is an n-byte nop sequence. */
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
84
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
/* On 64-bit every supported CPU gets the K8 nop table. */
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

/* Feature-bit -> nop-table mapping, scanned in order by
   find_nop_table(); first match wins, cpuid == -1 terminates. */
static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

/* Pick the nop table matching the boot CPU's features;
   falls back to the generic intel_nops. */
static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */
119
Rusty Russell139ec7c2006-12-07 02:14:08 +0100120static void nop_out(void *insns, unsigned int len)
121{
122 unsigned char **noptable = find_nop_table();
123
124 while (len > 0) {
125 unsigned int noplen = len;
126 if (noplen > ASM_NOP_MAX)
127 noplen = ASM_NOP_MAX;
128 memcpy(insns, noptable[noplen], noplen);
129 insns += noplen;
130 len -= noplen;
131 }
132}
133
/* Section boundary symbols provided by the linker script: alternative
   instruction tables, lock-prefix pointer list, and the .smp_altinstr
   replacement area. */
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];
139
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that assymetric systems where
   APs have less capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		/* The replacement must fit into the original slot. */
		BUG_ON(a->replacementlen > a->instrlen);
		/* Only patch sites whose feature bit this CPU has. */
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		/* Pad any leftover bytes of the slot with nops. */
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}
171
Gerd Hoffmann8ec4d412006-07-01 04:36:18 -0700172#ifdef CONFIG_SMP
173
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800174static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
175{
176 struct alt_instr *a;
177
178 DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
179 for (a = start; a < end; a++) {
180 memcpy(a->replacement + a->replacementlen,
181 a->instr,
182 a->instrlen);
183 }
184}
185
186static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
187{
188 struct alt_instr *a;
189
190 for (a = start; a < end; a++) {
191 memcpy(a->instr,
192 a->replacement + a->replacementlen,
193 a->instrlen);
194 }
195}
196
197static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
198{
199 u8 **ptr;
200
201 for (ptr = start; ptr < end; ptr++) {
202 if (*ptr < text)
203 continue;
204 if (*ptr > text_end)
205 continue;
206 **ptr = 0xf0; /* lock prefix */
207 };
208}
209
210static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
211{
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800212 u8 **ptr;
213
214 for (ptr = start; ptr < end; ptr++) {
215 if (*ptr < text)
216 continue;
217 if (*ptr > text_end)
218 continue;
Rusty Russell139ec7c2006-12-07 02:14:08 +0100219 nop_out(*ptr, 1);
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800220 };
221}
222
/* One registered code region whose lock prefixes can be switched
   between UP and SMP variants at CPU hotplug time. */
struct smp_alt_module {
	/* owning module; NULL for the core kernel image
	   (see alternatives_smp_module_add(NULL, "core kernel", ...)) */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	u8 **locks;
	u8 **locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
/* All registered regions; list and entries protected by smp_alt. */
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
240
/* Register a code region (core kernel or module) for UP<->SMP lock
   switching.  In smp_alt_once mode nothing is tracked: lock prefixes
   are just patched out immediately when running UP. */
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	/* Insert under the lock, and patch to UP form right away if
	   we are currently running uniprocessor. */
	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
276
/* Unregister a module's region.  Harmless for modules that never
   registered (the lookup simply finds nothing). */
void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		/* Entry is unlinked; safe to drop the lock before the
		   (possibly sleeping/printing) cleanup below. */
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
297
298void alternatives_smp_switch(int smp)
299{
300 struct smp_alt_module *mod;
301 unsigned long flags;
302
Ingo Molnar3047e992006-07-03 00:24:57 -0700303#ifdef CONFIG_LOCKDEP
304 /*
305 * A not yet fixed binutils section handling bug prevents
306 * alternatives-replacement from working reliably, so turn
307 * it off:
308 */
309 printk("lockdep: not fixing up alternatives.\n");
310 return;
311#endif
312
Andi Kleen9ce883b2007-04-24 13:05:37 +0200313 if (smp_alt_once)
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800314 return;
315 BUG_ON(!smp && (num_online_cpus() > 1));
316
317 spin_lock_irqsave(&smp_alt, flags);
318 if (smp) {
319 printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
320 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
321 clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
322 alternatives_smp_apply(__smp_alt_instructions,
323 __smp_alt_instructions_end);
324 list_for_each_entry(mod, &smp_alt_modules, next)
325 alternatives_smp_lock(mod->locks, mod->locks_end,
326 mod->text, mod->text_end);
327 } else {
328 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
329 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
330 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
331 apply_alternatives(__smp_alt_instructions,
332 __smp_alt_instructions_end);
333 list_for_each_entry(mod, &smp_alt_modules, next)
334 alternatives_smp_unlock(mod->locks, mod->locks_end,
335 mod->text, mod->text_end);
336 }
337 spin_unlock_irqrestore(&smp_alt, flags);
338}
339
Gerd Hoffmann8ec4d412006-07-01 04:36:18 -0700340#endif
341
#ifdef CONFIG_PARAVIRT
/* Let the paravirt backend patch each recorded call site in place;
   whatever space the backend leaves unused is filled with nops. */
void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
{
	struct paravirt_patch *p;

	for (p = start; p < end; p++) {
		unsigned int used;

		/* Backend returns how many bytes of the site it wrote. */
		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);
#ifdef CONFIG_DEBUG_PARAVIRT
		{
			int i;
			/* Deliberately clobber regs using "not %reg" to find bugs. */
			for (i = 0; i < 3; i++) {
				if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
					memcpy(p->instr + used, "\xf7\xd0", 2);
					p->instr[used+1] |= i;
					used += 2;
				}
			}
		}
#endif
		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following instructions */
	sync_core();
}
/* Section bounds of the paravirt patch table (linker script). */
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
375
/* Boot-time entry point: patch the core kernel's alternatives with
   interrupts off, since we are rewriting live kernel text. */
void __init alternative_instructions(void)
{
	unsigned long flags;

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		/* Patch to UP form once and discard the SMP tables. */
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			apply_alternatives(__smp_alt_instructions,
					   __smp_alt_instructions_end);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
	} else {
		/* Keep the tables, register the core kernel text for
		 * hotplug switching, and start out in UP mode. */
		alternatives_smp_save(__smp_alt_instructions,
				      __smp_alt_instructions_end);
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__start_parainstructions, __stop_parainstructions);
	local_irq_restore(flags);
}