blob: 0695be538de53cb88f56e6d068feabbe733d16ae [file] [log] [blame]
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -08001#include <linux/module.h>
Al Virof6a57032006-10-18 01:47:25 -04002#include <linux/sched.h>
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -08003#include <linux/spinlock.h>
4#include <linux/list.h>
5#include <asm/alternative.h>
6#include <asm/sections.h>
7
#ifdef CONFIG_HOTPLUG_CPU
/*
 * With CPU hotplug the system can go from UP to SMP (and back) after
 * boot, so lock-prefix alternatives must remain re-patchable at runtime.
 * Booting with "smp-alt-boot" forces the patch-once-at-boot behaviour.
 */
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
/* Without CPU hotplug the CPU count can never change: always patch once. */
#define smp_alt_once 1
#endif
20
/* Set by "debug-alternative" on the kernel command line; enables DPRINTK. */
static int debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
29
/* "noreplace-smp" on the command line disables SMP lock-prefix patching. */
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
38
#ifdef CONFIG_PARAVIRT
/* "noreplace-paravirt" on the command line skips apply_paravirt() patching. */
static int noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
Jeremy Fitzhardingeb7fb4af2007-05-02 19:27:13 +020049
/*
 * Debug printk, enabled with "debug-alternative" on the command line.
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: the bare "if (...) printk(...)" form would capture a
 * following "else" (dangling-else) and misparse when used unbraced
 * inside an if/else.
 */
#define DPRINTK(fmt, args...)						\
	do {								\
		if (debug_alternative)					\
			printk(KERN_DEBUG fmt, args);			\
	} while (0)
52
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
/*
 * intel_nops[n] points at an n-byte nop: the blob above packs the
 * 1..8-byte nops back to back, so entry n lives at offset
 * 1+2+...+(n-1).  Entry 0 is unused.
 */
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
73
#ifdef K8_NOP1
/* AMD K8 nop blob; same back-to-back layout as intelnops above. */
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
/* k8_nops[n] -> n-byte K8 nop (offset 1+2+...+(n-1)); entry 0 unused. */
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
91
#ifdef K7_NOP1
/* AMD K7 nop blob; same back-to-back layout as intelnops above. */
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
/* k7_nops[n] -> n-byte K7 nop (offset 1+2+...+(n-1)); entry 0 unused. */
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
109
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
/* On x86-64 the K8 nop table is used unconditionally; no CPU probing. */
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

118#else /* CONFIG_X86_64 */
119
/*
 * CPU feature bit -> preferred nop table.  Scanned in order by
 * find_nop_table(); terminated by cpuid == -1.  First match wins.
 */
static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};
128
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800129static unsigned char** find_nop_table(void)
130{
131 unsigned char **noptable = intel_nops;
132 int i;
133
134 for (i = 0; noptypes[i].cpuid >= 0; i++) {
135 if (boot_cpu_has(noptypes[i].cpuid)) {
136 noptable = noptypes[i].noptable;
137 break;
138 }
139 }
140 return noptable;
141}
142
Gerd Hoffmannd167a512006-06-26 13:56:16 +0200143#endif /* CONFIG_X86_64 */
144
Rusty Russell139ec7c2006-12-07 02:14:08 +0100145static void nop_out(void *insns, unsigned int len)
146{
147 unsigned char **noptable = find_nop_table();
148
149 while (len > 0) {
150 unsigned int noplen = len;
151 if (noplen > ASM_NOP_MAX)
152 noplen = ASM_NOP_MAX;
153 memcpy(insns, noptable[noplen], noplen);
154 insns += noplen;
155 len -= noplen;
156 }
157}
158
Gerd Hoffmannd167a512006-06-26 13:56:16 +0200159extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
Gerd Hoffmannd167a512006-06-26 13:56:16 +0200160extern u8 *__smp_locks[], *__smp_locks_end[];
161
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have less capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		/* a replacement may never be longer than the slot it patches */
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		/* pad any leftover bytes of the original slot with nops */
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}
193
Gerd Hoffmann8ec4d412006-07-01 04:36:18 -0700194#ifdef CONFIG_SMP
195
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800196static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
197{
198 u8 **ptr;
199
200 for (ptr = start; ptr < end; ptr++) {
201 if (*ptr < text)
202 continue;
203 if (*ptr > text_end)
204 continue;
205 **ptr = 0xf0; /* lock prefix */
206 };
207}
208
209static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
210{
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800211 u8 **ptr;
212
Jeremy Fitzhardingeb7fb4af2007-05-02 19:27:13 +0200213 if (noreplace_smp)
214 return;
215
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800216 for (ptr = start; ptr < end; ptr++) {
217 if (*ptr < text)
218 continue;
219 if (*ptr > text_end)
220 continue;
Rusty Russell139ec7c2006-12-07 02:14:08 +0100221 nop_out(*ptr, 1);
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800222 };
223}
224
/*
 * Bookkeeping for one patchable region (the core kernel or a module) so
 * lock prefixes can be re-patched when switching between UP and SMP.
 */
struct smp_alt_module {
	/* owning module; NULL for the core kernel (lookup key for del) */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	u8 **locks;
	u8 **locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
/* all registered regions, protected by the smp_alt spinlock */
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
242
/*
 * Register a region's lock-prefix fixup sites.  In patch-once mode the
 * locks are removed immediately when running UP and nothing is recorded;
 * otherwise the region is added to smp_alt_modules so that
 * alternatives_smp_switch() can re-patch it later.
 */
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	/* currently running UP: drop the lock prefixes in this region now */
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
281
/* Remove the bookkeeping entry registered for @mod by module_add above. */
void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	/* nothing was recorded in these modes, so nothing to delete */
	if (smp_alt_once || noreplace_smp)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		/* drop the lock before DPRINTK/kfree; we return right after */
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
302
/*
 * Re-patch every registered region for SMP (smp != 0) or UP operation,
 * and flip the X86_FEATURE_UP bit to match.
 */
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not yet fixed binutils section handling bug prevents
	 * alternatives-replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	/* switching to UP while more than one CPU is online is a bug */
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
340
Gerd Hoffmann8ec4d412006-07-01 04:36:18 -0700341#endif
342
#ifdef CONFIG_PARAVIRT
/*
 * Let paravirt_ops.patch() rewrite each recorded call site in place,
 * then pad the unused remainder of every site with nops.
 */
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);

		/* the patcher must never write beyond the site */
		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following
	 * instructions */
	sync_core();
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */
371
/* Boot-time entry point: apply alternative, SMP-lock and paravirt patching. */
void __init alternative_instructions(void)
{
	unsigned long flags;

	/* keep interrupt handlers from executing code while it is patched */
	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		/* the lock-site table will never be needed again */
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	} else {
		/* register the core kernel so later switches re-patch it */
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);
	local_irq_restore(flags);
}