blob: c5d037c60950ffadbfbc0205c1aed8a04827391c [file] [log] [blame]
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -08001#include <linux/module.h>
Al Virof6a57032006-10-18 01:47:25 -04002#include <linux/sched.h>
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -08003#include <linux/spinlock.h>
4#include <linux/list.h>
5#include <asm/alternative.h>
6#include <asm/sections.h>
7
/* Command-line tunables (see the __setup() handlers below).
 * Statics are implicitly zero-initialized; an explicit "= 0" is redundant
 * and would needlessly move them from .bss to .data (kernel convention). */
static int noreplace_smp;	/* "noreplace-smp": never patch away lock prefixes */
static int smp_alt_once;	/* "smp-alt-boot": do SMP alternatives once at boot only */
static int debug_alternative;	/* "debug-alternative": enable DPRINTK output */
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -080011
Gerd Hoffmannd167a512006-06-26 13:56:16 +020012static int __init bootonly(char *str)
13{
14 smp_alt_once = 1;
15 return 1;
16}
Jeremy Fitzhardingeb7fb4af2007-05-02 19:27:13 +020017__setup("smp-alt-boot", bootonly);
18
Gerd Hoffmannd167a512006-06-26 13:56:16 +020019static int __init debug_alt(char *str)
20{
21 debug_alternative = 1;
22 return 1;
23}
Gerd Hoffmannd167a512006-06-26 13:56:16 +020024__setup("debug-alternative", debug_alt);
25
Jeremy Fitzhardingeb7fb4af2007-05-02 19:27:13 +020026static int __init setup_noreplace_smp(char *str)
27{
28 noreplace_smp = 1;
29 return 1;
30}
31__setup("noreplace-smp", setup_noreplace_smp);
32
33
/* Debug trace, gated on the "debug-alternative" boot parameter.
 * Wrapped in do { } while (0) so the macro behaves as a single statement:
 * the original bare "if (...) printk(...)" form breaks inside an
 * unbraced if/else at the call site (dangling-else hazard). */
#define DPRINTK(fmt, args...)					\
	do {							\
		if (debug_alternative)				\
			printk(KERN_DEBUG fmt, args);		\
	} while (0)
36
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings.  The eight sequences are emitted
   back-to-back into .data at the "intelnops" label. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
/* intel_nops[n] points at the n-byte nop sequence (1 <= n <= ASM_NOP_MAX);
   index 0 is unused.  Offsets are cumulative sequence lengths. */
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
57
#ifdef K8_NOP1
/* AMD K8 nop sequences, laid out like the intel table above:
   k8_nops[n] is the n-byte nop (1 <= n <= ASM_NOP_MAX), index 0 unused. */
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
75
#ifdef K7_NOP1
/* AMD K7 nop sequences, same layout as the tables above:
   k7_nops[n] is the n-byte nop (1 <= n <= ASM_NOP_MAX), index 0 unused. */
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
93
94#ifdef CONFIG_X86_64
95
/* Start of the vsyscall page; used by apply_alternatives() to fix up
   addresses before the vsyscall mapping exists. */
extern char __vsyscall_0;
/* On x86-64 only the K8 nop table applies, so no CPU probing is needed. */
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}
101
102#else /* CONFIG_X86_64 */
103
/* CPU-feature -> nop-table mapping, scanned in order by find_nop_table();
   terminated by a cpuid of -1. */
static struct nop {
	int cpuid;			/* X86_FEATURE_* bit to test */
	unsigned char **noptable;	/* table to use when the bit is set */
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};
112
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800113static unsigned char** find_nop_table(void)
114{
115 unsigned char **noptable = intel_nops;
116 int i;
117
118 for (i = 0; noptypes[i].cpuid >= 0; i++) {
119 if (boot_cpu_has(noptypes[i].cpuid)) {
120 noptable = noptypes[i].noptable;
121 break;
122 }
123 }
124 return noptable;
125}
126
Gerd Hoffmannd167a512006-06-26 13:56:16 +0200127#endif /* CONFIG_X86_64 */
128
Rusty Russell139ec7c2006-12-07 02:14:08 +0100129static void nop_out(void *insns, unsigned int len)
130{
131 unsigned char **noptable = find_nop_table();
132
133 while (len > 0) {
134 unsigned int noplen = len;
135 if (noplen > ASM_NOP_MAX)
136 noplen = ASM_NOP_MAX;
137 memcpy(insns, noptable[noplen], noplen);
138 insns += noplen;
139 len -= noplen;
140 }
141}
142
/* Section boundaries — presumably provided by the linker script; verify
   against vmlinux.lds. */
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have less capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		/* The replacement must fit in the original slot. */
		BUG_ON(a->replacementlen > a->instrlen);
		/* Only patch when this CPU has the required feature. */
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		/* Copy the replacement in, then nop-fill the leftover bytes. */
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}
177
Gerd Hoffmann8ec4d412006-07-01 04:36:18 -0700178#ifdef CONFIG_SMP
179
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800180static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
181{
182 u8 **ptr;
183
184 for (ptr = start; ptr < end; ptr++) {
185 if (*ptr < text)
186 continue;
187 if (*ptr > text_end)
188 continue;
189 **ptr = 0xf0; /* lock prefix */
190 };
191}
192
193static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
194{
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800195 u8 **ptr;
196
Jeremy Fitzhardingeb7fb4af2007-05-02 19:27:13 +0200197 if (noreplace_smp)
198 return;
199
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800200 for (ptr = start; ptr < end; ptr++) {
201 if (*ptr < text)
202 continue;
203 if (*ptr > text_end)
204 continue;
Rusty Russell139ec7c2006-12-07 02:14:08 +0100205 nop_out(*ptr, 1);
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800206 };
207}
208
/* Per-module record of SMP lock-prefix locations, kept on smp_alt_modules
   so alternatives_smp_switch() can re-patch all of them later. */
struct smp_alt_module {
	/* owning module (NULL for the core kernel — see
	   alternatives_smp_module_add() callers); matched against in
	   alternatives_smp_module_del() */
	struct module *mod;
	char *name;		/* human-readable name, used in debug output */

	/* ptrs to lock prefixes */
	u8 **locks;
	u8 **locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;	/* linkage on smp_alt_modules */
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);	/* protects smp_alt_modules */
226
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800227void alternatives_smp_module_add(struct module *mod, char *name,
228 void *locks, void *locks_end,
229 void *text, void *text_end)
230{
231 struct smp_alt_module *smp;
232 unsigned long flags;
233
Jeremy Fitzhardingeb7fb4af2007-05-02 19:27:13 +0200234 if (noreplace_smp)
235 return;
236
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800237 if (smp_alt_once) {
238 if (boot_cpu_has(X86_FEATURE_UP))
239 alternatives_smp_unlock(locks, locks_end,
240 text, text_end);
241 return;
242 }
243
244 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
245 if (NULL == smp)
246 return; /* we'll run the (safe but slow) SMP code then ... */
247
248 smp->mod = mod;
249 smp->name = name;
250 smp->locks = locks;
251 smp->locks_end = locks_end;
252 smp->text = text;
253 smp->text_end = text_end;
254 DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
255 __FUNCTION__, smp->locks, smp->locks_end,
256 smp->text, smp->text_end, smp->name);
257
258 spin_lock_irqsave(&smp_alt, flags);
259 list_add_tail(&smp->next, &smp_alt_modules);
260 if (boot_cpu_has(X86_FEATURE_UP))
261 alternatives_smp_unlock(smp->locks, smp->locks_end,
262 smp->text, smp->text_end);
263 spin_unlock_irqrestore(&smp_alt, flags);
264}
265
266void alternatives_smp_module_del(struct module *mod)
267{
268 struct smp_alt_module *item;
269 unsigned long flags;
270
Jeremy Fitzhardingeb7fb4af2007-05-02 19:27:13 +0200271 if (smp_alt_once || noreplace_smp)
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800272 return;
273
274 spin_lock_irqsave(&smp_alt, flags);
275 list_for_each_entry(item, &smp_alt_modules, next) {
276 if (mod != item->mod)
277 continue;
278 list_del(&item->next);
279 spin_unlock_irqrestore(&smp_alt, flags);
280 DPRINTK("%s: %s\n", __FUNCTION__, item->name);
281 kfree(item);
282 return;
283 }
284 spin_unlock_irqrestore(&smp_alt, flags);
285}
286
287void alternatives_smp_switch(int smp)
288{
289 struct smp_alt_module *mod;
290 unsigned long flags;
291
Ingo Molnar3047e992006-07-03 00:24:57 -0700292#ifdef CONFIG_LOCKDEP
293 /*
294 * A not yet fixed binutils section handling bug prevents
295 * alternatives-replacement from working reliably, so turn
296 * it off:
297 */
298 printk("lockdep: not fixing up alternatives.\n");
299 return;
300#endif
301
Jeremy Fitzhardingeb7fb4af2007-05-02 19:27:13 +0200302 if (noreplace_smp || smp_alt_once)
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800303 return;
304 BUG_ON(!smp && (num_online_cpus() > 1));
305
306 spin_lock_irqsave(&smp_alt, flags);
307 if (smp) {
308 printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
309 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
310 clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800311 list_for_each_entry(mod, &smp_alt_modules, next)
312 alternatives_smp_lock(mod->locks, mod->locks_end,
313 mod->text, mod->text_end);
314 } else {
315 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
316 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
317 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800318 list_for_each_entry(mod, &smp_alt_modules, next)
319 alternatives_smp_unlock(mod->locks, mod->locks_end,
320 mod->text, mod->text_end);
321 }
322 spin_unlock_irqrestore(&smp_alt, flags);
323}
324
Gerd Hoffmann8ec4d412006-07-01 04:36:18 -0700325#endif
326
Rusty Russell139ec7c2006-12-07 02:14:08 +0100327#ifdef CONFIG_PARAVIRT
Jeremy Fitzhardinge98de0322007-05-02 19:27:14 +0200328void apply_paravirt(struct paravirt_patch_site *start,
329 struct paravirt_patch_site *end)
Rusty Russell139ec7c2006-12-07 02:14:08 +0100330{
Jeremy Fitzhardinge98de0322007-05-02 19:27:14 +0200331 struct paravirt_patch_site *p;
Rusty Russell139ec7c2006-12-07 02:14:08 +0100332
333 for (p = start; p < end; p++) {
334 unsigned int used;
335
336 used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
337 p->len);
Jeremy Fitzhardinge7f63c412007-05-02 19:27:13 +0200338
Jeremy Fitzhardinge63f70272007-05-02 19:27:14 +0200339 BUG_ON(used > p->len);
340
Rusty Russell139ec7c2006-12-07 02:14:08 +0100341 /* Pad the rest with nops */
342 nop_out(p->instr + used, p->len - used);
343 }
344
Jeremy Fitzhardinge63f70272007-05-02 19:27:14 +0200345 /* Sync to be conservative, in case we patched following
346 * instructions */
Rusty Russell139ec7c2006-12-07 02:14:08 +0100347 sync_core();
348}
Jeremy Fitzhardinge98de0322007-05-02 19:27:14 +0200349extern struct paravirt_patch_site __start_parainstructions[],
Rusty Russell139ec7c2006-12-07 02:14:08 +0100350 __stop_parainstructions[];
351#endif /* CONFIG_PARAVIRT */
352
/* Boot-time entry point: apply all alternative, SMP-lock and paravirt
   patching to the core kernel, with interrupts disabled on this CPU so
   nothing executes the code while it is being rewritten. */
void __init alternative_instructions(void)
{
	unsigned long flags;

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	/* No CPU hotplug: the CPU count is fixed, so one pass suffices. */
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		/* Patch the core kernel to UP once, then release the
		 * smp-locks table memory since it is never needed again. */
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				__pa_symbol(&__smp_locks),
				__pa_symbol(&__smp_locks_end));
	} else {
		/* Keep tracking the core kernel so the SMP/UP state can be
		 * switched later (e.g. on CPU hotplug). */
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__start_parainstructions, __stop_parainstructions);
	local_irq_restore(flags);
}