#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int smp_alt_once;
static int debug_alternative;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}

__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);

#define DPRINTK(fmt, args...)					\
	do {							\
		if (debug_alternative)				\
			printk(KERN_DEBUG fmt, args);		\
	} while (0)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
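
/*
 * Each of the NOP tables in this file works the same way: the NOP
 * sequences are emitted back to back in .data, so entry n points at a
 * single NOP instruction that is exactly n bytes long (up to
 * ASM_NOP_MAX). nop_out() below relies on this: noptable[len] is one
 * instruction of precisely len bytes.
 */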

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

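/*
 * On 32-bit kernels the NOP flavour is picked at boot from CPU feature
 * flags; the generic Intel NOPs are the fallback when no entry in
 * noptypes[] matches.
 */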
static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */

static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
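
/*
 * Worked example: with ASM_NOP_MAX == 8, nop_out(insns, 13) emits one
 * 8-byte NOP followed by one 5-byte NOP; the hole is filled greedily
 * with the largest NOPs available, not with 13 single-byte NOPs.
 */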

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

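/*
 * start/end normally point into the .altinstructions section, which
 * the alternative() macro family in <asm/alternative.h> fills with one
 * struct alt_instr per patch site: the address of the original
 * instruction, the replacement, the required CPU feature bit, and the
 * two lengths.
 */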
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
			instr = __va(instr - (u8 *)VSYSCALL_START + (u8 *)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}

#ifdef CONFIG_SMP

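/*
 * UP/SMP lock prefix patching. The LOCK_PREFIX macro emits its 0xf0
 * lock prefix byte together with a .smp_locks entry recording the
 * byte's address. On a uniprocessor machine the prefix is overwritten
 * with a one-byte NOP (alternatives_smp_unlock); if further CPUs come
 * online later, the 0xf0 byte is put back (alternatives_smp_lock).
 */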
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		nop_out(*ptr, 1);
	}
}

struct smp_alt_module {
	/* the module owning these lock prefixes (NULL for the core kernel) */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	u8 **locks;
	u8 **locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
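
/*
 * Modules register their lock prefix addresses here at load time so
 * that they are patched together with the core kernel whenever the
 * system switches between UP and SMP mode.
 */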
214
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800215void alternatives_smp_module_add(struct module *mod, char *name,
216 void *locks, void *locks_end,
217 void *text, void *text_end)
218{
219 struct smp_alt_module *smp;
220 unsigned long flags;
221
222 if (smp_alt_once) {
223 if (boot_cpu_has(X86_FEATURE_UP))
224 alternatives_smp_unlock(locks, locks_end,
225 text, text_end);
226 return;
227 }
228
229 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
230 if (NULL == smp)
231 return; /* we'll run the (safe but slow) SMP code then ... */
232
233 smp->mod = mod;
234 smp->name = name;
235 smp->locks = locks;
236 smp->locks_end = locks_end;
237 smp->text = text;
238 smp->text_end = text_end;
239 DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
240 __FUNCTION__, smp->locks, smp->locks_end,
241 smp->text, smp->text_end, smp->name);
242
243 spin_lock_irqsave(&smp_alt, flags);
244 list_add_tail(&smp->next, &smp_alt_modules);
245 if (boot_cpu_has(X86_FEATURE_UP))
246 alternatives_smp_unlock(smp->locks, smp->locks_end,
247 smp->text, smp->text_end);
248 spin_unlock_irqrestore(&smp_alt, flags);
249}

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

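/*
 * Intended for the CPU hotplug path: smp=1 when a second CPU is about
 * to come online, smp=0 once the system is back down to a single CPU.
 */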
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * An as-yet-unfixed binutils section handling bug prevents
	 * the alternatives replacement from working reliably, so
	 * turn it off:
	 */
	printk(KERN_INFO "lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif

#ifdef CONFIG_PARAVIRT
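/*
 * Paravirt patching: each paravirt_patch entry describes one patchable
 * call site. paravirt_ops.patch may rewrite the site with an inline
 * sequence or a direct call and returns the number of bytes it wrote;
 * the remainder of the site is padded with NOPs.
 */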
void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
{
	struct paravirt_patch *p;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);
#ifdef CONFIG_DEBUG_PARAVIRT
		{
			int i;
			/* Deliberately clobber regs using "not %reg" to find bugs. */
			for (i = 0; i < 3; i++) {
				if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
					memcpy(p->instr + used, "\xf7\xd0", 2);
					p->instr[used+1] |= i;
					used += 2;
				}
			}
		}
#endif
		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following instructions */
	sync_core();
}
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */
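/*
 * Boot-time entry point; runs once, with interrupts disabled, before
 * the secondary CPUs are started. The order matters: CPU feature
 * alternatives first, then the UP/SMP lock prefix pass, then paravirt
 * patching.
 */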
void __init alternative_instructions(void)
{
	unsigned long flags;

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* Switch to patch-once-at-boot-time-only mode and free the
	 * tables when we know the number of CPUs can never change. */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (num_possible_cpus() == 1) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				__pa_symbol(&__smp_locks),
				__pa_symbol(&__smp_locks_end));
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__start_parainstructions, __stop_parainstructions);
	local_irq_restore(flags);
}