blob: 4d8450ee3635e8ee587f5f639f7862b5a5155a74 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Machine check handler.
3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
4 * Rest from unknown author(s).
5 * 2004 Andi Kleen. Rewrote most of it.
6 */
7
8#include <linux/init.h>
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/string.h>
13#include <linux/rcupdate.h>
14#include <linux/kallsyms.h>
15#include <linux/sysdev.h>
16#include <linux/miscdevice.h>
17#include <linux/fs.h>
Randy Dunlapa9415642006-01-11 12:17:48 -080018#include <linux/capability.h>
Andi Kleen91c6d402005-07-28 21:15:39 -070019#include <linux/cpu.h>
20#include <linux/percpu.h>
Tim Hockine02e68d2007-07-21 17:10:36 +020021#include <linux/poll.h>
22#include <linux/thread_info.h>
Andi Kleen8c566ef2005-09-12 18:49:24 +020023#include <linux/ctype.h>
Andi Kleena98f0dd2007-02-13 13:26:23 +010024#include <linux/kmod.h>
Christoph Hellwig1eeb66a2007-05-08 00:27:03 -070025#include <linux/kdebug.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <asm/processor.h>
27#include <asm/msr.h>
28#include <asm/mce.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <asm/uaccess.h>
Andi Kleen0a9c3ee2006-01-11 22:46:54 +010030#include <asm/smp.h>
Tim Hockine02e68d2007-07-21 17:10:36 +020031#include <asm/idle.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
/* Fixed misc-device minor for /dev/mcelog. */
#define MISC_MCELOG_MINOR 227
/* Maximum number of MCA banks this driver manages. */
#define NR_BANKS 6

/* Nesting counter of CPUs currently inside do_machine_check(). */
atomic_t mce_entry;

/* Set by "nomce"/"mce=off"; suppresses per-CPU MCE initialization. */
static int mce_dont_init;

/*
 * Tolerant levels:
 * 0: always panic on uncorrected errors, log corrected errors
 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 * 3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
/* Number of banks reported by MCG_CAP (clamped to NR_BANKS). */
static int banks;
/* Per-bank MCi_CTL enable masks; default: all error types enabled. */
static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
/* Bit 0 set when a new MCE was logged and userspace needs waking. */
static unsigned long notify_user;
/* MSR number for precise RIP reporting, 0 if unavailable. */
static int rip_msr;
/* Log MCEs left over from before boot (disabled on AMD quirk). */
static int mce_bootlog = 1;
/* Total number of events passed to mce_log(). */
static atomic_t mce_events;

/* Path of the usermode helper run when new events arrive. */
static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

/* Readers of /dev/mcelog block here via poll(). */
static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
59
/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */

struct mce_log mcelog = {
	MCE_LOG_SIGNATURE,
	MCE_LOG_LEN,
};

/*
 * Append one record to the global mcelog ring. Safe to call from
 * machine-check/NMI-like context: the slot is claimed with cmpxchg and
 * the 'finished' flag (fenced by wmb) publishes the completed entry.
 */
void mce_log(struct mce *mce)
{
	unsigned next, entry;
	atomic_inc(&mce_events);
	/* Mark the record in-flight before it becomes visible. */
	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		/* The rmb forces the compiler to reload next in each
		   iteration */
		rmb();
		for (;;) {
			/* When the buffer fills up discard new entries. Assume
			   that the earlier errors are the more interesting. */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW, &mcelog.flags);
				return;
			}
			/* Old left over entry. Skip. */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		/* Claim the slot; retry if another CPU raced us to it. */
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	/* Publish: readers only trust entries with finished set. */
	mcelog.entry[entry].finished = 1;
	wmb();

	/* Tell mce_notify_user() there is something to report. */
	set_bit(0, &notify_user);
}
108
/*
 * Dump one MCE record to the console at KERN_EMERG level.
 * Used by mce_panic() right before panicking, so output must not
 * depend on any locks.
 */
static void print_mce(struct mce *m)
{
	printk(KERN_EMERG "\n"
	       KERN_EMERG "HARDWARE ERROR\n"
	       KERN_EMERG
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->cpu, m->mcgstatus, m->bank, m->status);
	if (m->rip) {
		/* Flag the IP as inexact when EIPV is not set. */
		printk(KERN_EMERG
		       "RIP%s %02x:<%016Lx> ",
		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
		       m->cs, m->rip);
		/* Only kernel addresses can be resolved to symbols. */
		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->rip);
		printk("\n");
	}
	printk(KERN_EMERG "TSC %Lx ", m->tsc);
	if (m->addr)
		printk("ADDR %Lx ", m->addr);
	if (m->misc)
		printk("MISC %Lx ", m->misc);
	printk("\n");
	printk(KERN_EMERG "This is not a software problem!\n");
	printk(KERN_EMERG
    "Run through mcelog --ascii to decode and contact your hardware vendor\n");
}
135
/*
 * Panic after printing all MCE records newer than 'start' (TSC value),
 * plus 'backup' (the record that triggered the panic) if it was not
 * already in the log.
 */
static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
	int i;

	oops_begin();
	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;
		/* Skip records that predate this machine check. */
		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
		/* Matched the backup record in the log; don't print twice. */
		if (backup && mcelog.entry[i].tsc == backup->tsc)
			backup = NULL;
	}
	if (backup)
		print_mce(backup);
	panic(msg);
}
153
154static int mce_available(struct cpuinfo_x86 *c)
155{
Akinobu Mita3d1712c2006-03-24 03:15:11 -0800156 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157}
158
/*
 * Fill in m->rip/m->cs, the instruction pointer associated with the
 * machine check. Falls back to the trap frame when RIPV says it is
 * valid; prefers the dedicated RIP MSR when the CPU provides one.
 */
static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		m->rip = regs->rip;
		m->cs = regs->cs;
	} else {
		m->rip = 0;
		m->cs = 0;
	}
	/* rip_msr is nonzero only when mce_init() detected MCG_EIP support. */
	if (rip_msr) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		rdmsrl(rip_msr, m->rip);
		m->cs = 0;
	}
}
175
/*
 * The actual machine check handler
 */

/*
 * Scan all enabled MCA banks, log valid errors, and decide whether to
 * continue, kill the offending task, or panic (policy set by 'tolerant').
 *
 * Calling conventions (see mce_init()/mcheck_check_cpu()):
 *   regs == NULL        - polling/boot-scan mode, never takes fatal action
 *   error_code >= 0     - real exception; record a fresh TSC per event
 *   error_code == -1    - boot-time scan, log leftover events
 *   error_code == -2    - boot-time scan, clear banks but do NOT log
 */
void do_machine_check(struct pt_regs * regs, long error_code)
{
	struct mce m, panicm;
	u64 mcestart = 0;
	int i;
	int panicm_found = 0;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;

	atomic_inc(&mce_entry);

	if (regs)
		notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL);
	if (!banks)
		goto out2;

	memset(&m, 0, sizeof(struct mce));
	m.cpu = smp_processor_id();
	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
	/* if the restart IP is not valid, we're done for */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		no_way_out = 1;

	rdtscll(mcestart);
	barrier();

	for (i = 0; i < banks; i++) {
		/* Bank disabled via sysfs bankNctl? Skip it. */
		if (!bank[i])
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		/* VAL clear means the bank holds no error. */
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		if (m.status & MCI_STATUS_EN) {
			/* if PCC was set, there's no way out */
			no_way_out |= !!(m.status & MCI_STATUS_PCC);
			/*
			 * If this error was uncorrectable and there was
			 * an overflow, we're in trouble.  If no overflow,
			 * we might get away with just killing a task.
			 */
			if (m.status & MCI_STATUS_UC) {
				if (tolerant < 1 || m.status & MCI_STATUS_OVER)
					no_way_out = 1;
				kill_it = 1;
			}
		}

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		mce_get_rip(&m, regs);
		if (error_code >= 0)
			rdtscll(m.tsc);
		if (error_code != -2)
			mce_log(&m);

		/* Did this bank cause the exception? */
		/* Assume that the bank with uncorrectable errors did it,
		   and that there is only a single one. */
		if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
			panicm = m;
			panicm_found = 1;
		}

		add_taint(TAINT_MACHINE_CHECK);
	}

	/* Never do anything final in the polling timer */
	if (!regs)
		goto out;

	/* If we didn't find an uncorrectable error, pick
	   the last one (shouldn't happen, just being safe). */
	if (!panicm_found)
		panicm = m;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Machine check", &panicm, mcestart);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done.  Try to kill as little as possible.  If we can kill just
	 * one task, do that.  If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3) {
		int user_space = 0;

		/*
		 * If the EIPV bit is set, it means the saved IP is the
		 * instruction which caused the MCE.
		 */
		if (m.mcgstatus & MCG_STATUS_EIPV)
			user_space = panicm.rip && (panicm.cs & 3);

		/*
		 * If we know that the error was in user space, send a
		 * SIGBUS.  Otherwise, panic if tolerance is low.
		 *
		 * do_exit() takes an awful lot of locks and has a slight
		 * risk of deadlocking.
		 */
		if (user_space) {
			do_exit(SIGBUS);
		} else if (panic_on_oops || tolerant < 2) {
			mce_panic("Uncorrected machine check",
				&panicm, mcestart);
		}
	}

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

 out:
	/* the last thing we do is clear state */
	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	wrmsrl(MSR_IA32_MCG_STATUS, 0);
 out2:
	atomic_dec(&mce_entry);
}
322
#ifdef CONFIG_X86_MCE_INTEL
/***
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @cpu: The CPU on which the event occured.
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
{
	struct mce event;

	/* Build a synthetic record in the pseudo thermal bank. */
	memset(&event, 0, sizeof(event));
	event.bank = MCE_THERMAL_BANK;
	event.cpu = cpu;
	event.status = status;
	rdtscll(event.tsc);
	mce_log(&event);
}
#endif /* CONFIG_X86_MCE_INTEL */
349
/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */

static int check_interval = 5 * 60; /* 5 minutes */
static int next_interval; /* in jiffies */
static void mcheck_timer(struct work_struct *work);
static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360
361static void mcheck_check_cpu(void *info)
362{
363 if (mce_available(&current_cpu_data))
364 do_machine_check(NULL, 0);
365}
366
/* Workqueue body of the silent-error poller; reschedules itself. */
static void mcheck_timer(struct work_struct *work)
{
	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);

	/*
	 * Alert userspace if needed.  If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	if (mce_notify_user()) {
		/* Events seen: poll faster, but no faster than HZ/100. */
		next_interval = max(next_interval/2, HZ/100);
	} else {
		/* Quiet: back off, capped at check_interval seconds. */
		next_interval = min(next_interval*2,
				(int)round_jiffies_relative(check_interval*HZ));
	}

	schedule_delayed_work(&mcheck_work, next_interval);
}
384
/*
 * This is only called from process context.  This is where we do
 * anything we need to alert userspace about new MCEs.  This is called
 * directly from the poller and also from entry.S and idle, thanks to
 * TIF_MCE_NOTIFY.
 *
 * Returns 1 if new events were pending (and userspace was notified),
 * 0 otherwise.
 */
int mce_notify_user(void)
{
	clear_thread_flag(TIF_MCE_NOTIFY);
	if (test_and_clear_bit(0, &notify_user)) {
		static unsigned long last_print;
		unsigned long now = jiffies;

		/* Wake poll()ers of /dev/mcelog and run the trigger helper. */
		wake_up_interruptible(&mce_wait);
		if (trigger[0])
			call_usermodehelper(trigger, trigger_argv, NULL,
						UMH_NO_WAIT);

		/* Rate-limit the console message to one per check_interval. */
		if (time_after_eq(now, last_print + (check_interval*HZ))) {
			last_print = now;
			printk(KERN_INFO "Machine check events logged\n");
		}

		return 1;
	}
	return 0;
}
412
/* see if the idle task needs to notify userspace */
static int
mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
{
	/* IDLE_END should be safe - interrupts are back on */
	if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
		mce_notify_user();

	return NOTIFY_OK;
}

static struct notifier_block mce_idle_notifier = {
	.notifier_call = mce_idle_callback,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700427
/* Start the background poller and hook the idle notifier at boot. */
static __init int periodic_mcheck_init(void)
{
	next_interval = check_interval * HZ;
	/* check_interval == 0 disables polling entirely. */
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
	idle_notifier_register(&mce_idle_notifier);
	return 0;
}
__initcall(periodic_mcheck_init);
438
439
/*
 * Initialize Machine Checks for a CPU.
 * Reads MCG_CAP, flushes leftover boot-time errors, enables CR4.MCE,
 * and programs every bank's control/status MSRs.
 */
static void mce_init(void *dummy)
{
	u64 cap;
	int i;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	/* Low byte of MCG_CAP = number of reporting banks. */
	banks = cap & 0xff;
	if (banks > NR_BANKS) {
		printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
		banks = NR_BANKS;
	}
	/* Use accurate RIP reporting if available. */
	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	/* Log the machine checks left over from the previous reset.
	   This also clears all registers */
	do_machine_check(NULL, mce_bootlog ? -1 : -2);

	set_in_cr4(X86_CR4_MCE);

	/* Enable all features advertised in MCG_CAP. */
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}
472
/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
		/* disable GART TBL walk error reporting, which trips off
		   incorrectly with the IOMMU & 3ware & Cerberus. */
		clear_bit(10, &bank[4]);
		/* Lots of broken BIOS around that don't clear them
		   by default and leave crap in there. Don't log. */
		mce_bootlog = 0;
	}

}
487
Ashok Raje6982c62005-06-25 14:54:58 -0700488static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700489{
490 switch (c->x86_vendor) {
491 case X86_VENDOR_INTEL:
492 mce_intel_feature_init(c);
493 break;
Jacob Shin89b831e2005-11-05 17:25:53 +0100494 case X86_VENDOR_AMD:
495 mce_amd_feature_init(c);
496 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497 default:
498 break;
499 }
500}
501
/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	/* Tracks which CPUs were already initialized (idempotence guard). */
	static cpumask_t mce_cpus = CPU_MASK_NONE;

	mce_cpu_quirks(c);

	if (mce_dont_init ||
	    cpu_test_and_set(smp_processor_id(), mce_cpus) ||
	    !mce_available(c))
		return;

	mce_init(NULL);
	mce_cpu_features(c);
}
520
/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;	/* #times opened */
static int open_exclu;	/* already open exclusive? */

/*
 * Open /dev/mcelog. O_EXCL gives one opener exclusive access;
 * any existing exclusive opener blocks all further opens.
 */
static int mce_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);
		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);

	/* The device only supports full reads from offset 0; no seeking. */
	return nonseekable_open(inode, file);
}
546
547static int mce_release(struct inode *inode, struct file *file)
548{
549 spin_lock(&mce_state_lock);
550
551 open_count--;
552 open_exclu = 0;
553
554 spin_unlock(&mce_state_lock);
555
556 return 0;
557}
558
/* IPI callback: store the executing CPU's TSC into its array slot. */
static void collect_tscs(void *data)
{
	unsigned long *tscs = data;

	rdtscll(tscs[smp_processor_id()]);
}
564
/*
 * Read and clear the whole MCE log. Only full-buffer reads at offset 0
 * are supported. Entries still being written are waited for briefly,
 * then discarded; entries written during the drain are swept up after
 * synchronize_sched() using per-CPU TSC snapshots.
 */
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off)
{
	unsigned long *cpu_tsc;
	static DECLARE_MUTEX(mce_read_sem);
	unsigned next;
	char __user *buf = ubuf;
	int i, err;

	cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	down(&mce_read_sem);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		up(&mce_read_sem);
		kfree(cpu_tsc);
		return -EINVAL;
	}

	err = 0;
	for (i = 0; i < next; i++) {
		unsigned long start = jiffies;
		/* Writer claimed the slot but hasn't finished; wait ~2 jiffies. */
		while (!mcelog.entry[i].finished) {
			if (time_after_eq(jiffies, start + 2)) {
				memset(mcelog.entry + i,0, sizeof(struct mce));
				goto timeout;
			}
			cpu_relax();
		}
		smp_rmb();
		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
		buf += sizeof(struct mce);
 timeout:
		;
	}

	memset(mcelog.entry, 0, next * sizeof(struct mce));
	mcelog.next = 0;

	synchronize_sched();

	/* Collect entries that were still getting written before the synchronize. */

	on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
	for (i = next; i < MCE_LOG_LEN; i++) {
		/* Only take entries older than that CPU's current TSC. */
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	up(&mce_read_sem);
	kfree(cpu_tsc);
	return err ? -EFAULT : buf - ubuf;
}
625
Tim Hockine02e68d2007-07-21 17:10:36 +0200626static unsigned int mce_poll(struct file *file, poll_table *wait)
627{
628 poll_wait(file, &mce_wait, wait);
629 if (rcu_dereference(mcelog.next))
630 return POLLIN | POLLRDNORM;
631 return 0;
632}
633
/*
 * ioctl interface: report record/log sizes and atomically fetch+clear
 * the overflow flags. Requires CAP_SYS_ADMIN.
 */
static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;
		/* cmpxchg loop: clear flags without losing concurrent sets. */
		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}
655
/* File operations backing /dev/mcelog. */
static const struct file_operations mce_chrdev_ops = {
	.open = mce_open,
	.release = mce_release,
	.read = mce_read,
	.poll = mce_poll,
	.ioctl = mce_ioctl,
};

/* Misc device registration for /dev/mcelog (fixed minor). */
static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};
669
/*
 * Old style boot options parsing. Only for compatibility.
 */

/* "nomce" boot parameter: skip all MCE setup. */
static int __init mcheck_disable(char *str)
{
	mce_dont_init = 1;
	return 1;
}
679
/* mce=off disables machine check. Note you can reenable it later
   using sysfs.
   mce=TOLERANCELEVEL (number, see above)
   mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
   mce=nobootlog Don't log MCEs from before booting. */
static int __init mcheck_enable(char *str)
{
	/* Accept both "mce=opt" and "mce =opt" forms. */
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		mce_dont_init = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog"))
		mce_bootlog = str[0] == 'b';
	else if (isdigit(str[0]))
		get_option(&str, &tolerant);
	else
		printk("mce= argument %s ignored. Please use /sys", str);
	return 1;
}

__setup("nomce", mcheck_disable);
__setup("mce", mcheck_enable);
702
/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
   Only one CPU is active at this time, the others get readded later using
   CPU hotplug. */
static int mce_resume(struct sys_device *dev)
{
	mce_init(NULL);
	return 0;
}
715
/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	/* Stop the poller before reprogramming the banks. */
	if (next_interval)
		cancel_delayed_work(&mcheck_work);
	/* Timer race is harmless here */
	on_each_cpu(mce_init, NULL, 1, 1);
	next_interval = check_interval * HZ;
	/* check_interval == 0 leaves polling disabled. */
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				round_jiffies_relative(next_interval));
}
728
/* sysfs class /sys/devices/system/machinecheck; reinits MCE on resume. */
static struct sysdev_class mce_sysclass = {
	.resume = mce_resume,
	set_kset_name("machinecheck"),
};

/* One sysdev node per CPU under the machinecheck class. */
DEFINE_PER_CPU(struct sys_device, device_mce);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735
/* Why are there no generic functions for this? */
/* Generates show_/set_ sysfs handlers plus the attribute for a
   long-valued variable; 'start' runs after each successful write. */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
		return sprintf(buf, "%lx\n", (unsigned long)var); \
	} \
	static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \
		char *end; \
		unsigned long new = simple_strtoul(buf, &end, 0); \
		if (end == buf) return -EINVAL; \
		var = new; \
		start; \
		return end-buf; \
	} \
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);

/* TBD should generate these dynamically based on number of available banks */
ACCESSOR(bank0ctl,bank[0],mce_restart())
ACCESSOR(bank1ctl,bank[1],mce_restart())
ACCESSOR(bank2ctl,bank[2],mce_restart())
ACCESSOR(bank3ctl,bank[3],mce_restart())
ACCESSOR(bank4ctl,bank[4],mce_restart())
ACCESSOR(bank5ctl,bank[5],mce_restart())
Andi Kleena98f0dd2007-02-13 13:26:23 +0100758
759static ssize_t show_trigger(struct sys_device *s, char *buf)
760{
761 strcpy(buf, trigger);
762 strcat(buf, "\n");
763 return strlen(trigger) + 1;
764}
765
766static ssize_t set_trigger(struct sys_device *s,const char *buf,size_t siz)
767{
768 char *p;
769 int len;
770 strncpy(trigger, buf, sizeof(trigger));
771 trigger[sizeof(trigger)-1] = 0;
772 len = strlen(trigger);
773 p = strchr(trigger, '\n');
774 if (*p) *p = 0;
775 return len;
776}
777
static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
/* tolerant is applied immediately; check_interval needs a restart. */
ACCESSOR(tolerant,tolerant,)
ACCESSOR(check_interval,check_interval,mce_restart())
/* NULL-terminated list of attributes created per CPU device. */
static struct sysdev_attribute *mce_attributes[] = {
	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
	&attr_tolerant, &attr_check_interval, &attr_trigger,
	NULL
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787
/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	int i;
	if (!mce_available(&cpu_data[cpu]))
		return -EIO;

	per_cpu(device_mce,cpu).id = cpu;
	per_cpu(device_mce,cpu).cls = &mce_sysclass;

	err = sysdev_register(&per_cpu(device_mce,cpu));

	/* Populate the attribute files only if registration succeeded. */
	if (!err) {
		for (i = 0; mce_attributes[i]; i++)
			sysdev_create_file(&per_cpu(device_mce,cpu),
				mce_attributes[i]);
	}
	return err;
}
808
/* Tear down the per-CPU sysfs device (hotplug CPU_DEAD path). */
static void mce_remove_device(unsigned int cpu)
{
	int i;

	for (i = 0; mce_attributes[i]; i++)
		sysdev_remove_file(&per_cpu(device_mce,cpu),
			mce_attributes[i]);
	sysdev_unregister(&per_cpu(device_mce,cpu));
	/* Reset the kobject so the device can be re-registered later. */
	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
}
Andi Kleen91c6d402005-07-28 21:15:39 -0700819
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		mce_remove_device(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier = {
	.notifier_call = mce_cpu_callback,
};
842
843static __init int mce_init_device(void)
844{
845 int err;
846 int i = 0;
847
Linus Torvalds1da177e2005-04-16 15:20:36 -0700848 if (!mce_available(&boot_cpu_data))
849 return -EIO;
850 err = sysdev_class_register(&mce_sysclass);
Andi Kleen91c6d402005-07-28 21:15:39 -0700851
852 for_each_online_cpu(i) {
853 mce_create_device(i);
854 }
855
Chandra Seetharamanbe6b5a32006-07-30 03:03:37 -0700856 register_hotcpu_notifier(&mce_cpu_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857 misc_register(&mce_log_device);
858 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859}
Andi Kleen91c6d402005-07-28 21:15:39 -0700860
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861device_initcall(mce_init_device);