blob: 7b6abe0582536f54f5c5aea12587d22d3ab92f1d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/arch/x86-64/kernel/time.c
3 *
4 * "High Precision Event Timer" based timekeeping.
5 *
6 * Copyright (c) 1991,1992,1995 Linus Torvalds
7 * Copyright (c) 1994 Alan Modra
8 * Copyright (c) 1995 Markus Kuhn
9 * Copyright (c) 1996 Ingo Molnar
10 * Copyright (c) 1998 Andrea Arcangeli
11 * Copyright (c) 2002 Vojtech Pavlik
12 * Copyright (c) 2003 Andi Kleen
13 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
14 */
15
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <linux/interrupt.h>
19#include <linux/init.h>
20#include <linux/mc146818rtc.h>
21#include <linux/irq.h>
22#include <linux/time.h>
23#include <linux/ioport.h>
24#include <linux/module.h>
25#include <linux/device.h>
26#include <linux/sysdev.h>
27#include <linux/bcd.h>
28#include <linux/kallsyms.h>
Andi Kleen312df5f2005-05-16 21:53:28 -070029#include <linux/acpi.h>
Andi Kleen8d916402005-05-31 14:39:26 -070030#ifdef CONFIG_ACPI
Andi Kleen312df5f2005-05-16 21:53:28 -070031#include <acpi/achware.h> /* for PM timer frequency */
Andi Kleen8d916402005-05-31 14:39:26 -070032#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <asm/8253pit.h>
34#include <asm/pgtable.h>
35#include <asm/vsyscall.h>
36#include <asm/timex.h>
37#include <asm/proto.h>
38#include <asm/hpet.h>
39#include <asm/sections.h>
40#include <linux/cpufreq.h>
41#include <linux/hpet.h>
42#ifdef CONFIG_X86_LOCAL_APIC
43#include <asm/apic.h>
44#endif
45
46u64 jiffies_64 = INITIAL_JIFFIES;
47
48EXPORT_SYMBOL(jiffies_64);
49
50#ifdef CONFIG_CPU_FREQ
51static void cpufreq_delayed_get(void);
52#endif
53extern void i8254_timer_resume(void);
54extern int using_apic_timer;
55
56DEFINE_SPINLOCK(rtc_lock);
57DEFINE_SPINLOCK(i8253_lock);
58
59static int nohpet __initdata = 0;
60static int notsc __initdata = 0;
61
62#undef HPET_HACK_ENABLE_DANGEROUS
63
64unsigned int cpu_khz; /* TSC clocks / usec, not used here */
65static unsigned long hpet_period; /* fsecs / HPET clock */
66unsigned long hpet_tick; /* HPET clocks / interrupt */
john stultza3a00752005-06-23 00:08:36 -070067static int hpet_use_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -070068unsigned long vxtime_hz = PIT_TICK_RATE;
69int report_lost_ticks; /* command line option */
70unsigned long long monotonic_base;
71
72struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
73
74volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
75unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
76struct timespec __xtime __section_xtime;
77struct timezone __sys_tz __section_sys_tz;
78
/*
 * Read the 64-bit TSC into *tsc.  On SMP, serialize the instruction
 * stream first with sync_core() so the rdtsc is not executed
 * speculatively/out of order relative to surrounding code.
 */
static inline void rdtscll_sync(unsigned long *tsc)
{
#ifdef CONFIG_SMP
        sync_core();
#endif
        rdtscll(*tsc);
}
86
87/*
88 * do_gettimeoffset() returns microseconds since last timer interrupt was
89 * triggered by hardware. A memory read of HPET is slower than a register read
90 * of TSC, but much more reliable. It's also synchronized to the timer
91 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
92 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
93 * This is not a problem, because jiffies hasn't updated either. They are bound
94 * together by xtime_lock.
95 */
96
/*
 * Microseconds since the last timer tick, derived from the TSC.
 * vxtime.last_tsc is the TSC value recorded at the last tick;
 * vxtime.tsc_quot is a 32.32 fixed-point usecs-per-cycle factor,
 * so the >> 32 converts the cycle delta to microseconds.
 */
static inline unsigned int do_gettimeoffset_tsc(void)
{
        unsigned long t;
        unsigned long x;
        rdtscll_sync(&t);
        /* hack: clamp so time never appears to go backwards if this
           CPU's TSC lags the one that set vxtime.last_tsc */
        if (t < vxtime.last_tsc) t = vxtime.last_tsc;
        x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
        return x;
}
106
/*
 * Microseconds since the last timer tick, derived from the HPET main
 * counter.  vxtime.quot is a 32.32 fixed-point usecs-per-HPET-clock
 * factor computed at init time.
 */
static inline unsigned int do_gettimeoffset_hpet(void)
{
        /* cap counter read to one tick to avoid inconsistencies */
        unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
        return (min(counter,hpet_tick) * vxtime.quot) >> 32;
}
113
114unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
115
116/*
117 * This version of gettimeofday() has microsecond resolution and better than
118 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
119 * MHz) HPET timer.
120 */
121
void do_gettimeofday(struct timeval *tv)
{
        unsigned long seq, t;
        unsigned int sec, usec;

        /* seqlock reader loop: retry if the tick handler updated
           xtime/vxtime while we were sampling them */
        do {
                seq = read_seqbegin(&xtime_lock);

                sec = xtime.tv_sec;
                usec = xtime.tv_nsec / 1000;

                /* i386 does some correction here to keep the clock
                   monotonous even when ntpd is fixing drift.
                   But they didn't work for me, there is a non monotonic
                   clock anyways with ntp.
                   I dropped all corrections now until a real solution can
                   be found. Note when you fix it here you need to do the same
                   in arch/x86_64/kernel/vsyscall.c and export all needed
                   variables in vmlinux.lds. -AK */

                /* add usecs for ticks not yet folded into xtime, plus the
                   sub-tick offset read from the timer hardware */
                t = (jiffies - wall_jiffies) * (1000000L / HZ) +
                        do_gettimeoffset();
                usec += t;

        } while (read_seqretry(&xtime_lock, seq));

        /* normalize: usec may exceed one second after the additions */
        tv->tv_sec = sec + usec / 1000000;
        tv->tv_usec = usec % 1000000;
}
151
152EXPORT_SYMBOL(do_gettimeofday);
153
154/*
155 * settimeofday() first undoes the correction that gettimeofday would do
156 * on the time, and then saves it. This is ugly, but has been like this for
157 * ages already.
158 */
159
/*
 * Set the wall clock to *tv.  Subtracts the interpolated offset that
 * do_gettimeofday() would add, updates wall_to_monotonic to keep the
 * monotonic clock continuous, and resets NTP state.
 * Returns 0 on success, -EINVAL for an out-of-range tv_nsec.
 */
int do_settimeofday(struct timespec *tv)
{
        time_t wtm_sec, sec = tv->tv_sec;
        long wtm_nsec, nsec = tv->tv_nsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irq(&xtime_lock);

        /* undo the interpolation gettimeofday would apply on top of xtime */
        nsec -= do_gettimeoffset() * 1000 +
                (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

        set_normalized_timespec(&xtime, sec, nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        ntp_clear();

        write_sequnlock_irq(&xtime_lock);
        clock_was_set();
        return 0;
}
185
186EXPORT_SYMBOL(do_settimeofday);
187
/*
 * Return the PC to account a profiling tick to.  If the tick landed
 * inside a lock function, attribute it to the caller instead by
 * heuristically reading the return address off the stack.
 */
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        /* Assume the lock function has either no stack frame or only a single word.
           This checks if the address on the stack looks like a kernel text address.
           There is a small window for false hits, but in that case the tick
           is just accounted to the spinlock function.
           Better would be to write these functions in assembler again
           and check exactly. */
        if (in_lock_functions(pc)) {
                char *v = *(char **)regs->rsp;
                if ((v >= _stext && v <= _etext) ||
                    (v >= _sinittext && v <= _einittext) ||
                    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
                        return (unsigned long)v;
                /* top of stack wasn't text: assume a one-word frame and
                   take the next stack slot as the return address */
                return ((unsigned long *)regs->rsp)[1];
        }
        return pc;
}
208EXPORT_SYMBOL(profile_pc);
209
210/*
211 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
212 * ms after the second nowtime has started, because when nowtime is written
213 * into the registers of the CMOS clock, it will jump to the next second
214 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
215 * sheet for details.
216 */
217
/*
 * Write the minutes/seconds of 'nowtime' into the CMOS RTC.  Only
 * minutes and seconds are touched so timezone/hour differences are
 * preserved.  The caller must invoke this ~500 ms after the second
 * boundary (see comment above) for the RTC to latch correctly.
 */
static void set_rtc_mmss(unsigned long nowtime)
{
        int real_seconds, real_minutes, cmos_minutes;
        unsigned char control, freq_select;

/*
 * IRQs are disabled when we're called from the timer interrupt,
 * no need for spin_lock_irqsave()
 */

        spin_lock(&rtc_lock);

/*
 * Tell the clock it's being set and stop it.
 */

        control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

        freq_select = CMOS_READ(RTC_FREQ_SELECT);
        CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

        cmos_minutes = CMOS_READ(RTC_MINUTES);
                BCD_TO_BIN(cmos_minutes);

/*
 * since we're only adjusting minutes and seconds, don't interfere with hour
 * overflow. This avoids messing with unknown time zones but requires your RTC
 * not to be off by more than 15 minutes. Since we're calling it only when
 * our clock is externally synchronized using NTP, this shouldn't be a problem.
 */

        real_seconds = nowtime % 60;
        real_minutes = nowtime / 60;
        if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
                real_minutes += 30;             /* correct for half hour time zone */
        real_minutes %= 60;

#if 0
        /* AMD 8111 is a really bad time keeper and hits this regularly.
           It probably was an attempt to avoid screwing up DST, but ignore
           that for now. */
        if (abs(real_minutes - cmos_minutes) >= 30) {
                printk(KERN_WARNING "time.c: can't update CMOS clock "
                       "from %d to %d\n", cmos_minutes, real_minutes);
        } else
#endif

        {
                BIN_TO_BCD(real_seconds);
                BIN_TO_BCD(real_minutes);
                CMOS_WRITE(real_seconds, RTC_SECONDS);
                CMOS_WRITE(real_minutes, RTC_MINUTES);
        }

/*
 * The following flags have to be released exactly in this order, otherwise the
 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
 * not reset the oscillator and will not update precisely 500 ms later. You
 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
 * believes data sheets anyway ...                           -- Markus Kuhn
 */

        CMOS_WRITE(control, RTC_CONTROL);
        CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

        spin_unlock(&rtc_lock);
}
286
287
288/* monotonic_clock(): returns # of nanoseconds passed since time_init()
289 * Note: This function is required to return accurate
290 * time even in the absence of multiple timer ticks.
291 */
/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 * Note: This function is required to return accurate
 * time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
        unsigned long seq;
        u32 last_offset, this_offset, offset;
        unsigned long long base;

        if (vxtime.mode == VXTIME_HPET) {
                do {
                        seq = read_seqbegin(&xtime_lock);

                        last_offset = vxtime.last;
                        base = monotonic_base;
                        this_offset = hpet_readl(HPET_COUNTER);

                } while (read_seqretry(&xtime_lock, seq));
                offset = (this_offset - last_offset);
                offset *=(NSEC_PER_SEC/HZ)/hpet_tick;
                return base + offset;
        }else{
                do {
                        seq = read_seqbegin(&xtime_lock);

                        last_offset = vxtime.last_tsc;
                        base = monotonic_base;
                } while (read_seqretry(&xtime_lock, seq));
                sync_core();
                /* NOTE(review): this_offset is u32, so the 64-bit TSC
                   value written by rdtscll is truncated; the u32
                   subtraction still yields the right delta as long as
                   less than 2^32 cycles elapsed since the last tick —
                   verify that assumption holds here. */
                rdtscll(this_offset);
                /* NOTE(review): (delta * 1000) is computed in 32-bit
                   arithmetic and can wrap for deltas above ~4.3M
                   cycles; looks like a precision hazard on long
                   tickless gaps — confirm against later upstream
                   versions that widened these to 64 bits. */
                offset = (this_offset - last_offset)*1000/cpu_khz;
                return base + offset;
        }


}
325EXPORT_SYMBOL(monotonic_clock);
326
327static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
328{
329 static long lost_count;
330 static int warned;
331
332 if (report_lost_ticks) {
333 printk(KERN_WARNING "time.c: Lost %d timer "
334 "tick(s)! ", lost);
335 print_symbol("rip %s)\n", regs->rip);
336 }
337
338 if (lost_count == 1000 && !warned) {
339 printk(KERN_WARNING
340 "warning: many lost ticks.\n"
341 KERN_WARNING "Your time source seems to be instable or "
342 "some driver is hogging interupts\n");
343 print_symbol("rip %s\n", regs->rip);
344 if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
345 printk(KERN_WARNING "Falling back to HPET\n");
346 vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
347 vxtime.mode = VXTIME_HPET;
348 do_gettimeoffset = do_gettimeoffset_hpet;
349 }
350 /* else should fall back to PIT, but code missing. */
351 warned = 1;
352 } else
353 lost_count++;
354
355#ifdef CONFIG_CPU_FREQ
356 /* In some cases the CPU can change frequency without us noticing
357 (like going into thermal throttle)
358 Give cpufreq a change to catch up. */
359 if ((lost_count+1) % 25 == 0) {
360 cpufreq_delayed_get();
361 }
362#endif
363}
364
/*
 * Main timer tick handler (IRQ0 or HPET legacy routing).  Accounts
 * elapsed time into monotonic_base and the vxtime interpolation state
 * for whichever mode is active (HPET, PM timer, or TSC), detects and
 * compensates lost ticks, drives do_timer()/profiling, and periodically
 * syncs the CMOS clock when NTP-synchronized.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        static unsigned long rtc_update = 0;
        unsigned long tsc;
        int delay, offset = 0, lost = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and us change them -arca+vojtech
 */

        write_seqlock(&xtime_lock);

        if (vxtime.hpet_address)
                offset = hpet_readl(HPET_COUNTER);

        if (hpet_use_timer) {
                /* if we're using the hpet timer functionality,
                 * we can more accurately know the counter value
                 * when the timer interrupt occurred.
                 */
                offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
                delay = hpet_readl(HPET_COUNTER) - offset;
        } else {
                /* latch and read the PIT countdown to measure interrupt
                   latency (how far into the tick we are) */
                spin_lock(&i8253_lock);
                outb_p(0x00, 0x43);
                delay = inb_p(0x40);
                delay |= inb(0x40) << 8;
                spin_unlock(&i8253_lock);
                delay = LATCH - 1 - delay;
        }

        rdtscll_sync(&tsc);

        if (vxtime.mode == VXTIME_HPET) {
                /* more than one tick's worth of HPET clocks elapsed */
                if (offset - vxtime.last > hpet_tick) {
                        lost = (offset - vxtime.last) / hpet_tick - 1;
                }

                monotonic_base +=
                        (offset - vxtime.last)*(NSEC_PER_SEC/HZ) / hpet_tick;

                vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
        } else if (vxtime.mode == VXTIME_PMTMR) {
                lost = pmtimer_mark_offset();
#endif
        } else {
                /* TSC mode: microseconds elapsed beyond one tick */
                offset = (((tsc - vxtime.last_tsc) *
                           vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

                if (offset < 0)
                        offset = 0;

                if (offset > (USEC_PER_SEC / HZ)) {
                        lost = offset / (USEC_PER_SEC / HZ);
                        offset %= (USEC_PER_SEC / HZ);
                }

                monotonic_base += (tsc - vxtime.last_tsc)*1000000/cpu_khz ;

                /* back-date last_tsc by the measured interrupt latency */
                vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

                /* never let the interpolation go backwards relative to
                   the residual offset computed above */
                if ((((tsc - vxtime.last_tsc) *
                      vxtime.tsc_quot) >> 32) < offset)
                        vxtime.last_tsc = tsc -
                                (((long) offset << 32) / vxtime.tsc_quot) - 1;
        }

        if (lost > 0) {
                handle_lost_ticks(lost, regs);
                jiffies += lost;
        }

/*
 * Do the timer stuff.
 */

        do_timer(regs);
#ifndef CONFIG_SMP
        update_process_times(user_mode(regs));
#endif

/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in that case we
 * have to call the local interrupt handler.
 */

#ifndef CONFIG_X86_LOCAL_APIC
        profile_tick(CPU_PROFILING, regs);
#else
        if (!using_apic_timer)
                smp_local_timer_interrupt(regs);
#endif

/*
 * If we have an externally synchronized Linux clock, then update CMOS clock
 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
 * closest to exactly 500 ms before the next second. If the update fails, we
 * don't care, as it'll be updated on the next turn, and the problem (time way
 * off) isn't likely to go away much sooner anyway.
 */

        if (ntp_synced() && xtime.tv_sec > rtc_update &&
                abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
                set_rtc_mmss(xtime.tv_sec);
                rtc_update = xtime.tv_sec + 660;
        }

        write_sequnlock(&xtime_lock);

        return IRQ_HANDLED;
}
481
482static unsigned int cyc2ns_scale;
483#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
484
/*
 * Precompute the cycles->nanoseconds factor used by cycles_2_ns():
 * ns per cycle (1000/cpu_mhz) in fixed point, scaled by 2^10.
 * NOTE(review): divides by cpu_mhz with no zero check — callers are
 * assumed to pass a sane calibrated frequency.
 */
static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
{
        cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
}
489
/* Convert a TSC cycle count to nanoseconds via the precomputed scale. */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
494
/*
 * Scheduler clock: nanoseconds derived directly from the TSC.  Fast
 * but only approximately monotonic across CPUs (see comments below).
 */
unsigned long long sched_clock(void)
{
        unsigned long a = 0;

#if 0
        /* Don't do a HPET read here. Using TSC always is much faster
           and HPET may not be mapped yet when the scheduler first runs.
           Disadvantage is a small drift between CPUs in some configurations,
           but that should be tolerable. */
        if (__vxtime.mode == VXTIME_HPET)
                return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif

        /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
           which means it is not completely exact and may not be monotonous between
           CPUs. But the errors should be too small to matter for scheduling
           purposes. */

        rdtscll(a);
        return cycles_2_ns(a);
}
516
517unsigned long get_cmos_time(void)
518{
519 unsigned int timeout, year, mon, day, hour, min, sec;
520 unsigned char last, this;
521 unsigned long flags;
522
523/*
524 * The Linux interpretation of the CMOS clock register contents: When the
525 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
526 * second which has precisely just started. Waiting for this can take up to 1
527 * second, we timeout approximately after 2.4 seconds on a machine with
528 * standard 8.3 MHz ISA bus.
529 */
530
531 spin_lock_irqsave(&rtc_lock, flags);
532
533 timeout = 1000000;
534 last = this = 0;
535
536 while (timeout && last && !this) {
537 last = this;
538 this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
539 timeout--;
540 }
541
542/*
543 * Here we are safe to assume the registers won't change for a whole second, so
544 * we just go ahead and read them.
545 */
546
547 sec = CMOS_READ(RTC_SECONDS);
548 min = CMOS_READ(RTC_MINUTES);
549 hour = CMOS_READ(RTC_HOURS);
550 day = CMOS_READ(RTC_DAY_OF_MONTH);
551 mon = CMOS_READ(RTC_MONTH);
552 year = CMOS_READ(RTC_YEAR);
553
554 spin_unlock_irqrestore(&rtc_lock, flags);
555
556/*
557 * We know that x86-64 always uses BCD format, no need to check the config
558 * register.
559 */
560
561 BCD_TO_BIN(sec);
562 BCD_TO_BIN(min);
563 BCD_TO_BIN(hour);
564 BCD_TO_BIN(day);
565 BCD_TO_BIN(mon);
566 BCD_TO_BIN(year);
567
568/*
569 * x86-64 systems only exists since 2002.
570 * This will work up to Dec 31, 2100
571 */
572 year += 2000;
573
574 return mktime(year, mon, day, hour, min, sec);
575}
576
577#ifdef CONFIG_CPU_FREQ
578
579/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
580 changes.
581
582 RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
583 not that important because current Opteron setups do not support
584 scaling on SMP anyroads.
585
586 Should fix up last_tsc too. Currently gettimeofday in the
587 first tick after the change will be slightly wrong. */
588
589#include <linux/workqueue.h>
590
591static unsigned int cpufreq_delayed_issched = 0;
592static unsigned int cpufreq_init = 0;
593static struct work_struct cpufreq_delayed_get_work;
594
595static void handle_cpufreq_delayed_get(void *v)
596{
597 unsigned int cpu;
598 for_each_online_cpu(cpu) {
599 cpufreq_get(cpu);
600 }
601 cpufreq_delayed_issched = 0;
602}
603
604/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
605 * to verify the CPU frequency the timing core thinks the CPU is running
606 * at is still correct.
607 */
/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
 * to verify the CPU frequency the timing core thinks the CPU is running
 * at is still correct.
 */
static void cpufreq_delayed_get(void)
{
        static int warned;
        /* NOTE(review): the issched flag is tested/set without a lock;
           looks like a benign race (worst case one extra schedule) —
           called from the tick handler with irqs off. Confirm. */
        if (cpufreq_init && !cpufreq_delayed_issched) {
                cpufreq_delayed_issched = 1;
                if (!warned) {
                        warned = 1;
                        printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
                }
                schedule_work(&cpufreq_delayed_get_work);
        }
}
620
621static unsigned int ref_freq = 0;
622static unsigned long loops_per_jiffy_ref = 0;
623
624static unsigned long cpu_khz_ref = 0;
625
626static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
627 void *data)
628{
629 struct cpufreq_freqs *freq = data;
630 unsigned long *lpj, dummy;
631
Andi Kleenc29601e2005-04-16 15:25:05 -0700632 if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
633 return 0;
634
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 lpj = &dummy;
636 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
637#ifdef CONFIG_SMP
638 lpj = &cpu_data[freq->cpu].loops_per_jiffy;
639#else
640 lpj = &boot_cpu_data.loops_per_jiffy;
641#endif
642
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643 if (!ref_freq) {
644 ref_freq = freq->old;
645 loops_per_jiffy_ref = *lpj;
646 cpu_khz_ref = cpu_khz;
647 }
648 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
649 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
650 (val == CPUFREQ_RESUMECHANGE)) {
651 *lpj =
652 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
653
654 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
655 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
656 vxtime.tsc_quot = (1000L << 32) / cpu_khz;
657 }
658
659 set_cyc2ns_scale(cpu_khz_ref / 1000);
660
661 return 0;
662}
663
664static struct notifier_block time_cpufreq_notifier_block = {
665 .notifier_call = time_cpufreq_notifier
666};
667
/*
 * Init: hook the TSC rescaling notifier into cpufreq transitions and
 * set up the delayed-get work item.  cpufreq_init gates
 * cpufreq_delayed_get() until registration succeeded.
 */
static int __init cpufreq_tsc(void)
{
        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
        if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                       CPUFREQ_TRANSITION_NOTIFIER))
                cpufreq_init = 1;
        return 0;
}
676
677core_initcall(cpufreq_tsc);
678
679#endif
680
681/*
682 * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
683 * it to the HPET timer of known frequency.
684 */
685
686#define TICK_COUNT 100000000
687
/*
 * Calibrate the TSC against the HPET: run both counters until either
 * advances by TICK_COUNT, then derive TSC kHz from the known HPET
 * period (hpet_period, in femtoseconds).  Signed ints are used so the
 * subtractions behave sensibly across counter wraps.
 */
static unsigned int __init hpet_calibrate_tsc(void)
{
        int tsc_start, hpet_start;
        int tsc_now, hpet_now;
        unsigned long flags;

        /* NOTE(review): irqs are re-enabled by local_irq_restore()
           inside the loop and disabled again at the top of each
           iteration — presumably to bound irq-off latency during the
           (long) calibration; confirm this is intentional. */
        local_irq_save(flags);
        local_irq_disable();

        hpet_start = hpet_readl(HPET_COUNTER);
        rdtscl(tsc_start);

        do {
                local_irq_disable();
                hpet_now = hpet_readl(HPET_COUNTER);
                sync_core();
                rdtscl(tsc_now);
                local_irq_restore(flags);
        } while ((tsc_now - tsc_start) < TICK_COUNT &&
                 (hpet_now - hpet_start) < TICK_COUNT);

        return (tsc_now - tsc_start) * 1000000000L
                / ((hpet_now - hpet_start) * hpet_period / 1000);
}
712
713
714/*
715 * pit_calibrate_tsc() uses the speaker output (channel 2) of
716 * the PIT. This is better than using the timer interrupt output,
717 * because we can read the value of the speaker with just one inb(),
718 * where we need three i/o operations for the interrupt channel.
719 * We count how many ticks the TSC does in 50 ms.
720 */
721
/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 * Returns the TSC rate in kHz (cycles per 50 ms divided by 50).
 */
static unsigned int __init pit_calibrate_tsc(void)
{
        unsigned long start, end;
        unsigned long flags;

        spin_lock_irqsave(&i8253_lock, flags);

        /* gate channel 2 on, speaker off */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        /* channel 2, mode 0 (one-shot), load 50 ms count LSB then MSB */
        outb(0xb0, 0x43);
        outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
        outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
        rdtscll(start);
        sync_core();
        /* busy-wait for the OUT2 pin to go high (count expired) */
        while ((inb(0x61) & 0x20) == 0);
        sync_core();
        rdtscll(end);

        spin_unlock_irqrestore(&i8253_lock, flags);

        return (end - start) / 50;
}
744
745#ifdef CONFIG_HPET
/*
 * Register the platform HPET with the /dev/hpet driver: report the
 * number of comparators, reserve timers 0/1 for platform use (tick and,
 * optionally, RTC emulation), and collect the interrupt routing of any
 * remaining timers.  Returns 0 on success, -1 if no HPET was found.
 */
static __init int late_hpet_init(void)
{
        struct hpet_data hd;
        unsigned int ntimer;

        if (!vxtime.hpet_address)
                return -1;

        memset(&hd, 0, sizeof (hd));

        /* number of timers = HPET_ID field + 1 */
        ntimer = hpet_readl(HPET_ID);
        ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
        ntimer++;

        /*
         * Register with driver.
         * Timer0 and Timer1 is used by platform.
         */
        hd.hd_phys_address = vxtime.hpet_address;
        hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
        hd.hd_nirqs = ntimer;
        hd.hd_flags = HPET_DATA_PLATFORM;
        hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
        hpet_reserve_timer(&hd, 1);
#endif
        hd.hd_irq[0] = HPET_LEGACY_8254;
        hd.hd_irq[1] = HPET_LEGACY_RTC;
        if (ntimer > 2) {
                struct hpet *hpet;
                struct hpet_timer *timer;
                int i;

                hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

                /* read each extra timer's programmed interrupt route */
                for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
                     timer++, i++)
                        hd.hd_irq[i] = (timer->hpet_config &
                                        Tn_INT_ROUTE_CNF_MASK) >>
                                Tn_INT_ROUTE_CNF_SHIFT;

        }

        hpet_alloc(&hd);
        return 0;
}
792fs_initcall(late_hpet_init);
793#endif
794
/*
 * Stop the HPET, zero its main counter, optionally program timer 0 as
 * the periodic legacy tick (period = 'tick' HPET clocks), then start
 * the HPET again.  Always returns 0.
 */
static int hpet_timer_stop_set_go(unsigned long tick)
{
        unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */

        cfg = hpet_readl(HPET_CFG);
        cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
        hpet_writel(cfg, HPET_CFG);
        hpet_writel(0, HPET_COUNTER);
        hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
 * and period also hpet_tick.
 */
        if (hpet_use_timer) {
                hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
                    HPET_TN_32BIT, HPET_T0_CFG);
                hpet_writel(hpet_tick, HPET_T0_CMP);
                hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
                cfg |= HPET_CFG_LEGACY;
        }
/*
 * Go!
 */

        cfg |= HPET_CFG_ENABLE;
        hpet_writel(cfg, HPET_CFG);

        return 0;
}
829
/*
 * Map the HPET (fixmap + vsyscall mapping), validate its ID register
 * and period, compute hpet_tick (HPET clocks per jiffy), record whether
 * legacy-replacement routing is available, and start the timer.
 * Returns 0 on success, -1 if no usable HPET is present.
 */
static int hpet_init(void)
{
        unsigned int id;

        if (!vxtime.hpet_address)
                return -1;
        set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
        __set_fixmap(VSYSCALL_HPET, vxtime.hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);

/*
 * Read the period, compute tick and quotient.
 */

        id = hpet_readl(HPET_ID);

        /* reject HPETs reporting no vendor or zero comparators */
        if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
                return -1;

        /* sanity-check period: 0.1 ns .. 100 ns per clock (in fs) */
        hpet_period = hpet_readl(HPET_PERIOD);
        if (hpet_period < 100000 || hpet_period > 100000000)
                return -1;

        /* clocks per jiffy, rounded: 1e9 fs/us * us-per-tick / period */
        hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
                hpet_period;

        hpet_use_timer = (id & HPET_ID_LEGSUP);

        return hpet_timer_stop_set_go(hpet_tick);
}
859
/* Restart the HPET after resume with the previously computed tick. */
static int hpet_reenable(void)
{
        return hpet_timer_stop_set_go(hpet_tick);
}
864
/* Program PIT channel 0 as the periodic tick source (rate-generator
 * mode, LATCH counts per interrupt). */
void __init pit_init(void)
{
        unsigned long flags;

        spin_lock_irqsave(&i8253_lock, flags);
        outb_p(0x34, 0x43);             /* binary, mode 2, LSB/MSB, ch 0 */
        outb_p(LATCH & 0xff, 0x40);     /* LSB */
        outb_p(LATCH >> 8, 0x40);       /* MSB */
        spin_unlock_irqrestore(&i8253_lock, flags);
}
875
/* __setup handler for "report_lost_ticks": enables per-event logging
 * of lost timer ticks.  The argument string is unused. */
int __init time_setup(char *str)
{
        report_lost_ticks = 1;
        return 1;
}
881
882static struct irqaction irq0 = {
883 timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
884};
885
886extern void __init config_acpi_tables(void);
887
/*
 * Boot-time timekeeping init: read the CMOS clock into xtime, probe and
 * start the HPET (or fall back to PM timer / PIT), calibrate the TSC,
 * seed the vxtime interpolation state, and install the tick handler on
 * IRQ 0.  On UP the final gettimeofday mode is chosen here; on SMP
 * time_init_gtod() runs later, after all CPUs are up.
 */
void __init time_init(void)
{
        char *timename;

#ifdef HPET_HACK_ENABLE_DANGEROUS
        if (!vxtime.hpet_address) {
                printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
                       "manually!\n");
                outl(0x800038a0, 0xcf8);
                outl(0xff000001, 0xcfc);
                outl(0x800038a0, 0xcf8);
                vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
                printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
                       "at %#lx.\n", vxtime.hpet_address);
        }
#endif
        if (nohpet)
                vxtime.hpet_address = 0;

        xtime.tv_sec = get_cmos_time();
        xtime.tv_nsec = 0;

        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);

        /* hpet_init() returning 0 means the HPET is running; derive
           vxtime_hz from its period (hpet_period is in femtoseconds) */
        if (!hpet_init())
                vxtime_hz = (1000000000000000L + hpet_period / 2) /
                        hpet_period;

        if (hpet_use_timer) {
                cpu_khz = hpet_calibrate_tsc();
                timename = "HPET";
#ifdef CONFIG_X86_PM_TIMER
        } else if (pmtmr_ioport) {
                vxtime_hz = PM_TIMER_FREQUENCY;
                timename = "PM";
                pit_init();
                cpu_khz = pit_calibrate_tsc();
#endif
        } else {
                pit_init();
                cpu_khz = pit_calibrate_tsc();
                timename = "PIT";
        }

        printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
               vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
        printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
                cpu_khz / 1000, cpu_khz % 1000);
        /* start in TSC mode; time_init_gtod() may switch later */
        vxtime.mode = VXTIME_TSC;
        vxtime.quot = (1000000L << 32) / vxtime_hz;
        vxtime.tsc_quot = (1000L << 32) / cpu_khz;
        vxtime.hz = vxtime_hz;
        rdtscll_sync(&vxtime.last_tsc);
        setup_irq(0, &irq0);

        set_cyc2ns_scale(cpu_khz / 1000);

#ifndef CONFIG_SMP
        time_init_gtod();
#endif
}
950
Andi Kleena8ab26f2005-04-16 15:25:19 -0700951/*
Andi Kleen312df5f2005-05-16 21:53:28 -0700952 * Make an educated guess if the TSC is trustworthy and synchronized
953 * over all CPUs.
954 */
/*
 * Heuristic: returns nonzero when the TSCs of different CPUs cannot be
 * assumed synchronized (forcing HPET/PM-timer based gettimeofday).
 */
static __init int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
        /* OEM/ACPI blacklist check forcing HPET */
        if (oem_force_hpet_timer())
                return 1;
        /* Intel systems are normally all synchronized. Exceptions
           are handled in the OEM check above. */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                return 0;
        /* All in a single socket - should be synchronized */
        if (cpus_weight(cpu_core_map[0]) == num_online_cpus())
                return 0;
#endif
        /* Assume multi socket systems are not synchronized */
        return num_online_cpus() > 1;
}
971
972/*
Andi Kleena8ab26f2005-04-16 15:25:19 -0700973 * Decide after all CPUs are booted what mode gettimeofday should use.
974 */
/*
 * Decide after all CPUs are booted what mode gettimeofday should use:
 * TSC when trusted, otherwise HPET, otherwise the (slow) ACPI PM timer
 * — the last also disables the vsyscall fast path.
 */
void __init time_init_gtod(void)
{
        char *timetype;

        if (unsynchronized_tsc())
                notsc = 1;
        if (vxtime.hpet_address && notsc) {
                timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
                /* resync vxtime.last to the last comparator event */
                vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
                vxtime.mode = VXTIME_HPET;
                do_gettimeoffset = do_gettimeoffset_hpet;
#ifdef CONFIG_X86_PM_TIMER
        /* Using PM for gettimeofday is quite slow, but we have no other
           choice because the TSC is too unreliable on some systems. */
        } else if (pmtmr_ioport && !vxtime.hpet_address && notsc) {
                timetype = "PM";
                do_gettimeoffset = do_gettimeoffset_pm;
                vxtime.mode = VXTIME_PMTMR;
                sysctl_vsyscall = 0;
                printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
#endif
        } else {
                timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
                vxtime.mode = VXTIME_TSC;
        }

        printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
}
1003
1004__setup("report_lost_ticks", time_setup);
1005
1006static long clock_cmos_diff;
1007static unsigned long sleep_start;
1008
/*
 * Suspend hook: record the offset between system time and the CMOS
 * clock, and the CMOS time at suspend, so timer_resume() can restore
 * system time and compute the sleep length.
 */
static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
        /*
         * Estimate time zone so that set_time can update the clock
         */
        long cmos_time = get_cmos_time();

        clock_cmos_diff = -cmos_time;
        clock_cmos_diff += get_seconds();
        sleep_start = cmos_time;
        return 0;
}
1021
/*
 * Resume hook: restart the tick hardware (HPET or PIT), restore xtime
 * from the CMOS clock using the offset saved at suspend, and advance
 * jiffies/wall_jiffies by the slept interval so timers fire correctly.
 */
static int timer_resume(struct sys_device *dev)
{
        unsigned long flags;
        unsigned long sec;
        unsigned long ctime = get_cmos_time();
        unsigned long sleep_length = (ctime - sleep_start) * HZ;

        if (vxtime.hpet_address)
                hpet_reenable();
        else
                i8254_timer_resume();

        sec = ctime + clock_cmos_diff;
        write_seqlock_irqsave(&xtime_lock,flags);
        xtime.tv_sec = sec;
        xtime.tv_nsec = 0;
        write_sequnlock_irqrestore(&xtime_lock,flags);
        jiffies += sleep_length;
        wall_jiffies += sleep_length;
        /* we were gone a while — don't trigger the softlockup detector */
        touch_softlockup_watchdog();
        return 0;
}
1044
/* Sysdev class "timer": wires the suspend/resume hooks above into the
   power-management core. */
static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};
1050
1051
/* XXX this driverfs stuff should probably go elsewhere later -john */
/* The single system timer device, registered under timer_sysclass. */
static struct sys_device device_timer = {
	.id	= 0,
	.cls	= &timer_sysclass,
};
1057
1058static int time_init_device(void)
1059{
1060 int error = sysdev_class_register(&timer_sysclass);
1061 if (!error)
1062 error = sysdev_register(&device_timer);
1063 return error;
1064}
1065
1066device_initcall(time_init_device);
1067
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
 *	is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *	2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64Hz or user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>

/* Real RTC interrupt routine in the RTC driver; the HPET emulation
 * below forwards synthesized interrupts to it. */
extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);

#define DEFAULT_RTC_INT_FREQ 	64
#define RTC_NUM_INTS 		1

/* Update-interrupt (UIE) emulation: enabled flag and the last RTC
 * second we reported, to detect second rollover. */
static unsigned long UIE_on;
static unsigned long prev_update_sec;

/* Alarm-interrupt (AIE) emulation: enabled flag and the target time. */
static unsigned long AIE_on;
static struct rtc_time alarm_time;

/* Periodic-interrupt (PIE) emulation: enabled flag, requested frequency,
 * and a poll counter used to divide down to that frequency. */
static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
1101
1102int is_hpet_enabled(void)
1103{
1104 return vxtime.hpet_address != 0;
1105}
1106
/*
 * Timer 1 for RTC, we do not use periodic interrupt feature,
 * even if HPET supports periodic interrupts on Timer 1.
 * The reason being, to set up a periodic interrupt in HPET, we need to
 * stop the main counter. And if we do that every time someone disables/enables
 * RTC, we will have adverse effect on main kernel timer running on Timer 0.
 * So, for the time being, simulate the periodic interrupt in software.
 *
 * hpet_rtc_timer_init() is called for the first time and during subsequent
 * interrupts reinit happens through hpet_rtc_timer_reinit().
 *
 * Returns 0 if the HPET is not in use, 1 after arming timer 1.
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
	/*
	 * Set the counter 1 and enable the interrupts.
	 */
	/* Poll fast enough for the requested periodic rate, but never
	 * slower than the 64 Hz default used for update/alarm emulation. */
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* Arm T1 one emulation period past the current main counter;
	 * irqs are off so the read and the comparator write stay close. */
	local_irq_save(flags);
	cnt = hpet_readl(HPET_COUNTER);
	cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
	hpet_writel(cnt, HPET_T1_CMP);
	local_irq_restore(flags);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return 1;
}
1145
/*
 * Re-arm HPET timer 1 for the next software-emulated RTC interrupt.
 * Called from the T1 interrupt path; becomes a no-op (letting T1 lapse)
 * once all emulated interrupt sources are off.
 */
static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, cnt;

	/* Nothing enabled any more: stop re-arming. */
	if (!(PIE_on | AIE_on | UIE_on))
		return;

	/* Recompute the poll rate; PIE_freq may have changed since last time. */
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than current count.*/
	cnt = hpet_readl(HPET_T1_CMP);
	cnt += hpet_tick*HZ/hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return;
}
1169
1170/*
1171 * The functions below are called from rtc driver.
1172 * Return 0 if HPET is not being used.
1173 * Otherwise do the necessary changes and return 1.
1174 */
1175int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
1176{
1177 if (!is_hpet_enabled())
1178 return 0;
1179
1180 if (bit_mask & RTC_UIE)
1181 UIE_on = 0;
1182 if (bit_mask & RTC_PIE)
1183 PIE_on = 0;
1184 if (bit_mask & RTC_AIE)
1185 AIE_on = 0;
1186
1187 return 1;
1188}
1189
1190int hpet_set_rtc_irq_bit(unsigned long bit_mask)
1191{
1192 int timer_init_reqd = 0;
1193
1194 if (!is_hpet_enabled())
1195 return 0;
1196
1197 if (!(PIE_on | AIE_on | UIE_on))
1198 timer_init_reqd = 1;
1199
1200 if (bit_mask & RTC_UIE) {
1201 UIE_on = 1;
1202 }
1203 if (bit_mask & RTC_PIE) {
1204 PIE_on = 1;
1205 PIE_count = 0;
1206 }
1207 if (bit_mask & RTC_AIE) {
1208 AIE_on = 1;
1209 }
1210
1211 if (timer_init_reqd)
1212 hpet_rtc_timer_init();
1213
1214 return 1;
1215}
1216
1217int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
1218{
1219 if (!is_hpet_enabled())
1220 return 0;
1221
1222 alarm_time.tm_hour = hrs;
1223 alarm_time.tm_min = min;
1224 alarm_time.tm_sec = sec;
1225
1226 return 1;
1227}
1228
1229int hpet_set_periodic_freq(unsigned long freq)
1230{
1231 if (!is_hpet_enabled())
1232 return 0;
1233
1234 PIE_freq = freq;
1235 PIE_count = 0;
1236
1237 return 1;
1238}
1239
/*
 * Dropped-interrupt hook for the RTC driver: nothing to recover here,
 * just report whether the HPET emulation is active (1) or not (0).
 */
int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
1247
/*
 * HPET timer 1 interrupt: poll-based emulation of the RTC update,
 * periodic, and alarm interrupts. Synthesizes the RTC status flags and
 * forwards to the real RTC driver handler when any source fired.
 */
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;
	int call_rtc_interrupt = 0;

	/* Re-arm T1 for the next poll before doing any work. */
	hpet_rtc_timer_reinit();

	/* The RTC time is only needed for update/alarm emulation. */
	if (UIE_on | AIE_on) {
		rtc_get_rtc_time(&curr_time);
	}
	/* Update interrupt: fire once per RTC-second rollover. */
	if (UIE_on) {
		if (curr_time.tm_sec != prev_update_sec) {
			/* Set update int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag = RTC_UF;
			prev_update_sec = curr_time.tm_sec;
		}
	}
	/* Periodic interrupt: divide the poll rate down to PIE_freq.
	 * NOTE(review): assumes PIE_freq != 0 whenever PIE_on is set —
	 * presumably guaranteed by the RTC driver; verify against callers. */
	if (PIE_on) {
		PIE_count++;
		if (PIE_count >= hpet_rtc_int_freq/PIE_freq) {
			/* Set periodic int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_PF;
			PIE_count = 0;
		}
	}
	/* Alarm interrupt: fire when the polled time matches alarm_time. */
	if (AIE_on) {
		if ((curr_time.tm_sec == alarm_time.tm_sec) &&
		    (curr_time.tm_min == alarm_time.tm_min) &&
		    (curr_time.tm_hour == alarm_time.tm_hour)) {
			/* Set alarm int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_AF;
		}
	}
	if (call_rtc_interrupt) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		rtc_interrupt(rtc_int_flag, dev_id, regs);
	}
	return IRQ_HANDLED;
}
1291#endif
1292
1293
1294
1295static int __init nohpet_setup(char *s)
1296{
1297 nohpet = 1;
1298 return 0;
1299}
1300
1301__setup("nohpet", nohpet_setup);
1302
1303
1304static int __init notsc_setup(char *s)
1305{
1306 notsc = 1;
1307 return 0;
1308}
1309
1310__setup("notsc", notsc_setup);
1311
1312