/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
#include <acpi/achware.h>	/* for PM timer frequency */
#endif
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/hpet.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/apic.h>
#endif

#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif
extern void i8254_timer_resume(void);
extern int using_apic_timer;

DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

static int nohpet __initdata = 0;
static int notsc __initdata = 0;

#undef HPET_HACK_ENABLE_DANGEROUS

unsigned int cpu_khz;				/* TSC clocks / msec (i.e. the TSC frequency in kHz); not used here */
static unsigned long hpet_period;		/* fsecs / HPET clock */
unsigned long hpet_tick;			/* HPET clocks / interrupt */
static int hpet_use_timer;			/* Use the HPET timer for timekeeping, otherwise the PIT */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks;				/* command line option */
unsigned long long monotonic_base;

struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;

/*
 * do_gettimeoffset() returns microseconds since the last timer interrupt was
 * triggered by hardware. A memory read of the HPET is slower than a register
 * read of the TSC, but much more reliable. It's also synchronized to the
 * timer interrupt. Note that do_gettimeoffset() may return more than
 * hpet_tick if a timer interrupt has happened already, but vxtime.trigger
 * wasn't updated yet. This is not a problem, because jiffies hasn't been
 * updated either. They are bound together by xtime_lock.
 */

static inline unsigned int do_gettimeoffset_tsc(void)
{
	unsigned long t;
	unsigned long x;
	t = get_cycles_sync();
	if (t < vxtime.last_tsc)
		t = vxtime.last_tsc; /* hack */
	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
	return x;
}
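
/*
 * Worked example of the 32.32 fixed-point conversion above (illustrative
 * numbers, not taken from real hardware): vxtime.tsc_quot is set up as
 * (1000 << 32) / cpu_khz, i.e. microseconds per TSC cycle scaled by 2^32.
 * For a hypothetical 2 GHz CPU, cpu_khz = 2000000 and tsc_quot ~= 2147483.
 * After 2000000 elapsed cycles (1 ms), (2000000 * 2147483) >> 32 ~= 999 usec,
 * with the small shortfall coming from truncation in the fixed-point divide.
 */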

static inline unsigned int do_gettimeoffset_hpet(void)
{
	/* cap counter read to one tick to avoid inconsistencies */
	unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
	return (min(counter, hpet_tick) * vxtime.quot) >> 32;
}

unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;

/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */

void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq, t;
	unsigned int sec, usec;

	do {
		seq = read_seqbegin(&xtime_lock);

		sec = xtime.tv_sec;
		usec = xtime.tv_nsec / 1000;

		/* i386 does some correction here to keep the clock
		   monotonic even when ntpd is fixing drift.
		   But the corrections didn't work for me: the clock is
		   non-monotonic with ntp anyway.
		   I dropped all corrections for now until a real solution
		   can be found. Note that when you fix it here, you need
		   to do the same in arch/x86_64/kernel/vsyscall.c and
		   export all needed variables in vmlinux.lds. -AK */

		t = (jiffies - wall_jiffies) * (1000000L / HZ) +
			do_gettimeoffset();
		usec += t;

	} while (read_seqretry(&xtime_lock, seq));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	nsec -= do_gettimeoffset() * 1000 +
		(jiffies - wall_jiffies) * (NSEC_PER_SEC / HZ);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or only a
	   single word. This checks whether the address on the stack looks
	   like a kernel text address. There is a small window for false
	   hits, but in that case the tick is just accounted to the spinlock
	   function. It would be better to write these functions in
	   assembler again and check exactly. */
	if (in_lock_functions(pc)) {
		char *v = *(char **)regs->rsp;
		if ((v >= _stext && v <= _etext) ||
		    (v >= _sinittext && v <= _einittext) ||
		    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
			return (unsigned long)v;
		return ((unsigned long *)regs->rsp)[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */

static void set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

/*
 * IRQs are disabled when we're called from the timer interrupt,
 * no need for spin_lock_irqsave()
 */

	spin_lock(&rtc_lock);

/*
 * Tell the clock it's being set and stop it.
 */

	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

/*
 * Since we're only adjusting minutes and seconds, don't interfere with hour
 * overflow. This avoids messing with unknown time zones but requires your RTC
 * not to be off by more than 15 minutes. Since we're calling it only when
 * our clock is externally synchronized using NTP, this shouldn't be a problem.
 */

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

#if 0
	/* AMD 8111 is a really bad timekeeper and hits this regularly.
	   It probably was an attempt to avoid screwing up DST, but ignore
	   that for now. */
	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk(KERN_WARNING "time.c: can't update CMOS clock "
		       "from %d to %d\n", cmos_minutes, real_minutes);
	} else
#endif

	{
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		CMOS_WRITE(real_seconds, RTC_SECONDS);
		CMOS_WRITE(real_minutes, RTC_MINUTES);
	}

/*
 * The following flags have to be released exactly in this order, otherwise the
 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
 * not reset the oscillator and will not update precisely 500 ms later. You
 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
 * believes data sheets anyway ... -- Markus Kuhn
 */

	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
}


/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 * Note: This function is required to return accurate
 * time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
	unsigned long seq;
	u32 last_offset, this_offset, offset;
	unsigned long long base;

	if (vxtime.mode == VXTIME_HPET) {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last;
			base = monotonic_base;
			this_offset = hpet_readl(HPET_COUNTER);

		} while (read_seqretry(&xtime_lock, seq));
		offset = (this_offset - last_offset);
		offset *= (NSEC_PER_SEC / HZ) / hpet_tick;
		return base + offset;
	} else {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last_tsc;
			base = monotonic_base;
		} while (read_seqretry(&xtime_lock, seq));
		this_offset = get_cycles_sync();
		offset = (this_offset - last_offset) * 1000 / cpu_khz;
		return base + offset;
	}
}
EXPORT_SYMBOL(monotonic_clock);

static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
	static long lost_count;
	static int warned;

	if (report_lost_ticks) {
		printk(KERN_WARNING "time.c: Lost %d timer tick(s)! ", lost);
		print_symbol("rip %s\n", regs->rip);
	}

	if (lost_count == 1000 && !warned) {
		printk(KERN_WARNING
		       "warning: many lost ticks.\n"
		       KERN_WARNING "Your time source seems to be unstable or "
		       "some driver is hogging interrupts\n");
		print_symbol("rip %s\n", regs->rip);
		if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
			printk(KERN_WARNING "Falling back to HPET\n");
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
			vxtime.mode = VXTIME_HPET;
			do_gettimeoffset = do_gettimeoffset_hpet;
		}
		/* else should fall back to PIT, but code missing. */
		warned = 1;
	} else
		lost_count++;

#ifdef CONFIG_CPU_FREQ
	/* In some cases the CPU can change frequency without us noticing
	   (like going into thermal throttle).
	   Give cpufreq a chance to catch up. */
	if ((lost_count + 1) % 25 == 0)
		cpufreq_delayed_get();
#endif
}

static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	static unsigned long rtc_update = 0;
	unsigned long tsc;
	int delay, offset = 0, lost = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and we change them -arca+vojtech
 */

	write_seqlock(&xtime_lock);

	if (vxtime.hpet_address)
		offset = hpet_readl(HPET_COUNTER);

	if (hpet_use_timer) {
		/* if we're using the hpet timer functionality,
		 * we can more accurately know the counter value
		 * when the timer interrupt occurred.
		 */
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
		delay = hpet_readl(HPET_COUNTER) - offset;
	} else {
		spin_lock(&i8253_lock);
		outb_p(0x00, 0x43);
		delay = inb_p(0x40);
		delay |= inb(0x40) << 8;
		spin_unlock(&i8253_lock);
		delay = LATCH - 1 - delay;
	}

	tsc = get_cycles_sync();

	if (vxtime.mode == VXTIME_HPET) {
		if (offset - vxtime.last > hpet_tick)
			lost = (offset - vxtime.last) / hpet_tick - 1;

		monotonic_base +=
			(offset - vxtime.last) * (NSEC_PER_SEC / HZ) / hpet_tick;

		vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
	} else if (vxtime.mode == VXTIME_PMTMR) {
		lost = pmtimer_mark_offset();
#endif
	} else {
		offset = (((tsc - vxtime.last_tsc) *
			   vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

		if (offset < 0)
			offset = 0;

		if (offset > (USEC_PER_SEC / HZ)) {
			lost = offset / (USEC_PER_SEC / HZ);
			offset %= (USEC_PER_SEC / HZ);
		}

		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;

		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

		if ((((tsc - vxtime.last_tsc) *
		      vxtime.tsc_quot) >> 32) < offset)
			vxtime.last_tsc = tsc -
				(((long) offset << 32) / vxtime.tsc_quot) - 1;
	}

	if (lost > 0) {
		handle_lost_ticks(lost, regs);
		jiffies += lost;
	}

/*
 * Do the timer stuff.
 */

	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif

/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in which case we
 * have to call the local interrupt handler.
 */

#ifndef CONFIG_X86_LOCAL_APIC
	profile_tick(CPU_PROFILING, regs);
#else
	if (!using_apic_timer)
		smp_local_timer_interrupt(regs);
#endif

/*
 * If we have an externally synchronized Linux clock, then update the CMOS
 * clock accordingly every ~11 minutes. set_rtc_mmss() will be called in the
 * jiffy closest to exactly 500 ms before the next second. If the update
 * fails, we don't care, as it'll be updated on the next turn, and the problem
 * (time way off) isn't likely to go away much sooner anyway.
 */

	if (ntp_synced() && xtime.tv_sec > rtc_update &&
	    abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;
	}

	write_sequnlock(&xtime_lock);

#ifdef CONFIG_X86_LOCAL_APIC
	if (using_apic_timer)
		smp_send_timer_broadcast_ipi();
#endif

	return IRQ_HANDLED;
}

static unsigned int cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
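
/*
 * Worked example of the cyc2ns fixed point above (illustrative numbers, not
 * from real hardware): cyc2ns_scale holds nanoseconds per cycle scaled by
 * 2^CYC2NS_SCALE_FACTOR. For a hypothetical cpu_khz of 1000000 (1 GHz),
 * cyc2ns_scale = (1000000 << 10) / 1000000 = 1024, so cycles_2_ns(5000)
 * = (5000 * 1024) >> 10 = 5000 ns, i.e. exactly 1 ns per cycle as expected.
 */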

unsigned long long sched_clock(void)
{
	unsigned long a = 0;

#if 0
	/* Don't do a HPET read here. Using the TSC is always much faster,
	   and the HPET may not be mapped yet when the scheduler first runs.
	   The disadvantage is a small drift between CPUs in some
	   configurations, but that should be tolerable. */
	if (__vxtime.mode == VXTIME_HPET)
		return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif

	/* Could do CPU core sync here. Opteron can execute rdtsc
	   speculatively, which means it is not completely exact and may not
	   be monotonic between CPUs. But the errors should be too small to
	   matter for scheduling purposes. */

	rdtscll(a);
	return cycles_2_ns(a);
}

unsigned long get_cmos_time(void)
{
	unsigned int timeout, year, mon, day, hour, min, sec;
	unsigned char last, this;
	unsigned long flags;

/*
 * The Linux interpretation of the CMOS clock register contents: When the
 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
 * second which has precisely just started. Waiting for this can take up to 1
 * second; we time out after approximately 2.4 seconds on a machine with a
 * standard 8.3 MHz ISA bus.
 */

	spin_lock_irqsave(&rtc_lock, flags);

	timeout = 1000000;
	last = this = 0;

	while (timeout && last && !this) {
		last = this;
		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
		timeout--;
	}

/*
 * Here we are safe to assume the registers won't change for a whole second, so
 * we just go ahead and read them.
 */

	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	spin_unlock_irqrestore(&rtc_lock, flags);

/*
 * We know that x86-64 always uses BCD format, no need to check the config
 * register.
 */

	BCD_TO_BIN(sec);
	BCD_TO_BIN(min);
	BCD_TO_BIN(hour);
	BCD_TO_BIN(day);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(year);

/*
 * x86-64 systems have only existed since 2002.
 * This will work up to Dec 31, 2100.
 */
	year += 2000;

	return mktime(year, mon, day, hour, min, sec);
}

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu
   frequency changes.

   RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
   not that important because current Opteron setups do not support
   scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */

#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
{
	unsigned int cpu;
	for_each_online_cpu(cpu) {
		cpufreq_get(cpu);
	}
	cpufreq_delayed_issched = 0;
}

/* If we notice lost ticks, schedule a call to cpufreq_get(), which tries
 * to verify that the CPU frequency the timing core thinks the CPU is
 * running at is still correct.
 */
static void cpufreq_delayed_get(void)
{
	static int warned;
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		if (!warned) {
			warned = 1;
			printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
		}
		schedule_work(&cpufreq_delayed_get_work);
	}
}

static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		cpu_khz_ref = cpu_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	}

	set_cyc2ns_scale(cpu_khz_ref);

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))
		cpufreq_init = 1;
	return 0;
}

core_initcall(cpufreq_tsc);

#endif

/*
 * hpet_calibrate_tsc() calibrates the processor TSC in a very simple way,
 * comparing it to the HPET timer of known frequency.
 */

#define TICK_COUNT 100000000

static unsigned int __init hpet_calibrate_tsc(void)
{
	int tsc_start, hpet_start;
	int tsc_now, hpet_now;
	unsigned long flags;

	local_irq_save(flags);
	local_irq_disable();

	hpet_start = hpet_readl(HPET_COUNTER);
	rdtscl(tsc_start);

	do {
		local_irq_disable();
		hpet_now = hpet_readl(HPET_COUNTER);
		tsc_now = get_cycles_sync();
		local_irq_restore(flags);
	} while ((tsc_now - tsc_start) < TICK_COUNT &&
		 (hpet_now - hpet_start) < TICK_COUNT);

	return (tsc_now - tsc_start) * 1000000000L
		/ ((hpet_now - hpet_start) * hpet_period / 1000);
}
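
/*
 * Unit check for the return value above (a sketch of the arithmetic, under
 * the definitions in this file): hpet_period is in femtoseconds per HPET
 * clock, so (hpet_now - hpet_start) * hpet_period / 1000 is the elapsed time
 * in picoseconds. TSC cycles * 1e9 / picoseconds = cycles per millisecond,
 * i.e. the TSC frequency in kHz, which is exactly what cpu_khz expects.
 */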

/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */

static unsigned int __init pit_calibrate_tsc(void)
{
	unsigned long start, end;
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);

	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	outb(0xb0, 0x43);
	outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
	start = get_cycles_sync();
	while ((inb(0x61) & 0x20) == 0);
	end = get_cycles_sync();

	spin_unlock_irqrestore(&i8253_lock, flags);

	return (end - start) / 50;
}
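
/*
 * The calibration above gates the TSC against a 50 ms PIT one-shot: channel 2
 * is programmed to count down PIT_TICK_RATE/20 input clocks, the loop spins
 * until the output pin (port 0x61, bit 5) goes high, and the elapsed TSC
 * cycles divided by 50 give cycles per millisecond, i.e. cpu_khz. A rough
 * sanity check with assumed numbers: a hypothetical 1 GHz TSC accumulates
 * ~50,000,000 cycles in 50 ms, and 50000000 / 50 = 1000000 kHz as expected.
 */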

#ifdef CONFIG_HPET
static __init int late_hpet_init(void)
{
	struct hpet_data hd;
	unsigned int ntimer;

	if (!vxtime.hpet_address)
		return -1;

	memset(&hd, 0, sizeof(hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
	ntimer++;

	/*
	 * Register with the driver.
	 * Timer 0 and Timer 1 are used by the platform.
	 */
	hd.hd_phys_address = vxtime.hpet_address;
	hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;
	if (ntimer > 2) {
		struct hpet *hpet;
		struct hpet_timer *timer;
		int i;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

		for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
		     timer++, i++)
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
				Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_alloc(&hd);
	return 0;
}
fs_initcall(late_hpet_init);
#endif

static int hpet_timer_stop_set_go(unsigned long tick)
{
	unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */

	cfg = hpet_readl(HPET_CFG);
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
 * and period also hpet_tick.
 */
	if (hpet_use_timer) {
		hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
			    HPET_TN_32BIT, HPET_T0_CFG);
		hpet_writel(hpet_tick, HPET_T0_CMP);
		hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
		cfg |= HPET_CFG_LEGACY;
	}
/*
 * Go!
 */

	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);

	return 0;
}

static int hpet_init(void)
{
	unsigned int id;

	if (!vxtime.hpet_address)
		return -1;
	set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
	__set_fixmap(VSYSCALL_HPET, vxtime.hpet_address,
		     PAGE_KERNEL_VSYSCALL_NOCACHE);

/*
 * Read the period, compute the tick and the quotient.
 */

	id = hpet_readl(HPET_ID);

	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
		return -1;

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)
		return -1;

	hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
		hpet_period;
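
	/*
	 * Sketch of the computation above: hpet_period is femtoseconds per
	 * HPET clock, and USEC_PER_SEC / HZ is microseconds per timer tick,
	 * so multiplying by 1e9 fs/usec and dividing by the period (with
	 * + hpet_period / 2 for rounding) yields HPET clocks per tick. With
	 * an assumed 14.31818 MHz HPET (period ~69841279 fs) and HZ = 1000,
	 * hpet_tick comes out to roughly 14318.
	 */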

	hpet_use_timer = (id & HPET_ID_LEGSUP);

	return hpet_timer_stop_set_go(hpet_tick);
}

static int hpet_reenable(void)
{
	return hpet_timer_stop_set_go(hpet_tick);
}

void __init pit_init(void)
{
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);
	outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff, 0x40);	/* LSB */
	outb_p(LATCH >> 8, 0x40);	/* MSB */
	spin_unlock_irqrestore(&i8253_lock, flags);
}

int __init time_setup(char *str)
{
	report_lost_ticks = 1;
	return 1;
}

static struct irqaction irq0 = {
	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};

extern void __init config_acpi_tables(void);

void __init time_init(void)
{
	char *timename;

#ifdef HPET_HACK_ENABLE_DANGEROUS
	if (!vxtime.hpet_address) {
		printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
		       "manually!\n");
		outl(0x800038a0, 0xcf8);
		outl(0xff000001, 0xcfc);
		outl(0x800038a0, 0xcf8);
		vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
		printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
		       "at %#lx.\n", vxtime.hpet_address);
	}
#endif
	if (nohpet)
		vxtime.hpet_address = 0;

	xtime.tv_sec = get_cmos_time();
	xtime.tv_nsec = 0;

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	if (!hpet_init())
		vxtime_hz = (1000000000000000L + hpet_period / 2) /
			hpet_period;
	else
		vxtime.hpet_address = 0;

	if (hpet_use_timer) {
		cpu_khz = hpet_calibrate_tsc();
		timename = "HPET";
#ifdef CONFIG_X86_PM_TIMER
	} else if (pmtmr_ioport && !vxtime.hpet_address) {
		vxtime_hz = PM_TIMER_FREQUENCY;
		timename = "PM";
		pit_init();
		cpu_khz = pit_calibrate_tsc();
#endif
	} else {
		pit_init();
		cpu_khz = pit_calibrate_tsc();
		timename = "PIT";
	}

	printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
	       vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
	       cpu_khz / 1000, cpu_khz % 1000);
	vxtime.mode = VXTIME_TSC;
	vxtime.quot = (1000000L << 32) / vxtime_hz;
	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
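
	/*
	 * Both quotients are 32.32 fixed point: vxtime.quot is microseconds
	 * per timer-source clock scaled by 2^32, and vxtime.tsc_quot is
	 * microseconds per TSC cycle scaled by 2^32, so later code can turn
	 * a cycle delta into microseconds with one multiply and a 32-bit
	 * shift.
	 */
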
	vxtime.last_tsc = get_cycles_sync();
	setup_irq(0, &irq0);

	set_cyc2ns_scale(cpu_khz);

#ifndef CONFIG_SMP
	time_init_gtod();
#endif
}

/*
 * Make an educated guess whether the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__init int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
	if (oem_force_hpet_timer())
		return 1;
	/* Intel systems are normally all synchronized. Exceptions
	   are handled in the OEM check above. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return 0;
#endif
	/* Assume multi-socket systems are not synchronized */
	return num_present_cpus() > 1;
}

/*
 * Decide after all CPUs are booted what mode gettimeofday should use.
 */
void __init time_init_gtod(void)
{
	char *timetype;

	if (unsynchronized_tsc())
		notsc = 1;
	if (vxtime.hpet_address && notsc) {
		timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
		vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		vxtime.mode = VXTIME_HPET;
		do_gettimeoffset = do_gettimeoffset_hpet;
#ifdef CONFIG_X86_PM_TIMER
	/* Using the PM timer for gettimeofday is quite slow, but we have
	   no other choice because the TSC is too unreliable on some
	   systems. */
	} else if (pmtmr_ioport && !vxtime.hpet_address && notsc) {
		timetype = "PM";
		do_gettimeoffset = do_gettimeoffset_pm;
		vxtime.mode = VXTIME_PMTMR;
		sysctl_vsyscall = 0;
		printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
#endif
	} else {
		timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
		vxtime.mode = VXTIME_TSC;
	}

	printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
}

__setup("report_lost_ticks", time_setup);


static long clock_cmos_diff;
static unsigned long sleep_start;

static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
	/*
	 * Estimate time zone so that set_time can update the clock
	 */
	long cmos_time = get_cmos_time();

	clock_cmos_diff = -cmos_time;
	clock_cmos_diff += get_seconds();
	sleep_start = cmos_time;
	return 0;
}
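
/*
 * A note on the bookkeeping above: clock_cmos_diff ends up holding
 * get_seconds() - cmos_time, i.e. the offset between system time and the
 * CMOS clock at suspend. timer_resume() below re-reads the CMOS clock and
 * adds this offset back (sec = ctime + clock_cmos_diff), so any constant
 * skew between the two clocks survives a suspend/resume cycle unchanged.
 */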

static int timer_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long sec;
	unsigned long ctime = get_cmos_time();
	unsigned long sleep_length = (ctime - sleep_start) * HZ;

	if (vxtime.hpet_address)
		hpet_reenable();
	else
		i8254_timer_resume();

	sec = ctime + clock_cmos_diff;
	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	jiffies += sleep_length;
	wall_jiffies += sleep_length;
	touch_softlockup_watchdog();
	return 0;
}

static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};

/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.id = 0,
	.cls = &timer_sysclass,
};

static int time_init_device(void)
{
	int error = sysdev_class_register(&timer_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(time_init_device);

#ifdef CONFIG_HPET_EMULATE_RTC
/* The HPET in LegacyReplacement Mode eats up the RTC interrupt line. When
 * HPET is enabled, we support RTC interrupt functionality in software.
 * The RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every second, when the RTC
 *    clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupts, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for a non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user-specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define RTC_NUM_INTS		1

static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq;	/* RTC interrupt frequency */
static unsigned int hpet_t1_cmp;	/* cached comparator register */

int is_hpet_enabled(void)
{
	return vxtime.hpet_address != 0;
}

/*
 * Timer 1 is used for the RTC; we do not use the periodic interrupt feature,
 * even if the HPET supports periodic interrupts on Timer 1.
 * The reason is that to set up a periodic interrupt in the HPET, we need to
 * stop the main counter, and if we did that every time someone disables or
 * enables the RTC, we would have an adverse effect on the main kernel timer
 * running on Timer 0.
 * So, for the time being, simulate the periodic interrupt in software.
 *
 * hpet_rtc_timer_init() is called the first time, and during subsequent
 * interrupts reinitialization happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
	/*
	 * Set up counter 1 and enable the interrupts.
	 */
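	/*
	 * Polling-rate arithmetic used below (a sketch under this file's
	 * definitions): hpet_tick is HPET clocks per kernel tick, so
	 * hpet_tick * HZ is HPET clocks per second, and dividing by the
	 * desired RTC interrupt frequency gives HPET clocks per simulated
	 * RTC interrupt. At an assumed 14.31818 MHz HPET and a 64 Hz poll
	 * rate, that is roughly 14318180 / 64 ~= 223722 clocks per interrupt.
	 */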
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	local_irq_save(flags);
	cnt = hpet_readl(HPET_COUNTER);
	cnt += ((hpet_tick * HZ) / hpet_rtc_int_freq);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;
	local_irq_restore(flags);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return 1;
}

static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, cnt;

	if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
		cfg = hpet_readl(HPET_T1_CFG);
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_T1_CFG);
		return;
	}

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than the current
	   count. */
	cnt = hpet_t1_cmp;
	cnt += hpet_tick * HZ / hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;
}

/*
 * The functions below are called from the rtc driver.
 * Return 0 if the HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	if (bit_mask & RTC_UIE)
		UIE_on = 0;
	if (bit_mask & RTC_PIE)
		PIE_on = 0;
	if (bit_mask & RTC_AIE)
		AIE_on = 0;

	return 1;
}

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	int timer_init_reqd = 0;

	if (!is_hpet_enabled())
		return 0;

	if (!(PIE_on | AIE_on | UIE_on))
		timer_init_reqd = 1;

	if (bit_mask & RTC_UIE)
		UIE_on = 1;
	if (bit_mask & RTC_PIE) {
		PIE_on = 1;
		PIE_count = 0;
	}
	if (bit_mask & RTC_AIE)
		AIE_on = 1;

	if (timer_init_reqd)
		hpet_rtc_timer_init();

	return 1;
}

int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	alarm_time.tm_hour = hrs;
	alarm_time.tm_min = min;
	alarm_time.tm_sec = sec;

	return 1;
}

int hpet_set_periodic_freq(unsigned long freq)
{
	if (!is_hpet_enabled())
		return 0;

	PIE_freq = freq;
	PIE_count = 0;

	return 1;
}

int hpet_rtc_dropped_irq(void)
{
	if (!is_hpet_enabled())
		return 0;

	return 1;
}

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;
	int call_rtc_interrupt = 0;

	hpet_rtc_timer_reinit();

	if (UIE_on | AIE_on)
		rtc_get_rtc_time(&curr_time);
	if (UIE_on) {
		if (curr_time.tm_sec != prev_update_sec) {
			/* Set update int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag = RTC_UF;
			prev_update_sec = curr_time.tm_sec;
		}
	}
	if (PIE_on) {
		PIE_count++;
		if (PIE_count >= hpet_rtc_int_freq / PIE_freq) {
			/* Set periodic int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_PF;
			PIE_count = 0;
		}
	}
	if (AIE_on) {
		if ((curr_time.tm_sec == alarm_time.tm_sec) &&
		    (curr_time.tm_min == alarm_time.tm_min) &&
		    (curr_time.tm_hour == alarm_time.tm_hour)) {
			/* Set alarm int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_AF;
		}
	}
	if (call_rtc_interrupt) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		rtc_interrupt(rtc_int_flag, dev_id, regs);
	}
	return IRQ_HANDLED;
}
#endif

static int __init nohpet_setup(char *s)
{
	nohpet = 1;
	return 0;
}

__setup("nohpet", nohpet_setup);

static int __init notsc_setup(char *s)
{
	notsc = 1;
	return 0;
}

__setup("notsc", notsc_setup);