/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/hpet.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/apic.h>
#endif

u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif
extern void i8254_timer_resume(void);
extern int using_apic_timer;

DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

static int nohpet __initdata = 0;
static int notsc __initdata = 0;

#undef HPET_HACK_ENABLE_DANGEROUS

unsigned int cpu_khz;				/* TSC clocks per msec */
static unsigned long hpet_period;		/* fsecs / HPET clock */
unsigned long hpet_tick;			/* HPET clocks / interrupt */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks;				/* command line option */
unsigned long long monotonic_base;

struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;

static inline void rdtscll_sync(unsigned long *tsc)
{
#ifdef CONFIG_SMP
	sync_core();
#endif
	rdtscll(*tsc);
}

/*
 * do_gettimeoffset() returns microseconds since the last timer interrupt was
 * triggered by hardware. A memory read of the HPET is slower than a register
 * read of the TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */

static inline unsigned int do_gettimeoffset_tsc(void)
{
	unsigned long t;
	unsigned long x;
	rdtscll_sync(&t);
	if (t < vxtime.last_tsc)
		t = vxtime.last_tsc;	/* hack */
	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
	return x;
}

static inline unsigned int do_gettimeoffset_hpet(void)
{
	return ((hpet_readl(HPET_COUNTER) - vxtime.last) * vxtime.quot) >> 32;
}
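
/*
 * A minimal sketch of the 32.32 fixed-point conversion used above, for
 * illustration only and compiled out. It assumes a hypothetical 1 GHz TSC
 * (cpu_khz == 1000000), so the quotient is "microseconds per cycle" scaled
 * by 2^32, just like vxtime.tsc_quot.
 */
#if 0
static unsigned int example_tsc_to_usec(unsigned long delta_cycles)
{
	unsigned long quot = (1000UL << 32) / 1000000;	/* usec/cycle << 32 */

	/* 2,000,000 cycles at 1 GHz -> ~2000 usec (truncated) */
	return (delta_cycles * quot) >> 32;
}
#endif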

unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;

/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */

void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq, t;
	unsigned int sec, usec;

	do {
		seq = read_seqbegin(&xtime_lock);

		sec = xtime.tv_sec;
		usec = xtime.tv_nsec / 1000;

		/* i386 does some correction here to keep the clock
		   monotonic even when ntpd is correcting drift.
		   But those corrections didn't work for me: the clock
		   is non-monotonic with NTP anyway.
		   I dropped all corrections for now until a real solution
		   can be found. Note that when you fix it here you need to
		   do the same in arch/x86_64/kernel/vsyscall.c and export
		   all needed variables in vmlinux.lds. -AK */

		t = (jiffies - wall_jiffies) * (1000000L / HZ) +
			do_gettimeoffset();
		usec += t;

	} while (read_seqretry(&xtime_lock, seq));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}

EXPORT_SYMBOL(do_gettimeofday);
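
/*
 * A hedged usage sketch, compiled out: how a caller would read the wall
 * clock through this interface. The function name is hypothetical.
 */
#if 0
static void example_read_wall_clock(void)
{
	struct timeval tv;

	do_gettimeofday(&tv);
	printk(KERN_DEBUG "wall clock: %ld.%06ld\n", tv.tv_sec, tv.tv_usec);
}
#endif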

/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	nsec -= do_gettimeoffset() * 1000 +
		(jiffies - wall_jiffies) * (NSEC_PER_SEC / HZ);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or saves only a
	   single word. Check whether the address on the stack looks like a
	   kernel text address. There is a small window for false hits, but in
	   that case the tick is just accounted to the spinlock function.
	   It would be better to write these functions in assembler again and
	   check exactly. */
	if (in_lock_functions(pc)) {
		char *v = *(char **)regs->rsp;
		if ((v >= _stext && v <= _etext) ||
		    (v >= _sinittext && v <= _einittext) ||
		    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
			return (unsigned long)v;
		return ((unsigned long *)regs->rsp)[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */

static void set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

/*
 * IRQs are disabled when we're called from the timer interrupt,
 * so no need for spin_lock_irqsave().
 */

	spin_lock(&rtc_lock);

/*
 * Tell the clock it's being set and stop it.
 */

	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

/*
 * Since we're only adjusting minutes and seconds, don't interfere with hour
 * overflow. This avoids messing with unknown time zones but requires your RTC
 * not to be off by more than 15 minutes. Since we're calling it only when
 * our clock is externally synchronized using NTP, this shouldn't be a problem.
 */

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

#if 0
	/* AMD 8111 is a really bad time keeper and hits this regularly.
	   It probably was an attempt to avoid screwing up DST, but ignore
	   that for now. */
	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk(KERN_WARNING "time.c: can't update CMOS clock "
		       "from %d to %d\n", cmos_minutes, real_minutes);
	} else
#endif
	{
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		CMOS_WRITE(real_seconds, RTC_SECONDS);
		CMOS_WRITE(real_minutes, RTC_MINUTES);
	}

/*
 * The following flags have to be released exactly in this order, otherwise the
 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
 * not reset the oscillator and will not update precisely 500 ms later. You
 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
 * believes data sheets anyway ... -- Markus Kuhn
 */

	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
}
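
/*
 * A worked example of the half-hour test above, illustration only and
 * compiled out: with an RTC running in a zone 30 minutes ahead, nowtime
 * may yield real_minutes = 5 while the RTC reads 35 minutes.
 */
#if 0
static void example_half_hour_correction(void)
{
	int cmos_minutes = 35, real_minutes = 5;

	/* (abs(5 - 35) + 15) / 30 == 1, which is odd, so add 30 back */
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;
	real_minutes %= 60;	/* 35: stays in the RTC's half-hour zone */
}
#endif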


/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 * Note: This function is required to return accurate
 * time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
	unsigned long seq;
	u32 last_offset, this_offset, offset;
	unsigned long long base;

	if (vxtime.mode == VXTIME_HPET) {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last;
			base = monotonic_base;
			this_offset = hpet_readl(HPET_T0_CMP) - hpet_tick;

		} while (read_seqretry(&xtime_lock, seq));
		offset = (this_offset - last_offset);
		offset *= (NSEC_PER_SEC / HZ) / hpet_tick;
		return base + offset;
	} else {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last_tsc;
			base = monotonic_base;
		} while (read_seqretry(&xtime_lock, seq));
		sync_core();
		rdtscll(this_offset);
		offset = (this_offset - last_offset) * 1000 / cpu_khz;
		return base + offset;
	}
}
EXPORT_SYMBOL(monotonic_clock);
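
/*
 * A hedged usage sketch, compiled out: timing an interval with
 * monotonic_clock(). Assumes <linux/delay.h> for mdelay(); the function
 * name is hypothetical.
 */
#if 0
static void example_measure_interval(void)
{
	unsigned long long t0, t1;

	t0 = monotonic_clock();
	mdelay(10);			/* something to time */
	t1 = monotonic_clock();
	printk(KERN_DEBUG "elapsed: %llu ns\n", t1 - t0);
}
#endif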

static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
	static long lost_count;
	static int warned;

	if (report_lost_ticks) {
		printk(KERN_WARNING "time.c: Lost %d timer tick(s)! ", lost);
		print_symbol("rip %s\n", regs->rip);
	}

	if (lost_count == 1000 && !warned) {
		printk(KERN_WARNING
		       "warning: many lost ticks.\n"
		       KERN_WARNING "Your time source seems to be unstable or "
		       "some driver is hogging interrupts\n");
		print_symbol("rip %s\n", regs->rip);
		if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
			printk(KERN_WARNING "Falling back to HPET\n");
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
			vxtime.mode = VXTIME_HPET;
			do_gettimeoffset = do_gettimeoffset_hpet;
		}
		/* else should fall back to PIT, but code missing. */
		warned = 1;
	} else
		lost_count++;

#ifdef CONFIG_CPU_FREQ
	/* In some cases the CPU can change frequency without us noticing
	   (like going into thermal throttle).
	   Give cpufreq a chance to catch up. */
	if ((lost_count + 1) % 25 == 0)
		cpufreq_delayed_get();
#endif
}

static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	static unsigned long rtc_update = 0;
	unsigned long tsc;
	int delay, offset = 0, lost = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and this handler change them.
 * -arca+vojtech
 */

	write_seqlock(&xtime_lock);

	if (vxtime.hpet_address) {
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
		delay = hpet_readl(HPET_COUNTER) - offset;
	} else {
		spin_lock(&i8253_lock);
		outb_p(0x00, 0x43);
		delay = inb_p(0x40);
		delay |= inb(0x40) << 8;
		spin_unlock(&i8253_lock);
		delay = LATCH - 1 - delay;
	}

	rdtscll_sync(&tsc);

	if (vxtime.mode == VXTIME_HPET) {
		if (offset - vxtime.last > hpet_tick)
			lost = (offset - vxtime.last) / hpet_tick - 1;

		monotonic_base +=
			(offset - vxtime.last) * (NSEC_PER_SEC / HZ) / hpet_tick;

		vxtime.last = offset;
	} else {
		offset = (((tsc - vxtime.last_tsc) *
			   vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

		if (offset < 0)
			offset = 0;

		if (offset > (USEC_PER_SEC / HZ)) {
			lost = offset / (USEC_PER_SEC / HZ);
			offset %= (USEC_PER_SEC / HZ);
		}

		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;

		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

		if ((((tsc - vxtime.last_tsc) *
		      vxtime.tsc_quot) >> 32) < offset)
			vxtime.last_tsc = tsc -
				(((long) offset << 32) / vxtime.tsc_quot) - 1;
	}

	if (lost > 0) {
		handle_lost_ticks(lost, regs);
		jiffies += lost;
	}

/*
 * Do the timer stuff.
 */

	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif

/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system; in that case we
 * have to call the local interrupt handler ourselves.
 */

#ifndef CONFIG_X86_LOCAL_APIC
	profile_tick(CPU_PROFILING, regs);
#else
	if (!using_apic_timer)
		smp_local_timer_interrupt(regs);
#endif

/*
 * If we have an externally synchronized Linux clock, then update the CMOS
 * clock accordingly every ~11 minutes. set_rtc_mmss() will be called in the
 * jiffy closest to exactly 500 ms before the next second. If the update fails,
 * we don't care, as it'll be updated on the next turn, and the problem (time
 * way off) isn't likely to go away much sooner anyway.
 */

	if ((~time_status & STA_UNSYNC) && xtime.tv_sec > rtc_update &&
	    abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;
	}

	write_sequnlock(&xtime_lock);

	return IRQ_HANDLED;
}

static unsigned int cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10	/* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
{
	cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR) / cpu_mhz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
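
/*
 * Worked example of the cyc2ns fixed-point scale, illustration only and
 * compiled out: for a hypothetical 2000 MHz CPU,
 * cyc2ns_scale = (1000 << 10) / 2000 = 512, so 2000 cycles convert to
 * (2000 * 512) >> 10 = 1000 ns, i.e. exactly 1 usec at 2 GHz.
 */
#if 0
static unsigned long long example_cycles_to_ns(void)
{
	unsigned int scale = (1000 << CYC2NS_SCALE_FACTOR) / 2000; /* 512 */

	return (2000ULL * scale) >> CYC2NS_SCALE_FACTOR;	   /* 1000 */
}
#endif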

unsigned long long sched_clock(void)
{
	unsigned long a = 0;

#if 0
	/* Don't do a HPET read here. Using TSC always is much faster
	   and HPET may not be mapped yet when the scheduler first runs.
	   Disadvantage is a small drift between CPUs in some configurations,
	   but that should be tolerable. */
	if (__vxtime.mode == VXTIME_HPET)
		return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif

	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
	   which means it is not completely exact and may not be monotonic
	   between CPUs. But the errors should be too small to matter for
	   scheduling purposes. */

	rdtscll(a);
	return cycles_2_ns(a);
}

unsigned long get_cmos_time(void)
{
	unsigned int timeout, year, mon, day, hour, min, sec;
	unsigned char last, this;
	unsigned long flags;

/*
 * The Linux interpretation of the CMOS clock register contents: When the
 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
 * second which has precisely just started. Waiting for this can take up to 1
 * second; we time out after approximately 2.4 seconds on a machine with a
 * standard 8.3 MHz ISA bus.
 */

	spin_lock_irqsave(&rtc_lock, flags);

	timeout = 1000000;
	last = this = 0;

	do {
		last = this;
		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
		timeout--;
	} while (timeout && !(last && !this));

/*
 * Here we are safe to assume the registers won't change for a whole second, so
 * we just go ahead and read them.
 */

	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	spin_unlock_irqrestore(&rtc_lock, flags);

/*
 * We know that x86-64 always uses BCD format, no need to check the config
 * register.
 */

	BCD_TO_BIN(sec);
	BCD_TO_BIN(min);
	BCD_TO_BIN(hour);
	BCD_TO_BIN(day);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(year);

/*
 * x86-64 systems have only existed since 2002, so this will work up to
 * Dec 31, 2099.
 */
	year += 2000;

	return mktime(year, mon, day, hour, min, sec);
}
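
/*
 * A sketch of the BCD decoding done above, illustration only and compiled
 * out: an RTC register holding "59 seconds" reads as 0x59, with the low
 * nibble the ones digit and the high nibble the tens digit.
 */
#if 0
static unsigned int example_bcd_to_bin(unsigned int bcd)
{
	return (bcd & 0x0f) + (bcd >> 4) * 10;	/* 0x59 -> 5*10 + 9 = 59 */
}
#endif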

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the CPU frequency
   changes.

   RED-PEN: On SMP we assume all CPUs run with the same frequency. That's
   not that important, because current Opteron setups do not support
   frequency scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */

#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
{
	unsigned int cpu;
	for_each_online_cpu(cpu) {
		cpufreq_get(cpu);
	}
	cpufreq_delayed_issched = 0;
}

/* If we notice lost ticks, schedule a call to cpufreq_get(); it verifies
 * that the CPU frequency the timing core thinks the CPU is running at is
 * still correct.
 */
static void cpufreq_delayed_get(void)
{
	static int warned;
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		if (!warned) {
			warned = 1;
			printk(KERN_DEBUG
			       "Losing some ticks... checking if CPU frequency changed.\n");
		}
		schedule_work(&cpufreq_delayed_get_work);
	}
}

static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		cpu_khz_ref = cpu_khz;
	}
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	}

	set_cyc2ns_scale(cpu_khz_ref / 1000);

	return 0;
}
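
/*
 * A hedged sketch of the proportional rescaling above, compiled out:
 * cpufreq_scale(old, div, mult) computes old * mult / div, so a
 * hypothetical drop from 2000 MHz to 1000 MHz halves cpu_khz.
 */
#if 0
static void example_rescale_cpu_khz(void)
{
	unsigned long khz;

	/* 2 GHz reference rescaled to a 1 GHz target -> 1000000 kHz */
	khz = cpufreq_scale(2000000UL, 2000000, 1000000);

	/* tsc_quot then grows: each TSC cycle covers more wall time */
}
#endif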

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))
		cpufreq_init = 1;
	return 0;
}

core_initcall(cpufreq_tsc);

#endif

/*
 * hpet_calibrate_tsc() calibrates the processor TSC in a very simple way,
 * comparing it to the HPET timer of known frequency.
 */

#define TICK_COUNT 100000000

static unsigned int __init hpet_calibrate_tsc(void)
{
	int tsc_start, hpet_start;
	int tsc_now, hpet_now;
	unsigned long flags;

	local_irq_save(flags);
	local_irq_disable();

	hpet_start = hpet_readl(HPET_COUNTER);
	rdtscl(tsc_start);

	do {
		local_irq_disable();
		hpet_now = hpet_readl(HPET_COUNTER);
		sync_core();
		rdtscl(tsc_now);
		local_irq_restore(flags);
	} while ((tsc_now - tsc_start) < TICK_COUNT &&
		 (hpet_now - hpet_start) < TICK_COUNT);

	return (tsc_now - tsc_start) * 1000000000L
		/ ((hpet_now - hpet_start) * hpet_period / 1000);
}


/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */

static unsigned int __init pit_calibrate_tsc(void)
{
	unsigned long start, end;
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);

	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	outb(0xb0, 0x43);
	outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
	rdtscll(start);
	sync_core();
	while ((inb(0x61) & 0x20) == 0)
		;
	sync_core();
	rdtscll(end);

	spin_unlock_irqrestore(&i8253_lock, flags);

	return (end - start) / 50;
}
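
/*
 * The unit math behind the division above, illustration only and compiled
 * out: the TSC delta accumulates over 50 ms, so dividing by 50 yields
 * cycles per millisecond, which is exactly cpu_khz.
 */
#if 0
static unsigned int example_pit_units(unsigned long cycles_in_50ms)
{
	/* e.g. 100,000,000 cycles in 50 ms -> 2,000,000 kHz, i.e. 2 GHz */
	return cycles_in_50ms / 50;
}
#endif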

#ifdef CONFIG_HPET
static __init int late_hpet_init(void)
{
	struct hpet_data hd;
	unsigned int ntimer;

	if (!vxtime.hpet_address)
		return -1;

	memset(&hd, 0, sizeof(hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
	ntimer++;

	/*
	 * Register with the driver.
	 * Timer 0 and timer 1 are used by the platform.
	 */
	hd.hd_phys_address = vxtime.hpet_address;
	hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;
	if (ntimer > 2) {
		struct hpet *hpet;
		struct hpet_timer *timer;
		int i;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

		for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
		     timer++, i++)
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
				Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_alloc(&hd);
	return 0;
}
fs_initcall(late_hpet_init);
#endif

static int hpet_timer_stop_set_go(unsigned long tick)
{
	unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */

	cfg = hpet_readl(HPET_CFG);
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0 as periodic, with the first interrupt at hpet_tick and the
 * period also hpet_tick.
 */

	hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
		    HPET_TN_32BIT, HPET_T0_CFG);
	hpet_writel(hpet_tick, HPET_T0_CMP);
	hpet_writel(hpet_tick, HPET_T0_CMP);	/* AK: why twice? */

/*
 * Go!
 */

	cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);

	return 0;
}

static int hpet_init(void)
{
	unsigned int id;

	if (!vxtime.hpet_address)
		return -1;
	set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
	__set_fixmap(VSYSCALL_HPET, vxtime.hpet_address,
		     PAGE_KERNEL_VSYSCALL_NOCACHE);

/*
 * Read the period, compute tick and quotient.
 */

	id = hpet_readl(HPET_ID);

	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER) ||
	    !(id & HPET_ID_LEGSUP))
		return -1;

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)
		return -1;

	hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
		hpet_period;

	return hpet_timer_stop_set_go(hpet_tick);
}
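
/*
 * Worked example of the hpet_tick computation, illustration only and
 * compiled out: assuming the common 14.31818 MHz HPET (hpet_period of
 * about 69841279 fs per clock) and HZ == 1000, one timer interrupt spans
 * 1000 usec = 10^12 fs, so hpet_tick rounds to ~14318 HPET clocks.
 */
#if 0
static unsigned long example_hpet_tick(void)
{
	unsigned long period_fs = 69841279;	/* fs per HPET clock */

	return (1000000000L * 1000 + period_fs / 2) / period_fs; /* 14318 */
}
#endif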

static int hpet_reenable(void)
{
	return hpet_timer_stop_set_go(hpet_tick);
}

void __init pit_init(void)
{
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);
	outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff, 0x40);	/* LSB */
	outb_p(LATCH >> 8, 0x40);	/* MSB */
	spin_unlock_irqrestore(&i8253_lock, flags);
}

int __init time_setup(char *str)
{
	report_lost_ticks = 1;
	return 1;
}

static struct irqaction irq0 = {
	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};

extern void __init config_acpi_tables(void);

void __init time_init(void)
{
	char *timename;

#ifdef HPET_HACK_ENABLE_DANGEROUS
	if (!vxtime.hpet_address) {
		printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
		       "manually!\n");
		outl(0x800038a0, 0xcf8);
		outl(0xff000001, 0xcfc);
		outl(0x800038a0, 0xcf8);
		vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
		printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
		       "at %#lx.\n", vxtime.hpet_address);
	}
#endif
	if (nohpet)
		vxtime.hpet_address = 0;

	xtime.tv_sec = get_cmos_time();
	xtime.tv_nsec = 0;

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	if (!hpet_init()) {
		vxtime_hz = (1000000000000000L + hpet_period / 2) /
			hpet_period;
		cpu_khz = hpet_calibrate_tsc();
		timename = "HPET";
	} else {
		pit_init();
		cpu_khz = pit_calibrate_tsc();
		timename = "PIT";
	}

	printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
	       vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
	       cpu_khz / 1000, cpu_khz % 1000);
	vxtime.mode = VXTIME_TSC;
	vxtime.quot = (1000000L << 32) / vxtime_hz;
	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	vxtime.hz = vxtime_hz;
	rdtscll_sync(&vxtime.last_tsc);
	setup_irq(0, &irq0);

	set_cyc2ns_scale(cpu_khz / 1000);

#ifndef CONFIG_SMP
	time_init_gtod();
#endif
}

/*
 * Decide after all CPUs are booted what mode gettimeofday should use.
 */
void __init time_init_gtod(void)
{
	char *timetype;

	/*
	 * AMD systems with more than one CPU don't have fully synchronized
	 * TSCs. Always use HPET gettimeofday for these, although it is slower.
	 * Intel SMP systems usually have synchronized TSCs, so always use
	 * the TSC.
	 *
	 * Exceptions:
	 * IBM Summit2 checked by oem_force_hpet_timer().
	 * AMD dual core may also not need HPET. Check me.
	 *
	 * Can be turned off with "notsc".
	 */
	if (num_online_cpus() > 1 &&
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		notsc = 1;
	/* Some systems will want to disable TSC and use HPET. */
	if (oem_force_hpet_timer())
		notsc = 1;
	if (vxtime.hpet_address && notsc) {
		timetype = "HPET";
		vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		vxtime.mode = VXTIME_HPET;
		do_gettimeoffset = do_gettimeoffset_hpet;
	} else {
		timetype = vxtime.hpet_address ? "HPET/TSC" : "PIT/TSC";
		vxtime.mode = VXTIME_TSC;
	}

	printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
}

__setup("report_lost_ticks", time_setup);

static long clock_cmos_diff;
static unsigned long sleep_start;

static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
	/*
	 * Estimate time zone so that set_time can update the clock
	 */
	long cmos_time = get_cmos_time();

	clock_cmos_diff = -cmos_time;
	clock_cmos_diff += get_seconds();
	sleep_start = cmos_time;
	return 0;
}

static int timer_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long sec;
	unsigned long ctime = get_cmos_time();
	unsigned long sleep_length = (ctime - sleep_start) * HZ;

	if (vxtime.hpet_address)
		hpet_reenable();
	else
		i8254_timer_resume();

	sec = ctime + clock_cmos_diff;
	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	jiffies += sleep_length;
	wall_jiffies += sleep_length;
	return 0;
}

static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};


/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.id = 0,
	.cls = &timer_sysclass,
};

static int time_init_device(void)
{
	int error = sysdev_class_register(&timer_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(time_init_device);

#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * The RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generates an interrupt, every second, when the RTC
 *    clock is updated
 * 2) Alarm Interrupt - generates an interrupt at a specific time of day
 * 3) Periodic Interrupt - generates a periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for a non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>

extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);

#define DEFAULT_RTC_INT_FREQ	64
#define RTC_NUM_INTS		1

static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq;	/* RTC interrupt frequency */

int is_hpet_enabled(void)
{
	return vxtime.hpet_address != 0;
}

/*
 * Timer 1 for RTC. We do not use the periodic interrupt feature,
 * even though HPET supports periodic interrupts on timer 1.
 * The reason is that to set up a periodic interrupt in HPET, we need to
 * stop the main counter, and if we did that every time someone disables or
 * enables the RTC, it would have an adverse effect on the main kernel timer
 * running on timer 0.
 * So, for the time being, simulate the periodic interrupt in software.
 *
 * hpet_rtc_timer_init() is called for the first setup; during subsequent
 * interrupts, reinitialization happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
	/*
	 * Set the counter 1 and enable the interrupts.
	 */
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	local_irq_save(flags);
	cnt = hpet_readl(HPET_COUNTER);
	cnt += (hpet_tick * HZ) / hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);
	local_irq_restore(flags);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return 1;
}
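
/*
 * The comparator arithmetic above, worked through as an illustration only
 * and compiled out: hpet_tick is HPET clocks per kernel tick (1/HZ s), so
 * hpet_tick * HZ is HPET clocks per second, and dividing by the RTC
 * interrupt frequency gives HPET clocks per emulated RTC interrupt.
 */
#if 0
static unsigned int example_rtc_delta(void)
{
	/* hypothetical values: 14318 clocks/tick, HZ == 1000, 64 Hz RTC */
	return (14318 * 1000) / 64;		/* 223718 HPET clocks */
}
#endif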

static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, cnt;

	if (!(PIE_on | AIE_on | UIE_on))
		return;

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than the current
	   count. */
	cnt = hpet_readl(HPET_T1_CMP);
	cnt += hpet_tick * HZ / hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);
}

/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	if (bit_mask & RTC_UIE)
		UIE_on = 0;
	if (bit_mask & RTC_PIE)
		PIE_on = 0;
	if (bit_mask & RTC_AIE)
		AIE_on = 0;

	return 1;
}

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	int timer_init_reqd = 0;

	if (!is_hpet_enabled())
		return 0;

	if (!(PIE_on | AIE_on | UIE_on))
		timer_init_reqd = 1;

	if (bit_mask & RTC_UIE)
		UIE_on = 1;
	if (bit_mask & RTC_PIE) {
		PIE_on = 1;
		PIE_count = 0;
	}
	if (bit_mask & RTC_AIE)
		AIE_on = 1;

	if (timer_init_reqd)
		hpet_rtc_timer_init();

	return 1;
}

int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	alarm_time.tm_hour = hrs;
	alarm_time.tm_min = min;
	alarm_time.tm_sec = sec;

	return 1;
}

int hpet_set_periodic_freq(unsigned long freq)
{
	if (!is_hpet_enabled())
		return 0;

	PIE_freq = freq;
	PIE_count = 0;

	return 1;
}

int hpet_rtc_dropped_irq(void)
{
	if (!is_hpet_enabled())
		return 0;

	return 1;
}

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;
	int call_rtc_interrupt = 0;

	hpet_rtc_timer_reinit();

	if (UIE_on | AIE_on)
		rtc_get_rtc_time(&curr_time);
	if (UIE_on) {
		if (curr_time.tm_sec != prev_update_sec) {
			/* Set update int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag = RTC_UF;
			prev_update_sec = curr_time.tm_sec;
		}
	}
	if (PIE_on) {
		PIE_count++;
		if (PIE_count >= hpet_rtc_int_freq / PIE_freq) {
			/* Set periodic int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_PF;
			PIE_count = 0;
		}
	}
	if (AIE_on) {
		if ((curr_time.tm_sec == alarm_time.tm_sec) &&
		    (curr_time.tm_min == alarm_time.tm_min) &&
		    (curr_time.tm_hour == alarm_time.tm_hour)) {
			/* Set alarm int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_AF;
		}
	}
	if (call_rtc_interrupt) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		rtc_interrupt(rtc_int_flag, dev_id, regs);
	}
	return IRQ_HANDLED;
}
#endif

static int __init nohpet_setup(char *s)
{
	nohpet = 1;
	return 0;
}

__setup("nohpet", nohpet_setup);

static int __init notsc_setup(char *s)
{
	notsc = 1;
	return 0;
}

__setup("notsc", notsc_setup);