/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/hpet.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/apic.h>
#endif

u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif
extern void i8254_timer_resume(void);
extern int using_apic_timer;

DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

static int nohpet __initdata = 0;
static int notsc __initdata = 0;

#undef HPET_HACK_ENABLE_DANGEROUS

unsigned int cpu_khz;				/* TSC clocks / usec, not used here */
static unsigned long hpet_period;		/* fsecs / HPET clock */
unsigned long hpet_tick;			/* HPET clocks / interrupt */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks;				/* command line option */
unsigned long long monotonic_base;

struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;

static inline void rdtscll_sync(unsigned long *tsc)
{
#ifdef CONFIG_SMP
	sync_core();
#endif
	rdtscll(*tsc);
}

/*
 * do_gettimeoffset() returns microseconds since the last timer interrupt was
 * triggered by hardware. A memory read of the HPET is slower than a register
 * read of the TSC, but much more reliable. It's also synchronized to the
 * timer interrupt. Note that do_gettimeoffset() may return more than
 * hpet_tick if a timer interrupt has happened already but vxtime.trigger
 * wasn't updated yet. This is not a problem, because jiffies hasn't been
 * updated either. They are bound together by xtime_lock.
 */

static inline unsigned int do_gettimeoffset_tsc(void)
{
	unsigned long t;
	unsigned long x;
	rdtscll_sync(&t);
	if (t < vxtime.last_tsc)
		t = vxtime.last_tsc; /* hack: never let the TSC appear to run backwards */
	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
	return x;
}
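
/*
 * A worked example of the 32.32 fixed-point conversion above, with
 * illustrative numbers that are not taken from any particular machine:
 * for a 2 GHz CPU, cpu_khz = 2000000 and time_init() sets
 *
 *	vxtime.tsc_quot = (1000 << 32) / 2000000 = 2147483
 *
 * i.e. microseconds per TSC cycle, scaled by 2^32. A delta of one
 * million cycles then yields
 *
 *	(1000000 * 2147483) >> 32 = 499 usec
 *
 * which is 10^6 cycles / 2 GHz = 500 usec, minus fixed-point truncation.
 */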

static inline unsigned int do_gettimeoffset_hpet(void)
{
	return ((hpet_readl(HPET_COUNTER) - vxtime.last) * vxtime.quot) >> 32;
}

unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;

/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */

void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq, t;
	unsigned int sec, usec;

	do {
		seq = read_seqbegin(&xtime_lock);

		sec = xtime.tv_sec;
		usec = xtime.tv_nsec / 1000;

		/* i386 does some correction here to keep the clock
		   monotonic even when ntpd is fixing drift.
		   But the corrections didn't work for me; the clock is
		   non-monotonic with ntp anyway.
		   I have dropped all corrections until a real solution can
		   be found. Note that when you fix it here you need to do
		   the same in arch/x86_64/kernel/vsyscall.c and export all
		   needed variables in vmlinux.lds. -AK */

		t = (jiffies - wall_jiffies) * (1000000L / HZ) +
			do_gettimeoffset();
		usec += t;

	} while (read_seqretry(&xtime_lock, seq));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	nsec -= do_gettimeoffset() * 1000 +
		(jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);

	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or only a single
	   word. This checks whether the address on the stack looks like a
	   kernel text address. There is a small window for false hits, but in
	   that case the tick is just accounted to the spinlock function.
	   Better would be to write these functions in assembler again and
	   check exactly. */
	if (in_lock_functions(pc)) {
		char *v = *(char **)regs->rsp;
		if ((v >= _stext && v <= _etext) ||
		    (v >= _sinittext && v <= _einittext) ||
		    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
			return (unsigned long)v;
		return ((unsigned long *)regs->rsp)[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */

static void set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

/*
 * IRQs are disabled when we're called from the timer interrupt,
 * no need for spin_lock_irqsave().
 */

	spin_lock(&rtc_lock);

/*
 * Tell the clock it's being set and stop it.
 */

	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

/*
 * Since we're only adjusting minutes and seconds, don't interfere with hour
 * overflow. This avoids messing with unknown time zones but requires your RTC
 * not to be off by more than 15 minutes. Since we're calling it only when
 * our clock is externally synchronized using NTP, this shouldn't be a problem.
 */

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

#if 0
	/* AMD 8111 is a really bad time keeper and hits this regularly.
	   It probably was an attempt to avoid screwing up DST, but ignore
	   that for now. */
	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk(KERN_WARNING "time.c: can't update CMOS clock "
		       "from %d to %d\n", cmos_minutes, real_minutes);
	} else
#endif
	{
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		CMOS_WRITE(real_seconds, RTC_SECONDS);
		CMOS_WRITE(real_minutes, RTC_MINUTES);
	}

/*
 * The following flags have to be released exactly in this order, otherwise the
 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
 * not reset the oscillator and will not update precisely 500 ms later. You
 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
 * believes data sheets anyway ... -- Markus Kuhn
 */

	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
}

/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 * Note: This function is required to return accurate
 * time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
	unsigned long seq;
	u32 last_offset, this_offset, offset;
	unsigned long long base;

	if (vxtime.mode == VXTIME_HPET) {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last;
			base = monotonic_base;
			this_offset = hpet_readl(HPET_T0_CMP) - hpet_tick;

		} while (read_seqretry(&xtime_lock, seq));
		offset = (this_offset - last_offset);
		offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
		return base + offset;
	} else {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last_tsc;
			base = monotonic_base;
		} while (read_seqretry(&xtime_lock, seq));
		sync_core();
		rdtscll(this_offset);
		offset = (this_offset - last_offset) * 1000 / cpu_khz;
		return base + offset;
	}
}
EXPORT_SYMBOL(monotonic_clock);
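
/*
 * A note on the HPET scaling above, with illustrative numbers (a common
 * 14.31818 MHz HPET and HZ=1000; neither value is guaranteed here):
 * NSEC_PER_SEC/HZ = 10^6 ns elapse per tick and hpet_tick ~= 14318 HPET
 * clocks elapse per tick, so
 *
 *	(NSEC_PER_SEC/HZ) / hpet_tick = 10^6 / 14318 = 69 ns per HPET clock
 *
 * in integer arithmetic, slightly under the exact 69.84 ns. The truncation
 * only affects the intra-tick interpolation: timer_interrupt() advances
 * monotonic_base with the multiplication done before the division, which
 * keeps long-term accuracy.
 */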

static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
	static long lost_count;
	static int warned;

	if (report_lost_ticks) {
		printk(KERN_WARNING "time.c: Lost %d timer "
		       "tick(s)! ", lost);
		print_symbol("rip %s\n", regs->rip);
	}

	if (lost_count == 1000 && !warned) {
		printk(KERN_WARNING
		       "warning: many lost ticks.\n"
		       KERN_WARNING "Your time source seems to be unstable or "
		       "some driver is hogging interrupts\n");
		print_symbol("rip %s\n", regs->rip);
		if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
			printk(KERN_WARNING "Falling back to HPET\n");
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
			vxtime.mode = VXTIME_HPET;
			do_gettimeoffset = do_gettimeoffset_hpet;
		}
		/* else we should fall back to the PIT, but that code is missing. */
		warned = 1;
	} else
		lost_count++;

#ifdef CONFIG_CPU_FREQ
	/* In some cases the CPU can change frequency without us noticing
	   (like going into thermal throttle).
	   Give cpufreq a chance to catch up. */
	if ((lost_count+1) % 25 == 0) {
		cpufreq_delayed_get();
	}
#endif
}

static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	static unsigned long rtc_update = 0;
	unsigned long tsc;
	int delay, offset = 0, lost = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and we change them. -arca+vojtech
 */

	write_seqlock(&xtime_lock);

	if (vxtime.hpet_address) {
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
		delay = hpet_readl(HPET_COUNTER) - offset;
	} else {
		spin_lock(&i8253_lock);
		outb_p(0x00, 0x43);
		delay = inb_p(0x40);
		delay |= inb(0x40) << 8;
		spin_unlock(&i8253_lock);
		delay = LATCH - 1 - delay;
	}

	rdtscll_sync(&tsc);

	if (vxtime.mode == VXTIME_HPET) {
		if (offset - vxtime.last > hpet_tick) {
			lost = (offset - vxtime.last) / hpet_tick - 1;
		}

		monotonic_base +=
			(offset - vxtime.last) * (NSEC_PER_SEC/HZ) / hpet_tick;

		vxtime.last = offset;
	} else {
		offset = (((tsc - vxtime.last_tsc) *
			   vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

		if (offset < 0)
			offset = 0;

		if (offset > (USEC_PER_SEC / HZ)) {
			lost = offset / (USEC_PER_SEC / HZ);
			offset %= (USEC_PER_SEC / HZ);
		}

		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;

		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

		if ((((tsc - vxtime.last_tsc) *
		      vxtime.tsc_quot) >> 32) < offset)
			vxtime.last_tsc = tsc -
				(((long) offset << 32) / vxtime.tsc_quot) - 1;
	}

	if (lost > 0) {
		handle_lost_ticks(lost, regs);
		jiffies += lost;
	}

/*
 * Do the timer stuff.
 */

	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif

/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in which case we
 * have to call the local interrupt handler.
 */

#ifndef CONFIG_X86_LOCAL_APIC
	profile_tick(CPU_PROFILING, regs);
#else
	if (!using_apic_timer)
		smp_local_timer_interrupt(regs);
#endif

/*
 * If we have an externally synchronized Linux clock, then update the CMOS
 * clock accordingly every ~11 minutes. set_rtc_mmss() will be called in the
 * jiffy closest to exactly 500 ms before the next second. If the update
 * fails, we don't care, as it'll be updated on the next turn, and the problem
 * (time way off) isn't likely to go away much sooner anyway.
 */

	if ((~time_status & STA_UNSYNC) && xtime.tv_sec > rtc_update &&
	    abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;
	}

	write_sequnlock(&xtime_lock);

	return IRQ_HANDLED;
}
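
/*
 * A unit sketch of the delay compensation above (reasoning only, not tied
 * to specific hardware): 'delay' counts timer-source clocks that have
 * already passed since the tick fired. vxtime.quot is usec per timer
 * clock scaled by 2^32, and vxtime.tsc_quot is usec per TSC cycle scaled
 * by 2^32, so in
 *
 *	vxtime.quot * delay / vxtime.tsc_quot
 *
 * the 2^32 factors cancel and the result comes out in TSC cycles.
 * Backdating vxtime.last_tsc by that many cycles makes
 * do_gettimeoffset_tsc() measure from the moment the tick was due, not
 * from when the handler actually ran.
 */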

static unsigned int cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
{
	cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR) / cpu_mhz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
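
/*
 * Worked example of the cyc2ns math, with illustrative numbers: for a
 * 2000 MHz CPU, set_cyc2ns_scale(2000) gives
 *
 *	cyc2ns_scale = (1000 << 10) / 2000 = 512
 *
 * so cycles_2_ns(2000000) = (2000000 * 512) >> 10 = 1000000 ns,
 * i.e. two million cycles at 2 GHz is exactly 1 ms.
 */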

unsigned long long sched_clock(void)
{
	unsigned long a = 0;

#if 0
	/* Don't do a HPET read here. Using the TSC is always much faster
	   and the HPET may not be mapped yet when the scheduler first runs.
	   The disadvantage is a small drift between CPUs in some
	   configurations, but that should be tolerable. */
	if (__vxtime.mode == VXTIME_HPET)
		return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif

	/* Could do CPU core sync here. Opteron can execute rdtsc
	   speculatively, which means it is not completely exact and may not
	   be monotonic between CPUs. But the errors should be too small to
	   matter for scheduling purposes. */

	rdtscll(a);
	return cycles_2_ns(a);
}

unsigned long get_cmos_time(void)
{
	unsigned int timeout, year, mon, day, hour, min, sec;
	unsigned char last, this;
	unsigned long flags;

/*
 * The Linux interpretation of the CMOS clock register contents: When the
 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
 * second which has precisely just started. Waiting for this can take up to 1
 * second; we time out approximately after 2.4 seconds on a machine with a
 * standard 8.3 MHz ISA bus.
 */

	spin_lock_irqsave(&rtc_lock, flags);

	timeout = 1000000;
	last = this = 0;

	/* Wait for the UIP flag to go from 1 to 0 (or for the timeout).
	   The original loop condition made this a no-op because both
	   'last' and 'this' start at zero. */
	do {
		last = this;
		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
		timeout--;
	} while (timeout && !(last && !this));

/*
 * Here we are safe to assume the registers won't change for a whole second, so
 * we just go ahead and read them.
 */

	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	spin_unlock_irqrestore(&rtc_lock, flags);

/*
 * We know that x86-64 always uses BCD format, no need to check the config
 * register.
 */

	BCD_TO_BIN(sec);
	BCD_TO_BIN(min);
	BCD_TO_BIN(hour);
	BCD_TO_BIN(day);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(year);
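
	/*
	 * Example with an illustrative raw value: the RTC stores 59
	 * seconds as BCD 0x59 (upper nibble = tens, lower = units), so
	 * BCD_TO_BIN turns 0x59 = 5*16 + 9 = 89 back into 5*10 + 9 = 59.
	 */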

/*
 * x86-64 systems have only existed since 2002.
 * This will work up to Dec 31, 2100.
 */
	year += 2000;

	return mktime(year, mon, day, hour, min, sec);
}

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the CPU frequency
   changes.

   RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
   not that important because current Opteron setups do not support
   scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */

#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
{
	unsigned int cpu;
	for_each_online_cpu(cpu) {
		cpufreq_get(cpu);
	}
	cpufreq_delayed_issched = 0;
}

/* If we notice lost ticks, schedule a call to cpufreq_get(), which verifies
 * that the CPU frequency the timing core thinks the CPU is running at is
 * still correct.
 */
static void cpufreq_delayed_get(void)
{
	static int warned;
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		if (!warned) {
			warned = 1;
			printk(KERN_DEBUG
			       "Losing some ticks... checking if CPU frequency changed.\n");
		}
		schedule_work(&cpufreq_delayed_get_work);
	}
}

static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		cpu_khz_ref = cpu_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	}

	set_cyc2ns_scale(cpu_khz_ref / 1000);

	return 0;
}
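
/*
 * Illustrative example of the rescaling above (made-up numbers): if
 * ref_freq is 2000000 kHz and the CPU drops to freq->new = 1000000 kHz,
 * then
 *
 *	cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new)
 *		~= cpu_khz_ref * 1000000 / 2000000
 *
 * halves cpu_khz, and vxtime.tsc_quot = (1000 << 32) / cpu_khz doubles,
 * so TSC deltas keep converting to the same number of microseconds.
 */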

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))
		cpufreq_init = 1;
	return 0;
}

core_initcall(cpufreq_tsc);

#endif

/*
 * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
 * it to the HPET timer of known frequency.
 */

#define TICK_COUNT 100000000

static unsigned int __init hpet_calibrate_tsc(void)
{
	int tsc_start, hpet_start;
	int tsc_now, hpet_now;
	unsigned long flags;

	local_irq_save(flags);
	local_irq_disable();

	hpet_start = hpet_readl(HPET_COUNTER);
	rdtscl(tsc_start);

	do {
		/* IRQs are briefly re-enabled between samples by the
		   local_irq_restore() below. */
		local_irq_disable();
		hpet_now = hpet_readl(HPET_COUNTER);
		sync_core();
		rdtscl(tsc_now);
		local_irq_restore(flags);
	} while ((tsc_now - tsc_start) < TICK_COUNT &&
		 (hpet_now - hpet_start) < TICK_COUNT);

	return (tsc_now - tsc_start) * 1000000000L
		/ ((hpet_now - hpet_start) * hpet_period / 1000);
}
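
/*
 * A unit check on the return expression (reasoning only, no new behavior):
 * hpet_period is femtoseconds per HPET clock, so
 * (hpet_now - hpet_start) * hpet_period / 1000 is the elapsed time in
 * picoseconds, and
 *
 *	cycles * 10^9 / picoseconds = cycles per millisecond = kHz
 *
 * which is exactly the unit cpu_khz wants.
 */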

/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */

static unsigned int __init pit_calibrate_tsc(void)
{
	unsigned long start, end;
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);

	/* Open the channel 2 gate, but keep the speaker itself off. */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/* Channel 2, mode 0, LSB/MSB; load 50 ms worth of PIT ticks. */
	outb(0xb0, 0x43);
	outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
	rdtscll(start);
	sync_core();
	while ((inb(0x61) & 0x20) == 0);
	sync_core();
	rdtscll(end);

	spin_unlock_irqrestore(&i8253_lock, flags);

	return (end - start) / 50;
}
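
/*
 * Since the gate above is open for exactly 50 ms, (end - start) is the
 * number of TSC cycles in 50 ms, and dividing by 50 gives cycles per
 * millisecond, i.e. the TSC frequency in kHz.
 */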

#ifdef CONFIG_HPET
static __init int late_hpet_init(void)
{
	struct hpet_data hd;
	unsigned int ntimer;

	if (!vxtime.hpet_address)
		return -1;

	memset(&hd, 0, sizeof(hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
	ntimer++;

	/*
	 * Register with the driver.
	 * Timer0 and Timer1 are used by the platform.
	 */
	hd.hd_phys_address = vxtime.hpet_address;
	hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;
	if (ntimer > 2) {
		struct hpet *hpet;
		struct hpet_timer *timer;
		int i;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

		for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
		     timer++, i++)
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
					Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_alloc(&hd);
	return 0;
}
fs_initcall(late_hpet_init);
#endif

static int hpet_timer_stop_set_go(unsigned long tick)
{
	unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */

	cfg = hpet_readl(HPET_CFG);
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
 * and period also hpet_tick.
 */

	hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
		    HPET_TN_32BIT, HPET_T0_CFG);
	hpet_writel(hpet_tick, HPET_T0_CMP);
	hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */

/*
 * Go!
 */

	cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);

	return 0;
}

static int hpet_init(void)
{
	unsigned int id;

	if (!vxtime.hpet_address)
		return -1;
	set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
	__set_fixmap(VSYSCALL_HPET, vxtime.hpet_address,
		     PAGE_KERNEL_VSYSCALL_NOCACHE);

/*
 * Read the period, compute tick and quotient.
 */

	id = hpet_readl(HPET_ID);

	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER) ||
	    !(id & HPET_ID_LEGSUP))
		return -1;

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)
		return -1;

	hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
		hpet_period;
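
	/*
	 * Worked example with illustrative numbers (a 14.31818 MHz HPET
	 * and HZ=1000; neither is assumed by the code): one tick lasts
	 * 10^15/HZ femtoseconds, which is what
	 * 1000000000L * (USEC_PER_SEC / HZ) computes, and hpet_period is
	 * ~69841279 fs per HPET clock, so
	 *
	 *	hpet_tick ~= 10^12 / 69841279 ~= 14318 HPET clocks per tick
	 *
	 * with the + hpet_period/2 term rounding to nearest.
	 */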

	return hpet_timer_stop_set_go(hpet_tick);
}

static int hpet_reenable(void)
{
	return hpet_timer_stop_set_go(hpet_tick);
}

void __init pit_init(void)
{
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);
	outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff, 0x40);	/* LSB */
	outb_p(LATCH >> 8, 0x40);	/* MSB */
	spin_unlock_irqrestore(&i8253_lock, flags);
}

int __init time_setup(char *str)
{
	report_lost_ticks = 1;
	return 1;
}

static struct irqaction irq0 = {
	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};

extern void __init config_acpi_tables(void);

void __init time_init(void)
{
	char *timename;

#ifdef HPET_HACK_ENABLE_DANGEROUS
	if (!vxtime.hpet_address) {
		printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
		       "manually!\n");
		outl(0x800038a0, 0xcf8);
		outl(0xff000001, 0xcfc);
		outl(0x800038a0, 0xcf8);
		vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
		printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
		       "at %#lx.\n", vxtime.hpet_address);
	}
#endif
	if (nohpet)
		vxtime.hpet_address = 0;

	xtime.tv_sec = get_cmos_time();
	xtime.tv_nsec = 0;

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	if (!hpet_init()) {
		vxtime_hz = (1000000000000000L + hpet_period / 2) /
			hpet_period;
		cpu_khz = hpet_calibrate_tsc();
		timename = "HPET";
	} else {
		pit_init();
		cpu_khz = pit_calibrate_tsc();
		timename = "PIT";
	}

	printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
	       vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
	       cpu_khz / 1000, cpu_khz % 1000);
	vxtime.mode = VXTIME_TSC;
	vxtime.quot = (1000000L << 32) / vxtime_hz;
	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	vxtime.hz = vxtime_hz;
	rdtscll_sync(&vxtime.last_tsc);
	setup_irq(0, &irq0);

	set_cyc2ns_scale(cpu_khz / 1000);
}

void __init time_init_smp(void)
{
	char *timetype;

	/*
	 * AMD systems with more than one CPU don't have fully synchronized
	 * TSCs. Always use HPET gettimeofday for these, although it is
	 * slower. Intel SMP systems usually have synchronized TSCs, so
	 * always use the TSC.
	 *
	 * Exceptions:
	 * IBM Summit2 checked by oem_force_hpet_timer().
	 * AMD dual core may also not need HPET. Check me.
	 *
	 * Can be turned off with "notsc".
	 */
	if (num_online_cpus() > 1 &&
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		notsc = 1;
	/* Some systems will want to disable TSC and use HPET. */
	if (oem_force_hpet_timer())
		notsc = 1;
	if (vxtime.hpet_address && notsc) {
		timetype = "HPET";
		vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		vxtime.mode = VXTIME_HPET;
		do_gettimeoffset = do_gettimeoffset_hpet;
	} else {
		timetype = vxtime.hpet_address ? "HPET/TSC" : "PIT/TSC";
		vxtime.mode = VXTIME_TSC;
	}

	printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
}

__setup("report_lost_ticks", time_setup);

static long clock_cmos_diff;
static unsigned long sleep_start;

static int timer_suspend(struct sys_device *dev, u32 state)
{
	/*
	 * Estimate time zone so that set_time can update the clock
	 */
	long cmos_time = get_cmos_time();

	clock_cmos_diff = -cmos_time;
	clock_cmos_diff += get_seconds();
	sleep_start = cmos_time;
	return 0;
}

static int timer_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long sec;
	unsigned long ctime = get_cmos_time();
	unsigned long sleep_length = (ctime - sleep_start) * HZ;

	if (vxtime.hpet_address)
		hpet_reenable();
	else
		i8254_timer_resume();

	sec = ctime + clock_cmos_diff;
	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	jiffies += sleep_length;
	wall_jiffies += sleep_length;
	return 0;
}

static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};

/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.id = 0,
	.cls = &timer_sysclass,
};

static int time_init_device(void)
{
	int error = sysdev_class_register(&timer_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(time_init_device);

#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * The RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every second, when the RTC
 *    clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupts, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for a non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>

extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);

#define DEFAULT_RTC_INT_FREQ	64
#define RTC_NUM_INTS		1

static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq;	/* RTC interrupt frequency */

int is_hpet_enabled(void)
{
	return vxtime.hpet_address != 0;
}

/*
 * Timer 1 is used for the RTC. We do not use the periodic interrupt
 * feature, even though the HPET supports periodic interrupts on Timer 1.
 * The reason is that to set up a periodic interrupt in the HPET, we would
 * need to stop the main counter, and doing that every time someone
 * disables/enables the RTC would adversely affect the main kernel timer
 * running on Timer 0. So, for the time being, we simulate the periodic
 * interrupt in software.
 *
 * hpet_rtc_timer_init() is called the first time; during subsequent
 * interrupts, reinitialization happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
	/*
	 * Set up counter 1 and enable the interrupts.
	 */
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	local_irq_save(flags);
	cnt = hpet_readl(HPET_COUNTER);
	cnt += ((hpet_tick * HZ) / hpet_rtc_int_freq);
	hpet_writel(cnt, HPET_T1_CMP);
	local_irq_restore(flags);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return 1;
}
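
/*
 * Worked example of the comparator step above, with illustrative numbers
 * (HZ=1000 and hpet_tick ~= 14318 for a 14.31818 MHz HPET; neither is
 * assumed by the code): hpet_tick * HZ ~= 14318000 HPET clocks per
 * second, so at the default 64 Hz polling rate the comparator advances by
 *
 *	14318000 / 64 ~= 223718 clocks, about 15.6 ms
 *
 * per simulated RTC interrupt.
 */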

static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, cnt;

	if (!(PIE_on | AIE_on | UIE_on))
		return;

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than the current
	   count. */
	cnt = hpet_readl(HPET_T1_CMP);
	cnt += hpet_tick * HZ / hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);
}

/*
 * The functions below are called from the rtc driver.
 * Return 0 if the HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	if (bit_mask & RTC_UIE)
		UIE_on = 0;
	if (bit_mask & RTC_PIE)
		PIE_on = 0;
	if (bit_mask & RTC_AIE)
		AIE_on = 0;

	return 1;
}

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	int timer_init_reqd = 0;

	if (!is_hpet_enabled())
		return 0;

	if (!(PIE_on | AIE_on | UIE_on))
		timer_init_reqd = 1;

	if (bit_mask & RTC_UIE)
		UIE_on = 1;
	if (bit_mask & RTC_PIE) {
		PIE_on = 1;
		PIE_count = 0;
	}
	if (bit_mask & RTC_AIE)
		AIE_on = 1;

	if (timer_init_reqd)
		hpet_rtc_timer_init();

	return 1;
}

int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	alarm_time.tm_hour = hrs;
	alarm_time.tm_min = min;
	alarm_time.tm_sec = sec;

	return 1;
}

int hpet_set_periodic_freq(unsigned long freq)
{
	if (!is_hpet_enabled())
		return 0;

	PIE_freq = freq;
	PIE_count = 0;

	return 1;
}

int hpet_rtc_dropped_irq(void)
{
	if (!is_hpet_enabled())
		return 0;

	return 1;
}

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;
	int call_rtc_interrupt = 0;

	hpet_rtc_timer_reinit();

	if (UIE_on | AIE_on)
		rtc_get_rtc_time(&curr_time);
	if (UIE_on) {
		if (curr_time.tm_sec != prev_update_sec) {
			/* Set update int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag = RTC_UF;
			prev_update_sec = curr_time.tm_sec;
		}
	}
	if (PIE_on) {
		PIE_count++;
		if (PIE_count >= hpet_rtc_int_freq / PIE_freq) {
			/* Set periodic int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_PF;
			PIE_count = 0;
		}
	}
	if (AIE_on) {
		if ((curr_time.tm_sec == alarm_time.tm_sec) &&
		    (curr_time.tm_min == alarm_time.tm_min) &&
		    (curr_time.tm_hour == alarm_time.tm_hour)) {
			/* Set alarm int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_AF;
		}
	}
	if (call_rtc_interrupt) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		rtc_interrupt(rtc_int_flag, dev_id, regs);
	}
	return IRQ_HANDLED;
}
#endif

static int __init nohpet_setup(char *s)
{
	nohpet = 1;
	return 0;
}

__setup("nohpet", nohpet_setup);

static int __init notsc_setup(char *s)
{
	notsc = 1;
	return 0;
}

__setup("notsc", notsc_setup);