/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
#define NS_PER_TICK	(1000000000LL / HZ)
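/*
 * NS_PER_TICK is used by do_stolen_accounting() below to convert the
 * nanoseconds of stolen time reported by Xen into scheduler ticks.
 */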

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);

/* unused ns of stolen time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);

/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}

/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

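/*
 * Register this cpu's xen_runstate area with the hypervisor so that
 * Xen keeps it updated with the vcpu's scheduling state.
 */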
void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}

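/*
 * Account the time this vcpu spent runnable (waiting for a physical cpu)
 * or offline since the last snapshot as "stolen" ticks.  Nanoseconds that
 * don't add up to a whole tick are carried over in xen_residual_stolen.
 */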
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(xen_runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing* */
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time. */
	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__this_cpu_write(xen_residual_stolen, stolen);
	account_steal_ticks(ticks);
}

/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	return pvclock_tsc_khz(info);
}

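/*
 * Read the current system time, in nanoseconds, from this vcpu's
 * pvclock time info.  Preemption is disabled around the read so the
 * per-cpu xen_vcpu pointer stays valid.
 */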
cycle_t xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	preempt_disable_notrace();
	src = &__get_cpu_var(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}

static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
	struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}

static unsigned long xen_get_wallclock(void)
{
	struct timespec ts;

	xen_read_wallclock(&ts);
	return ts.tv_sec;
}

static int xen_set_wallclock(unsigned long now)
{
	struct xen_platform_op op;
	int rc;

	/* do nothing for domU */
	if (!xen_initial_domain())
		return -1;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now;
	op.u.settime.nsecs = 0;
	op.u.settime.system_time = xen_clocksource_read();

	rc = HYPERVISOR_dom0_op(&op);
	WARN(rc != 0, "XENPF_settime failed: now=%ld\n", now);

	return rc;
}

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
   Xen clockevent implementation

   Xen has two clockevent implementations:

   The old timer_op one works with all released versions of Xen prior
   to version 3.0.4.  This version of the hypervisor provides a
   single-shot timer with nanosecond resolution.  However, sharing the
   same event channel is a 100Hz tick which is delivered while the
   vcpu is running.  We don't care about or use this tick, but it will
   cause the core time code to think the timer fired too soon, and
   will end up resetting it each time.  It could be filtered, but
   doing so has complications when the ktime clocksource is not yet
   the xen clocksource (ie, at boot time).

   The new vcpu_op-based timer interface allows the tick timer period
   to be changed or turned off.  The tick timer is not useful as a
   periodic timer because events are only delivered to running vcpus.
   The one-shot timer can report when a timeout is in the past, so
   set_next_event is capable of returning -ETIME when appropriate.
   This interface is used when available.
*/


/*
   Get a hypervisor absolute time.  In theory we could maintain an
   offset between the kernel's time and the hypervisor's time, and
   apply that to a kernel's absolute timeout.  Unfortunately the
   hypervisor and kernel times can drift even if the kernel is using
   the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);  /* cancel timeout */
		break;
	}
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};


static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);	/* unsupported */
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;

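/*
 * Per-cpu clockevent state.  The interrupt name string is kept next to
 * the clock_event_device so xen_teardown_timer() can kfree() it, and
 * .evt.irq is initialised to -1 to mean "no timer irq bound yet".
 */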
struct xen_clock_event_device {
	struct clock_event_device evt;
	char *name;
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };

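/*
 * Timer interrupt handler: run this cpu's clockevent handler (if one has
 * been registered) and update the stolen-time accounting.
 */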
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt;
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	do_stolen_accounting();

	return ret;
}

void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;
	BUG_ON(cpu == 0);
	evt = &per_cpu(xen_clock_events, cpu).evt;

	if (evt->irq >= 0) {
		unbind_from_irqhandler(evt->irq, NULL);
		evt->irq = -1;
		kfree(per_cpu(xen_clock_events, cpu).name);
		per_cpu(xen_clock_events, cpu).name = NULL;
	}
}

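/*
 * Bind VIRQ_TIMER for this cpu to xen_timer_interrupt and initialise the
 * cpu's clockevent device from the chosen template; the device itself is
 * registered later by xen_setup_cpu_clockevents().
 */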
void xen_setup_timer(int cpu)
{
	char *name;
	struct clock_event_device *evt;
	int irq;

	evt = &per_cpu(xen_clock_events, cpu).evt;
	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
	if (evt->irq >= 0)
		xen_teardown_timer(cpu);

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
	if (!name)
		name = "<timer kasprintf failed>";

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_DISABLED|IRQF_PERCPU|
				      IRQF_NOBALANCING|IRQF_TIMER|
				      IRQF_FORCE_RESUME,
				      name, NULL);

	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
	per_cpu(xen_clock_events, cpu).name = name;
}


void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());

	clockevents_register_device(&__get_cpu_var(xen_clock_events).evt);
}

void xen_timer_resume(void)
{
	int cpu;

	pvclock_resume();

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
	}
}

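/* Paravirt time ops: sched_clock reads the Xen pvclock directly. */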
static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock = xen_clocksource_read,
};

static void __init xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();
}

void __init xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();
	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) is not called here: snprintf is bad in
	 * atomic context.  It is done in xen_hvm_cpu_notify() instead
	 * (which gets called by smp_init during early bootup and also
	 * during CPU hotplug events).
	 */
	xen_setup_cpu_clockevents();
}

void __init xen_hvm_init_time_ops(void)
{
	/* The vector callback is needed; otherwise we cannot receive
	 * interrupts on cpus > 0, and at this point we don't know how
	 * many cpus are available. */
	if (!xen_have_vector_callback)
		return;
	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
				 "disable pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
#endif