/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and add the unstable sched_clock() deltas on top.
 * The deltas are filtered to keep the clock monotonic and within an expected
 * window. This window is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than one jiffy of difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
        return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
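
/*
 * Example: with HZ=1000 the fallback above advances in steps of
 * NSEC_PER_SEC / HZ = 1,000,000 ns, i.e. jiffy (1 ms) resolution;
 * with HZ=250 each step is 4,000,000 ns (4 ms).
 */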

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

struct sched_clock_data {
        /*
         * Raw spinlock - this is a special case: this might be called
         * from within instrumentation code so we don't want to do any
         * instrumentation ourselves.
         */
        raw_spinlock_t          lock;

        unsigned long           tick_jiffies;   /* jiffies at the last tick */
        u64                     tick_raw;       /* sched_clock() at the last tick */
        u64                     tick_gtod;      /* gtod (ktime) value at the last tick */
        u64                     clock;          /* current filtered clock value */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
        return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
        return &per_cpu(sched_clock_data, cpu);
}

static __read_mostly int sched_clock_running;

void sched_clock_init(void)
{
        u64 ktime_now = ktime_to_ns(ktime_get());
        unsigned long now_jiffies = jiffies;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct sched_clock_data *scd = cpu_sdc(cpu);

                scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                scd->tick_jiffies = now_jiffies;
                scd->tick_raw = 0;
                scd->tick_gtod = ktime_now;
                scd->clock = ktime_now;
        }

        sched_clock_running = 1;
}

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use jiffies to generate a min/max window to clip the raw values
 */
static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
        unsigned long now_jiffies = jiffies;
        long delta_jiffies = now_jiffies - scd->tick_jiffies;
        u64 clock = scd->clock;
        u64 min_clock, max_clock;
        s64 delta = now - scd->tick_raw;

        WARN_ON_ONCE(!irqs_disabled());
        min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;

        /* sched_clock() went backwards; nudge the clock forward minimally */
        if (unlikely(delta < 0)) {
                clock++;
                goto out;
        }

        max_clock = min_clock + TICK_NSEC;

        /* clip forward motion to the [min_clock, max_clock] window */
        if (unlikely(clock + delta > max_clock)) {
                if (clock < max_clock)
                        clock = max_clock;
                else
                        clock++;
        } else {
                clock += delta;
        }

 out:
        if (unlikely(clock < min_clock))
                clock = min_clock;

        scd->tick_jiffies = now_jiffies;
        scd->clock = clock;
}
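
/*
 * Worked example (illustrative, assuming HZ=1000 so TICK_NSEC is about
 * 1,000,000): if two jiffies have elapsed since the last tick, min_clock is
 * tick_gtod + 2 * TICK_NSEC and max_clock is min_clock + TICK_NSEC; a raw
 * sched_clock() delta that would push the clock outside that window is
 * clipped to it.
 */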

/*
 * Take both per-cpu clock locks in a fixed (address) order, so that two
 * cpus rebasing against each other cannot deadlock.
 */
static void lock_double_clock(struct sched_clock_data *data1,
                              struct sched_clock_data *data2)
{
        if (data1 < data2) {
                __raw_spin_lock(&data1->lock);
                __raw_spin_lock(&data2->lock);
        } else {
                __raw_spin_lock(&data2->lock);
                __raw_spin_lock(&data1->lock);
        }
}

u64 sched_clock_cpu(int cpu)
{
        struct sched_clock_data *scd = cpu_sdc(cpu);
        u64 now, clock;

        if (unlikely(!sched_clock_running))
                return 0ull;

        WARN_ON_ONCE(!irqs_disabled());
        now = sched_clock();

        if (cpu != raw_smp_processor_id()) {
                /*
                 * In order to update a remote cpu's clock based on our
                 * unstable raw time, rebase it against:
                 *      tick_raw        (offset between raw counters)
                 *      tick_gtod       (tick offset between cpus)
                 */
                struct sched_clock_data *my_scd = this_scd();

                lock_double_clock(scd, my_scd);

                now += scd->tick_raw - my_scd->tick_raw;
                now += my_scd->tick_gtod - scd->tick_gtod;

                __raw_spin_unlock(&my_scd->lock);
        } else {
                __raw_spin_lock(&scd->lock);
        }

        __update_sched_clock(scd, now);
        clock = scd->clock;

        __raw_spin_unlock(&scd->lock);

        return clock;
}
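
/*
 * Remote rebase example (illustrative): if the remote cpu's tick_raw is
 * 100 ns ahead of ours and its tick_gtod is 30 ns behind ours, our raw
 * 'now' is shifted by +100 ns into the remote raw base and by +30 ns to
 * undo the gtod offset, before being filtered by __update_sched_clock().
 */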

void sched_clock_tick(void)
{
        struct sched_clock_data *scd = this_scd();
        u64 now, now_gtod;

        if (unlikely(!sched_clock_running))
                return;

        WARN_ON_ONCE(!irqs_disabled());

        now_gtod = ktime_to_ns(ktime_get());
        now = sched_clock();

        __raw_spin_lock(&scd->lock);
        __update_sched_clock(scd, now);
        /*
         * Update tick_gtod after __update_sched_clock() because that call
         * will already have observed one new jiffy; updating tick_gtod
         * first would advance the clock by two jiffies.
         */
        scd->tick_raw = now;
        scd->tick_gtod = now_gtod;
        __raw_spin_unlock(&scd->lock);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
        sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
        struct sched_clock_data *scd = this_scd();
        u64 now = sched_clock();

        /*
         * Override the previous timestamp and ignore all
         * sched_clock() deltas that occurred while we idled,
         * and use the PM-provided delta_ns to advance the
         * rq clock:
         */
        __raw_spin_lock(&scd->lock);
        scd->clock += delta_ns;
        __raw_spin_unlock(&scd->lock);

        touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
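
/*
 * Typical use from an idle driver (illustrative):
 *
 *      sched_clock_idle_sleep_event();
 *      ... enter a deep idle state; the TSC may stop ...
 *      sched_clock_idle_wakeup_event(slept_ns);
 *
 * where slept_ns is the length of the idle period as measured by a clock
 * that keeps running while the cpu sleeps (e.g. the PM timer).
 */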

#endif

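/*
 * cpu_clock() is the safe way to read a cpu's clock from any context:
 * it disables interrupts around the sched_clock_cpu() call.
 */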
unsigned long long cpu_clock(int cpu)
{
        unsigned long long clock;
        unsigned long flags;

        local_irq_save(flags);
        clock = sched_clock_cpu(cpu);
        local_irq_restore(flags);

        return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);