/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and add the unstable sched_clock() deltas. The
 * deltas are filtered, keeping the clock monotonic and within an expected
 * window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 2 jiffies difference).
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
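/*
 * Note: this fallback is jiffies based, so it only advances in steps of
 * NSEC_PER_SEC / HZ; architectures with a finer-grained counter are
 * expected to provide their own sched_clock().
 */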

static __read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

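/*
 * Per-cpu clock state: the raw sched_clock() and GTOD values sampled at
 * the last tick, plus the last filtered clock value that was handed out.
 */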
struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
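/*
 * Example: near a u64 wrap, wrap_max(1, ULLONG_MAX) returns 1 because
 * (s64)(1 - ULLONG_MAX) == 2 is positive, i.e. 1 is treated as coming
 * just after ULLONG_MAX rather than nearly 2^64 ns before it.
 */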

/*
 * update the per-cpu scd from a freshly read sched_clock() value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

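	/*
	 * Lockless update: if another context modified scd->clock since we
	 * read old_clock, the cmpxchg64() fails and we redo the computation
	 * with fresh values.
	 */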
	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;

	/*
	 * Couple the two clocks: take the larger time as the latest time
	 * for both runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

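	/*
	 * Publish the larger value: if the clock we are updating changed
	 * under us, the cmpxchg64() fails and we retry with fresh samples.
	 */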
	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

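/*
 * sched_clock_cpu() - return the filtered, per-cpu monotonic clock for @cpu.
 * Callers must have interrupts disabled; see the WARN_ON_ONCE() below.
 */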
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (sched_clock_stable)
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);

	return clock;
}

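/*
 * Resample the raw sched_clock() and GTOD values at tick time so the
 * per-tick filter window in sched_clock_local() keeps tracking real time.
 */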
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

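/*
 * cpu_clock() - like sched_clock_cpu(), but safe to call with interrupts
 * enabled: it disables them around the call so the irqs-disabled
 * requirement above is always satisfied.
 */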
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);