/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
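
/*
 * How these clocks are consumed (a hedged sketch; the table below is an
 * illustrative assumption, not defined in this file): the core tracer
 * keeps a name -> function table, e.g. in kernel/trace/trace.c, and
 * users can switch clocks at runtime by writing a name to the tracefs
 * "trace_clock" file:
 *
 *	static struct {
 *		u64 (*func)(void);
 *		const char *name;
 *	} trace_clocks[] = {
 *		{ trace_clock_local,	"local"   },
 *		{ trace_clock_global,	"global"  },
 *		{ trace_clock_counter,	"counter" },
 *	};
 *
 *	// e.g.: echo counter > /sys/kernel/tracing/trace_clock
 */
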
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs and does
 * not span CPU idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture-implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
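
/*
 * A minimal usage sketch (illustrative, not part of this file): deltas
 * from trace_clock_local() are only meaningful when both samples were
 * taken on the same CPU, e.g. from a preemption-disabled region:
 *
 *	u64 t0, t1;
 *
 *	preempt_disable();
 *	t0 = trace_clock_local();
 *	do_work();			// hypothetical workload
 *	t1 = trace_clock_local();
 *	preempt_enable();
 *	// t1 - t0 is a valid per-CPU delta; timestamps taken on
 *	// different CPUs may appear to go "backwards".
 */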

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on local_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
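
/*
 * Rough scale of that jitter (illustrative arithmetic): ~1 jiffy is
 * 1/HZ seconds, so with HZ=1000 two CPUs may disagree by up to about
 * 1,000,000 ns, and with HZ=250 by up to about 4,000,000 ns.
 */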

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 */
u64 notrace trace_clock_jiffies(void)
{
	u64 jiffy = jiffies - INITIAL_JIFFIES;

	/* Return nsecs */
	return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
}
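
/*
 * A worked example (illustrative): with HZ=250 one jiffy is 4 ms, so
 * jiffies_to_usecs(1) == 4000 and the returned value advances in
 * 4,000,000 ns steps. Subtracting INITIAL_JIFFIES (jiffies boots a few
 * minutes before its wraparound point to flush out wrap bugs) makes
 * this clock start near zero at boot.
 */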

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD-derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
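
/*
 * Why the clamp above keeps this clock monotonic (illustrative trace):
 * suppose CPU0 publishes prev_time = 1000 and CPU1's sched_clock_cpu()
 * then reads 990 (10 ns behind). Under the lock, (s64)(990 - 1000) < 0,
 * so CPU1 returns prev_time + 1 = 1001 and stores it as the new
 * prev_time. Every reader thus observes a non-decreasing sequence, at
 * the cost of serializing on one global cacheline.
 */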

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use this "counter" clock for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
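
/*
 * A usage sketch (illustrative): because atomic64_add_return() is a
 * single atomic read-modify-write, concurrent callers on any CPUs
 * receive unique, strictly increasing values, giving a total order of
 * events with no notion of real time:
 *
 *	u64 a = trace_clock_counter();	// e.g. returns 41
 *	u64 b = trace_clock_counter();	// returns 42; always b > a
 */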