/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

#include "trace.h"

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;
	int resched;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	resched = ftrace_preempt_disable();
	clock = sched_clock();
	ftrace_preempt_enable(resched);

	return clock;
}
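
/*
 * Illustrative sketch only: a caller could use trace_clock_local() to
 * time a short, CPU-local section of code. The helper name below is
 * hypothetical and not part of the tracing API; the delta is only
 * meaningful when both reads happen on the same CPU without an idle
 * event in between, as noted above.
 */
static u64 __maybe_unused example_local_delta(void (*fn)(void))
{
	u64 start = trace_clock_local();

	fn();
	return trace_clock_local() - start;
}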

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}
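
/*
 * Illustrative sketch only: because trace_clock() timestamps taken on
 * different CPUs may disagree by up to roughly one jiffy, a consumer
 * comparing timestamps from different CPUs could treat small
 * inversions as effectively concurrent rather than strictly ordered.
 * The helper and the skew constant are hypothetical examples, not part
 * of the tracing API; the value assumes HZ=1000 (1 jiffy ~= 1 ms).
 */
static bool __maybe_unused example_definitely_before(u64 ts_a, u64 ts_b)
{
	const u64 max_cross_cpu_skew_ns = 1000000;	/* assumed ~1 jiffy */

	return ts_a + max_cross_cpu_skew_ns < ts_b;
}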

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
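
/*
 * Illustrative sketch only: outside of NMI context, trace_clock_global()
 * never returns a value smaller than a previously returned one, even
 * when successive readers run on different CPUs, so two timestamps can
 * be subtracted directly. The helper is a hypothetical example, not
 * part of the tracing API.
 */
static u64 __maybe_unused example_global_delta(u64 earlier)
{
	u64 now = trace_clock_global();

	/* now >= earlier holds outside NMI context, per the code above. */
	return now - earlier;
}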