/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	unsigned long flags;
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
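	/*
	 * IRQs are disabled around the read: some sched_clock()
	 * implementations rely on per-CPU state, so keep this task from
	 * being preempted and migrated to another CPU mid-read.
	 */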
	raw_local_irq_save(flags);
	clock = sched_clock();
	raw_local_irq_restore(flags);

	return clock;
}

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}


/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	raw_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
	};
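/*
 * Note the raw lock and the __raw_spin_lock() primitives below: they
 * bypass lockdep and the normal spinlock debug/trace hooks, so taking
 * a timestamp cannot recurse back into the tracer or lockdep itself.
 */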

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups: the NMI may
	 * have fired while this CPU already held trace_clock_struct.lock,
	 * and spinning on it here would deadlock. Return the plain
	 * cpu_clock() time instead:
	 */
	if (unlikely(in_nmi()))
		goto out;

	__raw_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
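	/*
	 * The (s64) cast of the unsigned difference goes negative
	 * exactly when 'now' is behind the last timestamp handed out,
	 * even across a u64 wrap; such readings are clamped to
	 * prev_time + 1 so the global clock never appears to move
	 * backwards.
	 */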
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	__raw_spin_unlock(&trace_clock_struct.lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
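
/*
 * Illustrative sketch only, not part of the original file: how a
 * tracer might pick between the clocks above. The helper name
 * my_tracer_timestamp() is hypothetical; trace_clock_local() and
 * trace_clock_global() are the real exports implemented here.
 */
#if 0
static u64 notrace my_tracer_timestamp(bool need_global_order)
{
	/*
	 * Cheap per-CPU timestamps when events are never merged
	 * across CPUs; the serialized global clock when they are.
	 */
	return need_global_order ? trace_clock_global() :
				   trace_clock_local();
}
#endif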