/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 * - local: CPU-local trace clock
 * - medium: scalable global clock with some jitter
 * - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks; an
 * illustrative selection table follows the includes below.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

#include "trace.h"

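/*
 * Illustrative sketch (hypothetical, not part of this file): a tracer
 * could expose the three variants through a small name -> function
 * table like the one below. example_trace_clocks[] is an assumption
 * for illustration; only the clock functions themselves are real.
 */
#if 0
static struct {
	u64 (*func)(void);	/* clock read function */
	const char *name;	/* name a tracer could expose */
} example_trace_clocks[] = {
	{ trace_clock_local,	"local"  },	/* fastest, single-CPU only */
	{ trace_clock,		"medium" },	/* scalable, ~1 jiffy jitter */
	{ trace_clock_global,	"global" },	/* serialized, monotonic */
};
#endif
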
/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;
	int resched;

	/*
	 * sched_clock() is an architecture-implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	resched = ftrace_preempt_disable();
	clock = sched_clock();
	ftrace_preempt_enable(resched);

	return clock;
}
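
/*
 * Usage sketch (hypothetical, not part of this file): because the local
 * clock is only coherent on one CPU, a delta is only meaningful when
 * both reads happen on the same CPU. do_something_short() is an assumed
 * placeholder for the work being timed.
 */
#if 0
static void example_time_section(void)
{
	u64 t0, t1;

	preempt_disable();	/* stay on one CPU between the two reads */
	t0 = trace_clock_local();
	do_something_short();	/* hypothetical work being timed */
	t1 = trace_clock_local();
	preempt_enable();

	pr_info("section took %llu ns\n", (unsigned long long)(t1 - t0));
}
#endif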

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}
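
/*
 * Usage sketch (hypothetical): since trace_clock() allows ~1 jiffy of
 * cross-CPU jitter, a consumer merging per-CPU event streams can only
 * order two stamps with confidence when they differ by more than that
 * slack; example_compare_stamps() below is an illustrative helper.
 */
#if 0
static int example_compare_stamps(u64 a, u64 b)
{
	/* ~1 jiffy expressed in nanoseconds */
	u64 slack = (u64)jiffies_to_usecs(1) * NSEC_PER_USEC;

	if (a + slack < b)
		return -1;	/* a happened before b */
	if (b + slack < a)
		return 1;	/* b happened before a */
	return 0;		/* within cross-CPU jitter: unordered */
}
#endif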


/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
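
/*
 * The clamp above, shown in isolation (illustrative sketch, not part of
 * this file): take a possibly-backwards local timestamp and pin it just
 * past the last globally issued one, so readers always see a monotonic
 * stream. The signed cast keeps the comparison correct across u64 wrap.
 * The caller is assumed to hold a lock serializing access to *prev.
 */
#if 0
static u64 example_monotonic_clamp(u64 *prev, u64 now)
{
	if ((s64)(now - *prev) < 0)	/* now is behind the last stamp */
		now = *prev + 1;	/* nudge it forward instead */
	*prev = now;
	return now;
}
#endif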