/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>

#include <asm/sched_clock.h>

struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

Russell King112f38a42010-12-15 19:23:07 +000029static void sched_clock_poll(unsigned long wrap_ticks);
30static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
Russell Kinga42c3622012-09-09 18:39:28 +010031static int irqtime = -1;
32
33core_param(irqtime, irqtime, int, 0400);
Marc Zyngier2f0778af2011-12-15 12:19:23 +010034
35static struct clock_data cd = {
36 .mult = NSEC_PER_SEC / HZ,
37};
38
39static u32 __read_mostly sched_clock_mask = 0xffffffff;
40
static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
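
/*
 * A worked example with illustrative (not authoritative) numbers: for a
 * 1MHz counter, clocks_calc_mult_shift() below could return mult = 4096000
 * and shift = 12, so cyc_to_ns(cyc, 4096000, 12) =
 * (cyc * 4096000) >> 12 = cyc * 1000, i.e. 1000ns per counter tick.
 * The multiply-then-shift keeps the conversion cheap while preserving
 * precision for rates that do not divide NSEC_PER_SEC evenly.
 */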

static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}
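
/*
 * Illustrative sketch of the wrap handling above: with a hypothetical
 * 24-bit counter (mask = 0x00ffffff), an epoch_cyc of 0x00fffff0 and a
 * current cyc of 0x00000010 give (cyc - epoch_cyc) & mask = 0x20, so the
 * 32 ticks that elapsed across the counter wrap are still accounted for
 * correctly, provided the counter wraps at most once between updates.
 */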

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}

static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = (1ULL << bits) - 1;
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and set
	 * the initial epoch. Poll at 90% of the wrap period so that an
	 * update always lands before the counter wraps.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
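
/*
 * A minimal registration sketch (hypothetical platform code, not part of
 * this file): a platform with a free-running 32-bit counter at 24MHz
 * might register it from its clocksource init, with IRQs still disabled:
 *
 *	static void __iomem *my_counter_base;
 *
 *	static u32 notrace my_sched_clock_read(void)
 *	{
 *		return readl_relaxed(my_counter_base);
 *	}
 *
 *	setup_sched_clock(my_sched_clock_read, 32, 24000000);
 *
 * my_counter_base and my_sched_clock_read are illustrative names only.
 */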

static unsigned long long notrace sched_clock_32(void)
{
	u32 cyc = read_sched_clock();
	return cyc_to_sched_clock(cyc, sched_clock_mask);
}

unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;

unsigned long long notrace sched_clock(void)
{
	if (cd.suspended)
		return cd.epoch_ns;

	return sched_clock_func();
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided by this point,
	 * make the jiffy-based fallback the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}

static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);