/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>

#include <asm/sched_clock.h>

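/*
 * State for the extended clock: the (epoch_cyc, epoch_ns) snapshot plus
 * the mult/shift scaling factors. epoch_cyc_copy lets readers detect a
 * concurrent update without taking a lock (see cyc_to_sched_clock()).
 */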
struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	u32 mult;
	u32 shift;
	bool suspended;
	bool needs_suspend;
};

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u32 __read_mostly sched_clock_mask = 0xffffffff;

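/*
 * Default clock read function: count jiffies. It is used until (unless)
 * a platform registers a real counter via setup_sched_clock().
 */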
static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
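
/*
 * Worked example of the fixed-point scaling above (illustrative values,
 * not taken from this file): for a 1MHz counter one tick is 1000ns, so
 * a pair such as mult = 1000 << 20 and shift = 20 gives
 * cyc_to_ns(1, mult, shift) = (1 * 1048576000) >> 20 = 1000ns.
 * clocks_calc_mult_shift() in setup_sched_clock() computes a suitable
 * pair for the registered rate.
 */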
static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

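	/*
	 * (cyc - epoch_cyc) & mask handles counter wraparound: e.g. with
	 * mask = 0xffffffff, epoch_cyc = 0xfffffff0 and cyc = 0x10, the
	 * masked difference is 0x20 ticks, as expected.
	 */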
	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc_copy = cyc;
	raw_local_irq_restore(flags);
}

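/*
 * Periodic timer callback: re-arm the timer and advance the epoch before
 * the underlying counter can wrap. The interval is set to ~90% of the
 * wrap period in setup_sched_clock().
 */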
static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	WARN_ON(read_sched_clock != jiffy_sched_clock_read);
	read_sched_clock = read;
	sched_clock_mask = (1 << bits) - 1;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch. The timer re-arms at ~90% of the wrap
	 * period, so the epoch always advances before the counter wraps.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	pr_debug("Registered %pF as sched_clock source\n", read);
}

unsigned long long notrace sched_clock(void)
{
	u32 cyc = read_sched_clock();
	return cyc_to_sched_clock(cyc, sched_clock_mask);
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock() function has been provided by this point,
	 * make the jiffy counter the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}

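/*
 * On suspend, take a final epoch snapshot and freeze sched_clock() at
 * cd.epoch_ns, since the counter may stop or lose state while the
 * system is down.
 */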
static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	cd.suspended = true;
	return 0;
}

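/*
 * On resume, re-read the counter to establish a new epoch cycle count,
 * then unfreeze. epoch_ns is left untouched, so sched_clock() appears
 * to have stood still for the duration of the suspend.
 */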
static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume	= sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);