/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	u64 clc = (u64) latch << evt->shift;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}

	do_div(clc, evt->mult);
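	/* Clamp the result: never less than 1us, never beyond KTIME_MAX. */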
	if (clc < 1000)
		clc = 1000;
	if (clc > KTIME_MAX)
		clc = KTIME_MAX;

	return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:	device to modify
 * @mode:	new mode
 *
 * Must be called with interrupts disabled !
 */
void clockevents_set_mode(struct clock_event_device *dev,
			  enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		dev->set_mode(mode, dev);
		dev->mode = mode;

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (mode == CLOCK_EVT_MODE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
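	/* KTIME_MAX marks "no event pending" on this device. */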
	dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
		dev->next_event.tv64 = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
	       dev->name ? dev->name : "?",
	       (unsigned long long) dev->min_delta_ns);
	return 0;
}

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

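	/*
	 * Loop until the device accepts the event or min_delta_ns hits
	 * the jiffie limit (MIN_DELTA_LIMIT).
	 */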
	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

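	/*
	 * Convert the absolute expiry to a delta relative to now; a
	 * non-positive delta means the event is already in the past.
	 */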
	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

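	/* Scale nanoseconds to device ticks: ticks = ns * mult >> shift. */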
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
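	/* No cpumask from the driver: assume a UP system and bind to this CPU. */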
	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
}

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events with interrupts disabled! Returns 0 on success,
 * -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return 0;

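	/* Reprogram the pending event with the freshly computed mult/shift. */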
	return clockevents_program_event(dev, dev->next_event, false);
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend)
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume)
			dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, arg);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(arg);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(arg);
		tick_shutdown_broadcast(arg);
		tick_shutdown(arg);
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif