/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq(), which uses preempt_count() on m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

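/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized
 * @cc:			Cycle counter to use, ready for reading
 * @start_tstamp:	Arbitrary initial time stamp in nanoseconds
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */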
void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL(timecounter_init);

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:		Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}

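/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter
 *
 * In other words, this keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */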
u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL(timecounter_read);

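/**
 * timecounter_cyc2time - convert a cycle counter reading to time
 * @tc:			Pointer to time counter
 * @cycle_tstamp:	A cycle counter value previously read via tc->cc
 *
 * Returns the nanosecond time stamp corresponding to @cycle_tstamp.
 * Time stamps further than half the counter range ahead of
 * tc->cycle_last cannot be distinguished from past ones and are
 * treated as old time stamps.
 */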
u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as an old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else {
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
	}

	return nsec;
}
EXPORT_SYMBOL(timecounter_cyc2time);

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static struct work_struct watchdog_work;
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

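/*
 * Mark a clocksource unstable and defer the rating change to the
 * watchdog work. Must be called with watchdog_lock held.
 */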
static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

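/*
 * Timer callback: compare every watched clocksource against the
 * watchdog clocksource, mark those that drift beyond the threshold
 * unstable, and re-arm the timer on the next online CPU so the CPUs
 * are cross-checked round-robin.
 */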
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	wdnow = watchdog->read(watchdog);
	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
				     watchdog->mult, watchdog->shift);
	watchdog_last = wdnow;

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			schedule_work(&watchdog_work);
			continue;
		}

		csnow = cs->read(cs);

		/* Clocksource initialized? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->wd_last = csnow;
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

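/*
 * Start the watchdog timer if there is something to watch and it is
 * not running yet. Must be called with watchdog_lock held.
 */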
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	INIT_WORK(&watchdog_work, clocksource_watchdog_work);
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_last = watchdog->read(watchdog);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

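/*
 * Stop the watchdog timer when the watchdog or the last watched
 * clocksource goes away. Must be called with watchdog_lock held.
 */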
static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

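/*
 * Clear the watchdog state of all watched clocksources, forcing them
 * to be re-initialized on the next watchdog run. Must be called with
 * watchdog_lock held.
 */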
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

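/*
 * Locked wrapper around clocksource_reset_watchdog(), used after
 * resume and after exception contexts such as kgdb.
 */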
static void clocksource_resume_watchdog(void)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	clocksource_reset_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

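/*
 * Add a clocksource to the watchdog machinery: clocksources which
 * must be verified go on the watched list, all others are candidates
 * for the watchdog role, where the one with the best rating wins.
 */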
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

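/*
 * Remove a clocksource from the watchdog machinery. If the removed
 * clocksource was the watchdog itself, the best remaining candidate
 * takes over.
 */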
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

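/*
 * Kthread worker: move all clocksources which the watchdog marked
 * unstable off the watchdog list and degrade their rating to zero,
 * which also triggers reselection of the best clocksource.
 */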
static int clocksource_watchdog_kthread(void *data)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);

	mutex_lock(&clocksource_mutex);
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	mutex_unlock(&clocksource_mutex);
	return 0;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	mutex_lock(&clocksource_mutex);

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume();

	clocksource_resume_watchdog();

	mutex_unlock(&clocksource_mutex);
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

#ifdef CONFIG_GENERIC_TIME

static int finished_booting;

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which was selected by the userspace override.
 */
static void clocksource_select(void)
{
	struct clocksource *best, *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		timekeeping_notify(curr_clocksource);
	}
}

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	finished_booting = 1;
	clocksource_select();
	return 0;
}
fs_initcall(clocksource_done_booting);

#else /* CONFIG_GENERIC_TIME */

static inline void clocksource_select(void) { }

#endif

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}

/**
 * clocksource_register - Used to install new clocksources
 * @cs:		clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_select();
	clocksource_enqueue_watchdog(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);

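/* Caller must hold clocksource_mutex. */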
static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/**
 * clocksource_unregister - remove a registered clocksource
 */
void clocksource_unregister(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_dequeue_watchdog(cs);
	list_del(&cs->list);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing the current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  const char *buf, size_t count)
{
	size_t ret = count;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;

	mutex_lock(&clocksource_mutex);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;
	clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksources
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}

/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};

static int __init init_clocksource_sysfs(void)
{
	int error = sysdev_class_register(&clocksource_sysclass);

	if (!error)
		error = sysdev_register(&device_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_current_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char *str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
		       "Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
	       "Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);