/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time.
 * - for astronomical applications: add a new function to get
 * non ambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
	.name         = "rtc",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = timebase_read,
};

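/*
 * Largest value we program into the decrementer: it is a 32-bit register,
 * and on classic processors the decrementer exception fires once the value
 * goes negative, so only positive values are loaded.
 */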
#define DECREMENTER_MAX	0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev);

struct clock_event_device decrementer_clockevent = {
	.name           = "decrementer",
	.rating         = 200,
	.irq            = 0,
	.set_next_event = decrementer_set_next_event,
	.set_mode       = decrementer_set_mode,
	.features       = CLOCK_EVT_FEAT_ONESHOT,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

cputime_t cputime_one_jiffy;

void (*dtl_consumer)(struct dtl_entry *, u64);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
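
/*
 * For example, with tb_ticks_per_sec = 512000000 and HZ = 250,
 * __cputime_jiffies_factor is roughly (250 / 512e6) * 2^64, so a tick
 * count is converted to jiffies with a single high multiply,
 * mulhdu(ticks, __cputime_jiffies_factor), as the cputime conversion
 * helpers in <asm/cputime.h> do.
 */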

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static u64 read_spurr(u64 tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == vpa->dtl_idx)
		return 0;
	while (i < vpa->dtl_idx) {
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		dtb = dtl->timebase;
		tb_delta = dtl->enqueue_to_dispatch_time +
			dtl->ready_to_enqueue_time;
		barrier();
		if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
			/* buffer has overflowed */
			i = vpa->dtl_idx - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
	u64 sst, ust;

	u8 save_soft_enabled = local_paca->soft_enabled;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain
	 */
	local_paca->soft_enabled = 0;

	sst = scan_dispatch_log(local_paca->starttime_user);
	ust = scan_dispatch_log(local_paca->starttime);
	local_paca->system_time -= sst;
	local_paca->user_time -= ust;
	local_paca->stolen_time += ust + sst;

	local_paca->soft_enabled = save_soft_enabled;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	u64 stolen = 0;

	if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
		stolen = scan_dispatch_log(stop_tb);
		get_paca()->system_time -= stolen;
	}

	stolen += get_paca()->stolen_time;
	get_paca()->stolen_time = 0;
	return stolen;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static u64 vtime_delta(struct task_struct *tsk,
			u64 *sys_scaled, u64 *stolen)
{
	u64 now, nowscaled, deltascaled;
	u64 udelta, delta, user_scaled;

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	nowscaled = read_spurr(now);
	get_paca()->system_time += now - get_paca()->starttime;
	get_paca()->starttime = now;
	deltascaled = nowscaled - get_paca()->startspurr;
	get_paca()->startspurr = nowscaled;

	*stolen = calculate_stolen_time(now);

	delta = get_paca()->system_time;
	get_paca()->system_time = 0;
	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
	get_paca()->utime_sspurr = get_paca()->user_time;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval.  The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	*sys_scaled = delta;
	user_scaled = udelta;
	if (deltascaled != delta + udelta) {
		if (udelta) {
			*sys_scaled = deltascaled * delta / (delta + udelta);
			user_scaled = deltascaled - *sys_scaled;
		} else {
			*sys_scaled = deltascaled;
		}
	}
	get_paca()->user_time_scaled += user_scaled;

	return delta;
}

void vtime_account_system(struct task_struct *tsk)
{
	u64 delta, sys_scaled, stolen;

	delta = vtime_delta(tsk, &sys_scaled, &stolen);
	account_system_time(tsk, 0, delta, sys_scaled);
	if (stolen)
		account_steal_time(stolen);
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
	u64 delta, sys_scaled, stolen;

	delta = vtime_delta(tsk, &sys_scaled, &stolen);
	account_idle_time(delta + stolen);
}

/*
 * Transfer the user time accumulated in the paca
 * by the exception entry and exit code to the generic
 * process user time records.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_system/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_account_user(struct task_struct *tsk)
{
	cputime_t utime, utimescaled;

	utime = get_paca()->user_time;
	utimescaled = get_paca()->user_time_scaled;
	get_paca()->user_time = 0;
	get_paca()->user_time_scaled = 0;
	get_paca()->utime_sspurr = 0;
	account_user_time(tsk, utime, utimescaled);
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
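/*
 * On 64-bit, register r13 always holds the pointer to the current CPU's
 * paca in the kernel, so the pending flag can be read and written with a
 * single byte load/store off r13, with no per-cpu addressing overhead.
 */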
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
	struct clock_event_device *evt = &__get_cpu_var(decrementers);
	u64 now;

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions.
	 */
	set_dec(DECREMENTER_MAX);

	/* Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these.  We also need to set
	 * decrementers_next_tb to the maximum value so that __check_irq_replay
	 * doesn't replay the timer interrupt on return, otherwise we'd trap
	 * here infinitely :(
	 */
	if (!cpu_online(smp_processor_id())) {
		*next_tb = ~(u64)0;
		return;
	}

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();

	__get_cpu_var(irq_stat).timer_irqs++;

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb_or_rtc();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
	} else {
		now = *next_tb - now;
		if (now <= DECREMENTER_MAX)
			set_dec((int)now);
	}

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	trace_timer_interrupt_exit(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest.  We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(DECREMENTER_MAX);
	local_irq_disable();
	set_dec(DECREMENTER_MAX);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

/* should become __cpuinit when secondary_cpu_time_init also is */
void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
| Tony Breeds | aa3be5f | 2007-09-21 13:26:02 +1000 | [diff] [blame] | 687 | 	/* XXX this is a litle fragile but will work okay in the short term */ | 
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}

}

/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
	return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
	return (cycle_t)get_tb();
}

void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
			struct clocksource *clock, u32 mult)
{
	u64 new_tb_to_xs, new_stamp_xsec;
	u32 frac_sec;

	if (clock != &clocksource_timebase)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/* 19342813113834067 ~= 2^(20+64) / 1e9 */
	new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;

	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
	/* this is tv_nsec / 1e9 as a 0.32 fraction */
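	/* (18446744073 ~= 2^64 / 1e9, so the top 32 bits of the product
	 * are tv_nsec / 1e9 scaled by 2^32) */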
	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criteria is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = clock->cycle_last;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wtm->tv_sec;
	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
	vdso_data->stamp_xtime = *wall_time;
	vdso_data->stamp_sec_fraction = frac_sec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}

void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}

static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
	set_dec(evt);
	return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev)
{
	if (mode != CLOCK_EVT_MODE_ONESHOT)
		decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);

	register_decrementer_clockevent(cpu);
}

void secondary_cpu_time_init(void)
{
	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here! */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();
	setup_cputime_one_jiffy();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
 | 891 | 	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res); | 
 | 892 | 	scale = res.result_low; | 
 | 893 | 	for (shift = 0; res.result_high != 0; ++shift) { | 
 | 894 | 		scale = (scale >> 1) | (res.result_high << 63); | 
 | 895 | 		res.result_high >>= 1; | 
 | 896 | 	} | 
 | 897 | 	tb_to_ns_scale = scale; | 
 | 898 | 	tb_to_ns_shift = shift; | 
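	/*
	 * Worked example (illustrative, not from the original source): with
	 * tb_ticks_per_sec = 512000000, one tick is 1e9 / 512e6 = 1.953125 ns.
	 * div128_by_32() returns that as the 64.64 value 1.953125 * 2^64, so
	 * the loop above shifts right once, leaving
	 * tb_to_ns_scale = 0.9765625 * 2^64 and tb_to_ns_shift = 1.  Keeping
	 * the scale below 1.0 guarantees a 64x64 multiply-high by it cannot
	 * overflow; sched_clock() can then recover nanoseconds as roughly
	 *   ns = mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift
	 *      = ticks * 0.9765625 * 2 = ticks * 1.953125.
	 */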
| Tony Breeds | fc9069f | 2007-07-04 14:04:31 +1000 | [diff] [blame] | 899 | 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ | 
| Benjamin Herrenschmidt | c27da339 | 2007-09-19 14:21:56 +1000 | [diff] [blame] | 900 | 	boot_tb = get_tb_or_rtc(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 901 |  | 
| Paul Mackerras | 092b8f3 | 2006-02-20 10:38:56 +1100 | [diff] [blame] | 902 | 	/* If platform provided a timezone (pmac), we correct the time */ | 
| Anton Blanchard | 621692c | 2011-11-23 20:07:21 +0000 | [diff] [blame] | 903 | 	if (timezone_offset) { | 
| Paul Mackerras | 092b8f3 | 2006-02-20 10:38:56 +1100 | [diff] [blame] | 904 | 		sys_tz.tz_minuteswest = -timezone_offset / 60; | 
 | 905 | 		sys_tz.tz_dsttime = 0; | 
| Anton Blanchard | 621692c | 2011-11-23 20:07:21 +0000 | [diff] [blame] | 906 | 	} | 
| Paul Mackerras | 092b8f3 | 2006-02-20 10:38:56 +1100 | [diff] [blame] | 907 |  | 
| Benjamin Herrenschmidt | a7f290d | 2005-11-11 21:15:21 +1100 | [diff] [blame] | 908 | 	vdso_data->tb_update_count = 0; | 
 | 909 | 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 910 |  | 
| Benjamin Herrenschmidt | 77c0a70 | 2009-08-28 14:25:04 +1000 | [diff] [blame] | 911 | 	/* Start the decrementer on CPUs that have manual control | 
 | 912 | 	 * such as BookE | 
 | 913 | 	 */ | 
 | 914 | 	start_cpu_decrementer(); | 
 | 915 |  | 
| Stephen Rothwell | f533927 | 2012-03-15 18:18:00 +0000 | [diff] [blame] | 916 | 	/* Register the clocksource */ | 
 | 917 | 	clocksource_init(); | 
| Tony Breeds | 4a4cfe3 | 2007-09-22 07:35:52 +1000 | [diff] [blame] | 918 |  | 
| Tony Breeds | d831d0b | 2007-09-21 13:26:03 +1000 | [diff] [blame] | 919 | 	init_decrementer_clockevent(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 920 | } | 
 | 921 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 922 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 923 | #define FEBRUARY	2 | 
 | 924 | #define	STARTOFTIME	1970 | 
 | 925 | #define SECDAY		86400L | 
 | 926 | #define SECYR		(SECDAY * 365) | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 927 | #define	leapyear(year)		((year) % 4 == 0 && \ | 
 | 928 | 				 ((year) % 100 != 0 || (year) % 400 == 0)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | #define	days_in_year(a) 	(leapyear(a) ? 366 : 365) | 
 | 930 | #define	days_in_month(a) 	(month_days[(a) - 1]) | 
 | 931 |  | 
 | 932 | static int month_days[12] = { | 
 | 933 | 	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 | 
 | 934 | }; | 
 | 935 |  | 
 | 936 | /* | 
 | 937 |  * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) | 
 | 938 |  */ | 
 | 939 | void GregorianDay(struct rtc_time * tm) | 
 | 940 | { | 
 | 941 | 	int leapsToDate; | 
 | 942 | 	int lastYear; | 
 | 943 | 	int day; | 
 | 944 | 	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; | 
 | 945 |  | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 946 | 	lastYear = tm->tm_year - 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 947 |  | 
 | 948 | 	/* | 
 | 949 | 	 * Number of leap corrections to apply up to end of last year | 
 | 950 | 	 */ | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 951 | 	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 952 |  | 
 | 953 | 	/* | 
 | 954 | 	 * This year is a leap year if it is divisible by 4 except when it is | 
 | 955 | 	 * divisible by 100 unless it is divisible by 400 | 
 | 956 | 	 * | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 957 | 	 * e.g. 1904 was a leap year, 1900 was not, 1996 was, and 2000 was | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 958 | 	 */ | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 959 | 	day = tm->tm_mon > 2 && leapyear(tm->tm_year); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 |  | 
 | 961 | 	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + | 
 | 962 | 		   tm->tm_mday; | 
 | 963 |  | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 964 | 	tm->tm_wday = day % 7; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 965 | } | 
 | 966 |  | 
 | 967 | void to_tm(int tim, struct rtc_time * tm) | 
 | 968 | { | 
 | 969 | 	register int    i; | 
 | 970 | 	register long   hms, day; | 
 | 971 |  | 
 | 972 | 	day = tim / SECDAY; | 
 | 973 | 	hms = tim % SECDAY; | 
 | 974 |  | 
 | 975 | 	/* Hours, minutes, seconds are easy */ | 
 | 976 | 	tm->tm_hour = hms / 3600; | 
 | 977 | 	tm->tm_min = (hms % 3600) / 60; | 
 | 978 | 	tm->tm_sec = (hms % 3600) % 60; | 
 | 979 |  | 
 | 980 | 	/* Number of years in days */ | 
 | 981 | 	for (i = STARTOFTIME; day >= days_in_year(i); i++) | 
 | 982 | 		day -= days_in_year(i); | 
 | 983 | 	tm->tm_year = i; | 
 | 984 |  | 
 | 985 | 	/* Number of months in days left */ | 
 | 986 | 	if (leapyear(tm->tm_year)) | 
 | 987 | 		days_in_month(FEBRUARY) = 29; | 
 | 988 | 	for (i = 1; day >= days_in_month(i); i++) | 
 | 989 | 		day -= days_in_month(i); | 
 | 990 | 	days_in_month(FEBRUARY) = 28; | 
 | 991 | 	tm->tm_mon = i; | 
 | 992 |  | 
 | 993 | 	/* Days are what is left over (+1) from all that. */ | 
 | 994 | 	tm->tm_mday = day + 1; | 
 | 995 |  | 
 | 996 | 	/* | 
 | 997 | 	 * Determine the day of week | 
 | 998 | 	 */ | 
 | 999 | 	GregorianDay(tm); | 
 | 1000 | } | 
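/*
 * Illustrative usage (not part of the original file): converting the epoch
 * itself shows the field conventions produced above.
 */
#if 0	/* illustrative example only */
static void example_to_tm(void)
{
	struct rtc_time tm;

	to_tm(0, &tm);
	/*
	 * tm now describes 1970-01-01 00:00:00: tm_year == 1970, tm_mon == 1,
	 * tm_mday == 1, tm_hour == tm_min == tm_sec == 0 and tm_wday == 4
	 * (Thursday).
	 */
}
#endif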
 | 1001 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 | /* | 
 | 1003 |  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit | 
 | 1004 |  * result. | 
 | 1005 |  */ | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 1006 | void div128_by_32(u64 dividend_high, u64 dividend_low, | 
 | 1007 | 		  unsigned divisor, struct div_result *dr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1008 | { | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 1009 | 	unsigned long a, b, c, d; | 
 | 1010 | 	unsigned long w, x, y, z; | 
 | 1011 | 	u64 ra, rb, rc; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1012 |  | 
 | 1013 | 	a = dividend_high >> 32; | 
 | 1014 | 	b = dividend_high & 0xffffffff; | 
 | 1015 | 	c = dividend_low >> 32; | 
 | 1016 | 	d = dividend_low & 0xffffffff; | 
 | 1017 |  | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 1018 | 	w = a / divisor; | 
 | 1019 | 	ra = ((u64)(a - (w * divisor)) << 32) + b; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 |  | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 1021 | 	rb = ((u64) do_div(ra, divisor) << 32) + c; | 
 | 1022 | 	x = ra; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1023 |  | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 1024 | 	rc = ((u64) do_div(rb, divisor) << 32) + d; | 
 | 1025 | 	y = rb; | 
 | 1026 |  | 
 | 1027 | 	do_div(rc, divisor); | 
 | 1028 | 	z = rc; | 
| Paul Mackerras | f2783c1 | 2005-10-20 09:23:26 +1000 | [diff] [blame] | 1029 |  | 
 | 1030 | 	dr->result_high = ((u64)w << 32) + x; | 
 | 1031 | 	dr->result_low  = ((u64)y << 32) + z; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1032 |  | 
 | 1033 | } | 
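/*
 * The routine above is schoolbook long division in base 2^32: each 32-bit
 * "digit" of the dividend is divided in turn, with the remainder carried
 * into the next digit and the final remainder discarded.  Illustrative
 * check (not part of the original file):
 */
#if 0	/* illustrative example only */
static void example_div128_by_32(void)
{
	struct div_result res;

	/* 2^64 / 3, i.e. dividend_high = 1, dividend_low = 0 */
	div128_by_32(1, 0, 3, &res);
	/* res.result_high == 0, res.result_low == 0x5555555555555555ULL */
}
#endif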
| Geert Uytterhoeven | bcd68a7 | 2009-02-19 16:50:46 +0100 | [diff] [blame] | 1034 |  | 
| Benjamin Herrenschmidt | 177996e | 2009-06-09 21:12:00 +0000 | [diff] [blame] | 1035 | /* We don't need to calibrate delay; we use the CPU timebase for that */ | 
 | 1036 | void calibrate_delay(void) | 
 | 1037 | { | 
 | 1038 | 	/* Some generic code (such as spinlock debug) uses loops_per_jiffy | 
 | 1039 | 	 * as the number of __delay(1) calls in a jiffy, so make it so | 
 | 1040 | 	 */ | 
 | 1041 | 	loops_per_jiffy = tb_ticks_per_jiffy; | 
 | 1042 | } | 
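/*
 * Illustrative sketch (not part of the original file): __delay() on powerpc
 * spins on the timebase rather than on a calibrated software loop, roughly
 * as below, which is why loops_per_jiffy can simply be set to
 * tb_ticks_per_jiffy above.
 */
#if 0	/* illustrative example only */
static void example_delay(unsigned long ticks)
{
	unsigned long start = get_tbl();

	while (get_tbl() - start < ticks)
		cpu_relax();
}
#endif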
 | 1043 |  | 
| Geert Uytterhoeven | bcd68a7 | 2009-02-19 16:50:46 +0100 | [diff] [blame] | 1044 | static int __init rtc_init(void) | 
 | 1045 | { | 
 | 1046 | 	struct platform_device *pdev; | 
 | 1047 |  | 
 | 1048 | 	if (!ppc_md.get_rtc_time) | 
 | 1049 | 		return -ENODEV; | 
 | 1050 |  | 
 | 1051 | 	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); | 
| Geert Uytterhoeven | bcd68a7 | 2009-02-19 16:50:46 +0100 | [diff] [blame] | 1052 |  | 
| Adrian-Leonard Radu | 09652b0 | 2013-03-10 03:07:00 +0000 | [diff] [blame] | 1053 | 	return PTR_RET(pdev); | 
| Geert Uytterhoeven | bcd68a7 | 2009-02-19 16:50:46 +0100 | [diff] [blame] | 1054 | } | 
 | 1055 |  | 
 | 1056 | module_init(rtc_init); |