/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time. (for iSeries, we calibrate the timebase
 * against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 * non-ambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* keep track of when we need to update the rtc */
time_t last_rtc_update;
extern int piranha_simulator;
#ifdef CONFIG_PPC_ISERIES
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
static unsigned long first_settimeofday = 1;
#endif

/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601	(1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif

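/*
 * Worked example (illustrative): an "xsec" is 1/2^20 of a second,
 * so conversions can use shifts and masks instead of divides.
 * Half a second is 524288 xsec, and SCALE_XSEC(524288, 1000000)
 * evaluates to 524288 * 1000000 / 2^20 = 500000 microseconds,
 * on both the 64-bit and the mulhwu-based 32-bit variant.
 */
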
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;	/* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE	(SHIFT_SCALE - 10)
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

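/*
 * Illustrative arithmetic, assuming HZ=1000 (so SHIFT_HZ=10) and
 * SHIFT_SCALE=22 from <linux/timex.h>: TICKLEN_SCALE is then 12,
 * a tick is about 10^6 ns, so last_tick_len ~= 10^6 << 12 ~= 2^31.9,
 * and TICKLEN_SHIFT = 63 - 30 - 12 + 10 = 31, which puts
 * last_tick_len << TICKLEN_SHIFT at about 2^62.9, i.e. close to
 * the 2^63 the comment above calls for.
 */
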
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

u64 tb_to_ns_scale;
unsigned tb_to_ns_shift;

struct gettimeofday_struct do_gtod;

extern unsigned long wall_jiffies;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;

u64 tb_last_jiffy __cacheline_aligned_in_smp;
unsigned long tb_last_stamp;

/*
 * Note that on ppc32 this only stores the bottom 32 bits of
 * the timebase value, but that's enough to tell when a jiffy
 * has passed.
 */
DEFINE_PER_CPU(unsigned long, last_jiffy);

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

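/*
 * Usage note: udelay(100) busy-waits for roughly 100 microseconds
 * by spinning for tb_ticks_per_usec * 100 timebase ticks; on a 601
 * __delay() polls RTCL instead and fixes up its wrap at 10^9.
 */
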
static __inline__ void timer_check_rtc(void)
{
	/*
	 * Update the rtc when needed; this should be performed on the
	 * right fraction of a second.  Half or full second?
	 * Full second works on mk48t59 clocks, others need testing.
	 * Note that this update is basically only used through
	 * the adjtimex system calls.  Setting the HW clock in
	 * any other way is a /dev/rtc and userland business.
	 * This is still wrong by -0.5/+1.5 jiffies because of the
	 * timer interrupt resolution and possible delay, but here we
	 * hit a quantization limit which can only be solved by higher
	 * resolution timers and decoupling time management from timer
	 * interrupts.  This is also wrong on the clocks
	 * which require being written at the half second boundary.
	 * We should have an rtc call that only sets the minutes and
	 * seconds like on Intel to avoid problems with non-UTC clocks.
	 */
	if (ppc_md.set_rtc_time && ntp_synced() &&
	    xtime.tv_sec - last_rtc_update >= 659 &&
	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
		struct rtc_time tm;
		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
		tm.tm_year -= 1900;
		tm.tm_mon -= 1;
		if (ppc_md.set_rtc_time(&tm) == 0)
			last_rtc_update = xtime.tv_sec + 1;
		else
			/* Try again one minute later */
			last_rtc_update += 60;
	}
}
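
/*
 * Reading the condition above (illustrative, with HZ=1000): the RTC
 * is rewritten at most every 659s, just under ntpd's 11-minute
 * update interval, and only on a tick whose microsecond offset lies
 * within 500000/HZ = 500us of 1000000 - 1000000/HZ = 999000us, i.e.
 * the last tick before a second boundary; that is why tv_sec + 1 is
 * the value written out.
 */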

/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
{
	unsigned long sec, usec;
	u64 tb_ticks, xsec;
	struct gettimeofday_vars *temp_varp;
	u64 temp_tb_to_xs, temp_stamp_xsec;

	/*
	 * These calculations are faster (gets rid of divides)
	 * if done in units of 1/2^20 rather than microseconds.
	 * The conversion to microseconds at the end is done
	 * without a divide (and in fact, without a multiply)
	 */
	temp_varp = do_gtod.varp;
	tb_ticks = tb_val - temp_varp->tb_orig_stamp;
	temp_tb_to_xs = temp_varp->tb_to_xs;
	temp_stamp_xsec = temp_varp->stamp_xsec;
	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
	sec = xsec / XSEC_PER_SEC;
	usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
	usec = SCALE_XSEC(usec, 1000000);

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

void do_gettimeofday(struct timeval *tv)
{
	if (__USE_RTC()) {
		/* do this the old way */
		unsigned long flags, seq;
		unsigned int sec, nsec, usec;

		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
		usec = nsec / 1000;
		while (usec >= 1000000) {
			usec -= 1000000;
			++sec;
		}
		tv->tv_sec = sec;
		tv->tv_usec = usec;
		return;
	}
	__do_gettimeofday(tv, get_tb());
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 */
	++(vdso_data->tb_update_count);
	smp_wmb();
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}

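/*
 * Reader-side sketch of the protocol described above (schematic
 * only; the real implementation lives in the ppc vdso sources):
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		rmb();
 *		... read tb_orig_stamp, stamp_xsec, tb_to_xs ...
 *		rmb();
 *	} while ((seq & 1) || seq != vdso_data->tb_update_count);
 */
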
/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec.  The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32-bit number.  This is a requirement of our fast 32-bit userland
 * implementation in the vdso.  If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with too big a difference, then the vdso will fall back to calling
 * the syscall.
 */
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
	unsigned long offset;
	u64 new_stamp_xsec;
	u64 tlen, t2x;

	if (__USE_RTC())
		return;
	tlen = current_tick_length();
	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
	if (tlen == last_tick_len && offset < 0x80000000u) {
		/* check that we're still in sync; if not, resync */
		struct timeval tv;
		__do_gettimeofday(&tv, cur_tb);
		if (tv.tv_sec <= xtime.tv_sec &&
		    (tv.tv_sec < xtime.tv_sec ||
		     tv.tv_usec * 1000 <= xtime.tv_nsec))
			return;
	}
	if (tlen != last_tick_len) {
		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
		last_tick_len = tlen;
	} else
		t2x = do_gtod.varp->tb_to_xs;
	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(cur_tb, new_stamp_xsec, t2x);
}

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

351#ifdef CONFIG_PPC_ISERIES
352
353/*
354 * This function recalibrates the timebase based on the 49-bit time-of-day
355 * value in the Titan chip. The Titan is much more accurate than the value
356 * returned by the service processor for the timebase frequency.
357 */
358
359static void iSeries_tb_recal(void)
360{
361 struct div_result divres;
362 unsigned long titan, tb;
363 tb = get_tb();
364 titan = HvCallXm_loadTod();
365 if ( iSeries_recal_titan ) {
366 unsigned long tb_ticks = tb - iSeries_recal_tb;
367 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
368 unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
369 unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
370 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
371 char sign = '+';
372 /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
373 new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
374
375 if ( tick_diff < 0 ) {
376 tick_diff = -tick_diff;
377 sign = '-';
378 }
379 if ( tick_diff ) {
380 if ( tick_diff < tb_ticks_per_jiffy/25 ) {
381 printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
382 new_tb_ticks_per_jiffy, sign, tick_diff );
383 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
384 tb_ticks_per_sec = new_tb_ticks_per_sec;
385 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
386 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
387 tb_to_xs = divres.result_low;
388 do_gtod.varp->tb_to_xs = tb_to_xs;
Benjamin Herrenschmidta7f290d2005-11-11 21:15:21 +1100389 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
390 vdso_data->tb_to_xs = tb_to_xs;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391 }
392 else {
393 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
394 " new tb_ticks_per_jiffy = %lu\n"
395 " old tb_ticks_per_jiffy = %lu\n",
396 new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
397 }
398 }
399 }
400 iSeries_recal_titan = titan;
401 iSeries_recal_tb = tb;
402}
403#endif

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	int next_dec;
	int cpu = smp_processor_id();
	unsigned long ticks;

#ifdef CONFIG_PPC32
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	irq_enter();

	profile_tick(CPU_PROFILING, regs);

#ifdef CONFIG_PPC_ISERIES
	get_lppaca()->int_dword.fields.decr_int = 0;
#endif

	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
	       >= tb_ticks_per_jiffy) {
		/* Update last_jiffy */
		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
		/* Handle RTCL overflow on 601 */
		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
			per_cpu(last_jiffy, cpu) -= 1000000000;

		/*
		 * We cannot disable the decrementer, so in the period
		 * between this cpu's being marked offline in cpu_online_map
		 * and calling stop-self, it is taking timer interrupts.
		 * Avoid calling into the scheduler rebalancing code if this
		 * is the case.
		 */
		if (!cpu_is_offline(cpu))
			update_process_times(user_mode(regs));

		/*
		 * No need to check whether cpu is offline here; boot_cpuid
		 * should have been fixed up by now.
		 */
		if (cpu != boot_cpuid)
			continue;

		write_seqlock(&xtime_lock);
		tb_last_jiffy += tb_ticks_per_jiffy;
		tb_last_stamp = per_cpu(last_jiffy, cpu);
		do_timer(regs);
		timer_recalc_offset(tb_last_jiffy);
		timer_check_rtc();
		write_sequnlock(&xtime_lock);
	}

	next_dec = tb_ticks_per_jiffy - ticks;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	if (hvlpevent_is_pending())
		process_hvlpevents(regs);
#endif

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
}

void wakeup_decrementer(void)
{
	unsigned long ticks;

	/*
	 * The timebase gets saved on sleep and restored on wakeup,
	 * so all we need to do is to reset the decrementer.
	 */
	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
	if (ticks < tb_ticks_per_jiffy)
		ticks = tb_ticks_per_jiffy - ticks;
	else
		ticks = 1;
	set_dec(ticks);
}

#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;
	for_each_cpu(i) {
		if (i != boot_cpuid) {
			previous_tb += offset;
			per_cpu(last_jiffy, i) = previous_tb;
		}
	}
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}

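/*
 * Worked example (illustrative, assuming a timebase of exactly
 * 1 GHz): time_init() computes 10^9 * 2^64 / 10^9 = 2^64 and shifts
 * it right once to fit in 64 bits, giving tb_to_ns_scale = 2^63 and
 * tb_to_ns_shift = 1, so sched_clock() returns
 * mulhdu(tb, 2^63) << 1 = (tb >> 1) << 1 ~= tb: one nanosecond per
 * tick, as expected at 1 GHz.
 */
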
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	u64 new_xsec;
	unsigned long tb_delta;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/*
	 * Updating the RTC is not the job of this code.  If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared.  Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */
#ifdef CONFIG_PPC_ISERIES
	if (first_settimeofday) {
		iSeries_tb_recal();
		first_settimeofday = 0;
	}
#endif

	/*
	 * Subtract off the number of nanoseconds since the
	 * beginning of the last tick.
	 * Note that since we don't increment jiffies_64 anywhere other
	 * than in do_timer (since we don't have a lost tick problem),
	 * wall_jiffies will always be the same as jiffies,
	 * and therefore the (jiffies - wall_jiffies) computation
	 * has been removed.
	 */
	tb_delta = tb_ticks_since(tb_last_stamp);
	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	ntp_clear();

	new_xsec = xtime.tv_nsec;
	if (new_xsec != 0) {
		new_xsec *= XSEC_PER_SEC;
		do_div(new_xsec, NSEC_PER_SEC);
	}
	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

void __init generic_calibrate_decr(void)
{
	struct device_node *cpu;
	unsigned int *fp;
	int node_found;

	/*
	 * The cpu node should have a timebase-frequency property
	 * to tell us the rate at which the decrementer counts.
	 */
	cpu = of_find_node_by_type(NULL, "cpu");

	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
	node_found = 0;
	if (cpu) {
		fp = (unsigned int *)get_property(cpu, "timebase-frequency",
						  NULL);
		if (fp) {
			node_found = 1;
			ppc_tb_freq = *fp;
		}
	}
	if (!node_found)
		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");

	ppc_proc_freq = DEFAULT_PROC_FREQ;
	node_found = 0;
	if (cpu) {
		fp = (unsigned int *)get_property(cpu, "clock-frequency",
						  NULL);
		if (fp) {
			node_found = 1;
			ppc_proc_freq = *fp;
		}
	}
#ifdef CONFIG_BOOKE
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
	if (!node_found)
		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");

	of_node_put(cpu);
}

unsigned long get_boot_time(void)
{
	struct rtc_time tm;

	if (ppc_md.get_boot_time)
		return ppc_md.get_boot_time();
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	unsigned long tm = 0;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

	if (ppc_md.time_init != NULL)
		timezone_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_stamp = get_rtcl();
		tb_last_jiffy = tb_last_stamp;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_INFO "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_stamp = tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);

	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	tick_nsec = x;
	last_tick_len = x << TICKLEN_SCALE;

	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * so as to give the result as a 0.64 fixed-point fraction.
	 */
	div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0,
		     tb_ticks_per_jiffy, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;

	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

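	/*
	 * Dimensional check (illustrative): multiplying out the powers
	 * of two above gives tb_to_xs ~= 2^20 * 2^64 / tb_ticks_per_sec,
	 * i.e. the number of xsec per timebase tick as a 0.64
	 * fixed-point fraction, which is exactly the form that
	 * mulhdu(tb_ticks, tb_to_xs) consumes in __do_gettimeofday().
	 */
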
	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;

#ifdef CONFIG_PPC_ISERIES
	if (!piranha_simulator)
#endif
		tm = get_boot_time();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
		tm -= timezone_offset;
	}

	xtime.tv_sec = tm;
	xtime.tv_nsec = 0;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_stamp;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	time_freq = 0;

	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}


#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 was, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		tm->tm_mday;

	tm->tm_wday = day % 7;
}

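/*
 * Worked example: for 20 February 2006, lastYear = 2005,
 * leapsToDate = 501 - 20 + 5 = 486, and a February date gets no
 * leap-day correction, so day = 2005*365 + 486 + 31 + 20 = 732362
 * and tm_wday = 732362 % 7 = 1, a Monday (0 = Sunday).
 */
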
void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}

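/*
 * Worked example: to_tm(0, &tm) decodes the epoch, 1 January 1970
 * 00:00:00 UTC: day = hms = 0, no whole years or months are
 * subtracted, so tm_year = 1970, tm_mon = 1, tm_mday = 1, and
 * GregorianDay() sets tm_wday = 4, a Thursday.
 */
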
/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt=0, tmp, err;
	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */

	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */

	err = inscale * (mlt+1);
	if (err <= inscale/2)
		mlt++;
	return mlt;
}

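/*
 * Illustrative use: time_init() calls
 * mulhwu_scale_factor(ppc_tb_freq, 1000000) to build tb_to_us.
 * With a 16.777216 MHz timebase (2^24 ticks per second) the search
 * converges on mlt ~= 1000000 * 2^32 / 2^24 = 256000000, so that
 * mulhwu(ticks, mlt) = (ticks * 256000000) >> 32 ~= ticks / 16.777,
 * i.e. microseconds.
 */
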
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low = ((u64)y << 32) + z;

}
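
/*
 * Worked example: div128_by_32(1, 0, 3, &dr) divides 2^64 by 3,
 * one 32-bit digit at a time; the quotient is 0x5555555555555555
 * with remainder 1, so dr->result_high = 0 and
 * dr->result_low = 0x5555555555555555.
 */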