remove div_long_long_rem
x86 is the only arch right now that provides an optimized version of
div_long_long_rem, and it has the downside that one has to be very careful
that the divide doesn't overflow.
The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64-bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
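For reference, the replacement helpers live in <linux/math64.h> and take a
64-bit dividend with a 32-bit divisor, returning the quotient and passing the
remainder back through a pointer. A rough sketch of the conversion pattern
used below (the helper name split_ns is made up purely for illustration;
div_s64_rem() truncates toward zero, hence the remainder fixup for negative
input):

	#include <linux/math64.h>
	#include <linux/time.h>

	/* Illustrative only: split a signed nanosecond count into
	 * seconds and a non-negative nanosecond remainder, the way
	 * ns_to_timespec() does it after this patch.
	 */
	static void split_ns(s64 nsec, time_t *sec, long *nsec_rem)
	{
		s32 rem;

		*sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
		if (rem < 0) {
			/* quotient was truncated toward zero */
			(*sec)--;
			rem += NSEC_PER_SEC;
		}
		*nsec_rem = rem;
	}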
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index ae5c6c1..f1525ad 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -4,8 +4,9 @@
#include <linux/sched.h>
#include <linux/posix-timers.h>
-#include <asm/uaccess.h>
#include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>
static int check_clock(const clockid_t which_clock)
{
@@ -47,12 +48,10 @@
union cpu_time_count cpu,
struct timespec *tp)
{
- if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
- tp->tv_sec = div_long_long_rem(cpu.sched,
- NSEC_PER_SEC, &tp->tv_nsec);
- } else {
+ if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+ *tp = ns_to_timespec(cpu.sched);
+ else
cputime_to_timespec(cpu.cpu, tp);
- }
}
static inline int cpu_time_before(const clockid_t which_clock,
diff --git a/kernel/time.c b/kernel/time.c
index 343e2515..cbe0d5a2 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -392,13 +392,17 @@
struct timespec ns_to_timespec(const s64 nsec)
{
struct timespec ts;
+ s32 rem;
if (!nsec)
return (struct timespec) {0, 0};
- ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
- if (unlikely(nsec < 0))
- set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);
+ ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+ if (unlikely(rem < 0)) {
+ ts.tv_sec--;
+ rem += NSEC_PER_SEC;
+ }
+ ts.tv_nsec = rem;
return ts;
}
@@ -528,8 +532,10 @@
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
- u64 nsec = (u64)jiffies * TICK_NSEC;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
+ u32 rem;
+ value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+ NSEC_PER_SEC, &rem);
+ value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec);
@@ -567,12 +573,11 @@
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
- u64 nsec = (u64)jiffies * TICK_NSEC;
- long tv_usec;
+ u32 rem;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
- tv_usec /= NSEC_PER_USEC;
- value->tv_usec = tv_usec;
+ value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+ NSEC_PER_SEC, &rem);
+ value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index a4492f3..dbd6f89 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -234,7 +234,7 @@
*/
int do_adjtimex(struct timex *txc)
{
- long mtemp, save_adjust, rem;
+ long mtemp, save_adjust;
s64 freq_adj;
int result;
@@ -345,9 +345,7 @@
freq_adj += time_freq;
freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
- time_offset = div_long_long_rem_signed(time_offset,
- NTP_INTERVAL_FREQ,
- &rem);
+ time_offset = div_s64(time_offset, NTP_INTERVAL_FREQ);
time_offset <<= SHIFT_UPDATE;
} /* STA_PLL */
} /* txc->modes & ADJ_OFFSET */