#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 *
 * This is commonly provided by 32bit archs as an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
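
/*
 * Usage sketch (illustrative only; variable names are hypothetical):
 * split a nanosecond count into whole seconds plus leftover
 * nanoseconds, assuming NSEC_PER_SEC from <linux/time.h>:
 *
 *	u32 rem_ns;
 *	u64 secs = div_u64_rem(total_ns, NSEC_PER_SEC, &rem_ns);
 */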

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}
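
/*
 * Usage sketch (illustrative only; variable names are hypothetical):
 * when the divisor itself may exceed 32 bits, the 32bit-divisor
 * helpers above do not apply and the full 64bit variants are needed:
 *
 *	u64 avg = div64_u64(total_bytes, nr_intervals);
 */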

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
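	/*
	 * do_div() from <asm/div64.h> divides the 64bit dividend in
	 * place, leaving the quotient in @dividend, and returns the
	 * 32bit remainder.
	 */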
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
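
/*
 * Usage sketch (illustrative only; variable names are hypothetical):
 * scale a nanosecond value down to microseconds when the remainder
 * is not needed:
 *
 *	u64 us = div_u64(ns, 1000);
 */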

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

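/*
 * The two helpers below divide by repeated subtraction, so they are
 * only sensible when the quotient is expected to be small (i.e. the
 * dividend is not much larger than the divisor); iter_div_u64_rem()
 * is the out-of-line counterpart of __iter_div_u64_rem().
 */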
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
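
/*
 * Usage sketch (illustrative only; variable names are hypothetical):
 * normalize a value whose nanosecond part may have just crossed one
 * second, so the quotient is expected to be 0 or 1:
 *
 *	u64 rem;
 *	u32 secs = __iter_div_u64_rem(ns, NSEC_PER_SEC, &rem);
 */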

#endif /* _LINUX_MATH64_H */