/* This has so very few changes over libgcc2's __udivmoddi4 it isn't funny.  */

#include <math-emu/soft-fp.h>

#undef count_leading_zeros
#define count_leading_zeros  __FP_CLZ

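/* Divide the two-word number n1:n0 by the two-word number d1:d0, storing
   the quotient in q[1]:q[0] and the remainder in r[1]:r[0] (high word
   first in each pair).  The longlong.h-style primitives are assumed to
   have their usual meaning: udiv_qrnnd(q, r, nh, nl, d) divides nh:nl by
   the single word d, umul_ppmm(ph, pl, u, v) forms the two-word product
   u * v, sub_ddmmss(sh, sl, ah, al, bh, bl) computes ah:al - bh:bl, and
   count_leading_zeros (here __FP_CLZ) counts leading zero bits.  */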
void
_fp_udivmodti4(_FP_W_TYPE q[2], _FP_W_TYPE r[2],
               _FP_W_TYPE n1, _FP_W_TYPE n0,
               _FP_W_TYPE d1, _FP_W_TYPE d0)
{
  _FP_W_TYPE q0, q1, r0, r1;
  _FP_I_TYPE b, bm;

  if (d1 == 0)
    {
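      /* The divisor fits in a single word.  */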
#if !UDIV_NEEDS_NORMALIZATION
      if (d0 > n1)
        {
          /* 0q = nn / 0D */

          udiv_qrnnd (q0, n0, n1, n0, d0);
          q1 = 0;

          /* Remainder in n0.  */
        }
      else
        {
          /* qq = NN / 0d */

          if (d0 == 0)
            d0 = 1 / d0;	/* Divide intentionally by zero.  */

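          /* Two-digit schoolbook division: the remainder of the high-word
             divide feeds in as the high half of the low-word divide.  */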
          udiv_qrnnd (q1, n1, 0, n1, d0);
          udiv_qrnnd (q0, n0, n1, n0, d0);

          /* Remainder in n0.  */
        }

      r0 = n0;
      r1 = 0;

#else /* UDIV_NEEDS_NORMALIZATION */
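
      /* Here udiv_qrnnd requires a normalized divisor (most significant
         bit set), so shift d0 and the numerator left by bm bits before
         dividing and shift the remainder back down afterwards.  */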

      if (d0 > n1)
        {
          /* 0q = nn / 0D */

          count_leading_zeros (bm, d0);

          if (bm != 0)
            {
              /* Normalize, i.e. make the most significant bit of the
                 denominator set.  */

              d0 = d0 << bm;
              n1 = (n1 << bm) | (n0 >> (_FP_W_TYPE_SIZE - bm));
              n0 = n0 << bm;
            }

          udiv_qrnnd (q0, n0, n1, n0, d0);
          q1 = 0;

          /* Remainder in n0 >> bm.  */
        }
      else
        {
          /* qq = NN / 0d */

          if (d0 == 0)
            d0 = 1 / d0;	/* Divide intentionally by zero.  */

          count_leading_zeros (bm, d0);

          if (bm == 0)
            {
              /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
                 conclude (the most significant bit of n1 is set) /\ (the
                 leading quotient digit q1 = 1).

                 This special case is necessary, not an optimization.
                 (Shift counts of _FP_W_TYPE_SIZE are undefined.)  */

              n1 -= d0;
              q1 = 1;
            }
          else
            {
              _FP_W_TYPE n2;

              /* Normalize.  */

              b = _FP_W_TYPE_SIZE - bm;

              d0 = d0 << bm;
              n2 = n1 >> b;
              n1 = (n1 << bm) | (n0 >> b);
              n0 = n0 << bm;
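
              /* n2:n1:n0 is now the numerator shifted left by bm, with
                 n2 < d0 as udiv_qrnnd requires.  */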

              udiv_qrnnd (q1, n1, n2, n1, d0);
            }

          /* In both branches n1 is now less than d0, so the final
             udiv_qrnnd below is valid.  */

          udiv_qrnnd (q0, n0, n1, n0, d0);

          /* Remainder in n0 >> bm.  */
        }

      r0 = n0 >> bm;
      r1 = 0;
#endif /* UDIV_NEEDS_NORMALIZATION */
    }
  else
    {
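      /* The divisor occupies two words.  */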
      if (d1 > n1)
        {
          /* 00 = nn / DD */

          q0 = 0;
          q1 = 0;

          /* Remainder in n1n0.  */
          r0 = n0;
          r1 = n1;
        }
      else
        {
          /* 0q = NN / dd */

          count_leading_zeros (bm, d1);
          if (bm == 0)
            {
              /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
                 conclude (the most significant bit of n1 is set) /\ (the
                 quotient digit q0 = 0 or 1).

                 This special case is necessary, not an optimization.  */

              /* The condition on the next line takes advantage of the fact
                 that n1 >= d1 (true due to program flow).  */
              if (n1 > d1 || n0 >= d0)
                {
                  q0 = 1;
                  sub_ddmmss (n1, n0, n1, n0, d1, d0);
                }
              else
                q0 = 0;

              q1 = 0;

              r0 = n0;
              r1 = n1;
            }
          else
            {
              _FP_W_TYPE m1, m0, n2;

              /* Normalize.  */

              b = _FP_W_TYPE_SIZE - bm;

              d1 = (d1 << bm) | (d0 >> b);
              d0 = d0 << bm;
              n2 = n1 >> b;
              n1 = (n1 << bm) | (n0 >> b);
              n0 = n0 << bm;

              udiv_qrnnd (q0, n1, n2, n1, d1);
              umul_ppmm (m1, m0, q0, d0);

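              /* q0, estimated from the high word d1 alone, can be one too
                 large.  Compare q0 * d0 against what is left of the
                 numerator and, if needed, step q0 back and fold the divisor
                 into the product so the remainder below comes out right.  */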
              if (m1 > n1 || (m1 == n1 && m0 > n0))
                {
                  q0--;
                  sub_ddmmss (m1, m0, m1, m0, d1, d0);
                }

              q1 = 0;

              /* Remainder in (n1n0 - m1m0) >> bm.  */
              sub_ddmmss (n1, n0, n1, n0, m1, m0);
              r0 = (n1 << b) | (n0 >> bm);
              r1 = n1 >> bm;
            }
        }
    }

  q[0] = q0; q[1] = q1;
  r[0] = r0; r[1] = r1;
}