@@ -323,11 +323,7 @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
if (clk->period == 0) {
return 0;
}
- /*
- * BUG: when CONFIG_INT128 is not defined, the current implementation of
- * divu128 does not return a valid truncated quotient, so the result will
- * be wrong.
- */
+
divu128(&lo, &hi, clk->period);
return lo;
}
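
With the truncated quotient now guaranteed on every host, the caller above no longer needs the removed warning. Below is a sketch of the computation this caller performs, assuming periods are stored in units of 2^-32 ns as in QEMU's clock API; the function name clock_ns_to_ticks_sketch and the dividend setup are illustrative, not the verbatim clock.h body:

    #include "qemu/host-utils.h"   /* divu128() */

    /* ticks = ns / period_in_ns = ns * 2^32 / period */
    static inline uint64_t clock_ns_to_ticks_sketch(uint64_t period, uint64_t ns)
    {
        uint64_t lo = ns << 32;    /* low 64 bits of the 128-bit dividend ns * 2^32 */
        uint64_t hi = ns >> 32;    /* high 64 bits of the dividend */

        if (period == 0) {
            return 0;
        }
        divu128(&lo, &hi, period); /* quotient lands in hi:lo, remainder is returned */
        return lo;                 /* low 64 bits of the truncated quotient */
    }
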
@@ -56,26 +56,32 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
return (__int128_t)a * b / c;
}
-static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
+ uint64_t divisor)
{
__uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
__uint128_t result = dividend / divisor;
+
*plow = result;
- *phigh = dividend % divisor;
+ *phigh = result >> 64;
+ return dividend % divisor;
}
-static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
+ int64_t divisor)
{
- __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
+ __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
__int128_t result = dividend / divisor;
+
*plow = result;
- *phigh = dividend % divisor;
+ *phigh = result >> 64;
+ return dividend % divisor;
}
#else
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
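
To summarize the new contract in both the CONFIG_INT128 and fallback paths: the full 128-bit quotient is written back through plow/phigh, and the remainder becomes the return value, instead of the quotient being confined to plow and the remainder to phigh. A hypothetical caller-side check of the new convention (not part of the patch):

    #include <assert.h>
    #include "qemu/host-utils.h"

    static void check_divu128_contract(void)
    {
        uint64_t lo = 10, hi = 1;                  /* dividend = 2^64 + 10 */
        uint64_t rem = divu128(&lo, &hi, 3);

        assert(lo == 6148914691236517208ULL);      /* (2^64 + 10) / 3, low half */
        assert(hi == 0);                           /* quotient fits in 64 bits */
        assert(rem == 2);                          /* remainder via return value */
    }
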
@@ -120,7 +120,7 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
{
- int64_t rt = 0;
+ uint64_t rt = 0;
int64_t ra = (int64_t)rau;
int64_t rb = (int64_t)rbu;
int overflow = 0;
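
rt becomes uint64_t because the new divs128() prototype takes the low half of the dividend/quotient as uint64_t *. The sketch below illustrates only that calling convention for divde (dividend = ra in the high half, zero low half); the overflow/oe handling visible above and the divide-by-zero guard are omitted, and the function name is hypothetical:

    #include "qemu/host-utils.h"

    static int64_t divde_sketch(int64_t ra, int64_t rb)
    {
        uint64_t rt = 0;         /* low 64 bits of the dividend, then of the quotient */
        int64_t hi = ra;         /* high 64 bits of the dividend ra * 2^64 */

        divs128(&rt, &hi, rb);   /* remainder (the return value) is not needed here */
        return (int64_t)rt;      /* low 64 bits of the quotient */
    }
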
@@ -2506,6 +2506,7 @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
int cr;
uint64_t lo_value;
uint64_t hi_value;
+ uint64_t rem;
ppc_avr_t ret = { .u64 = { 0, 0 } };
if (b->VsrSD(0) < 0) {
@@ -2541,10 +2542,10 @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
* In that case, we leave r unchanged.
*/
} else {
- divu128(&lo_value, &hi_value, 1000000000000000ULL);
+ rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);
- for (i = 1; i < 16; hi_value /= 10, i++) {
- bcd_put_digit(&ret, hi_value % 10, i);
+ for (i = 1; i < 16; rem /= 10, i++) {
+ bcd_put_digit(&ret, rem % 10, i);
}
for (; i < 32; lo_value /= 10, i++) {
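
Under the old convention the remainder of the division by 10^15 came back through hi_value, which is why the first digit loop used to consume hi_value; now it comes back as the return value, kept in rem, while hi_value receives the high half of the quotient instead. A hypothetical check of that digit split, using 2^64 = 18446744073709551616 as the dividend (not part of the patch):

    #include <assert.h>
    #include "qemu/host-utils.h"

    static void check_digit_split(void)
    {
        uint64_t lo_value = 0, hi_value = 1;    /* 128-bit dividend = 2^64 */
        uint64_t rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);

        assert(rem == 744073709551616ULL);      /* low 15 decimal digits */
        assert(lo_value == 18446ULL);           /* remaining high digits */
        assert(hi_value == 0);                  /* quotient fits in 64 bits */
    }
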
@@ -87,72 +87,117 @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
}
/*
- * Unsigned 128-by-64 division. Returns quotient via plow and
- * remainder via phigh.
- * The result must fit in 64 bits (plow) - otherwise, the result
- * is undefined.
- * This function will cause a division by zero if passed a zero divisor.
+ * Unsigned 128-by-64 division.
+ * Returns the quotient via plow (low 64 bits) and phigh (high 64 bits).
+ * Returns the remainder via the function return value.
+ * This function will cause a division by zero if passed a zero divisor.
*/
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
uint64_t dhi = *phigh;
uint64_t dlo = *plow;
- unsigned i;
- uint64_t carry = 0;
+ uint64_t rem, dhighest;
+ int sh;
if (divisor == 0 || dhi == 0) {
*plow = dlo / divisor;
- *phigh = dlo % divisor;
+ *phigh = 0;
+ return dlo % divisor;
} else {
+ sh = clz64(divisor);
- for (i = 0; i < 64; i++) {
- carry = dhi >> 63;
- dhi = (dhi << 1) | (dlo >> 63);
- if (carry || (dhi >= divisor)) {
- dhi -= divisor;
- carry = 1;
- } else {
- carry = 0;
+ if (dhi < divisor) {
+ if (sh != 0) {
+ /* normalize the divisor, shifting the dividend accordingly */
+ divisor <<= sh;
+ dhi = (dhi << sh) | (dlo >> (64 - sh));
+ dlo <<= sh;
}
- dlo = (dlo << 1) | carry;
+
+ *phigh = 0;
+ *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
+ } else {
+ if (sh != 0) {
+ /* normalize the divisor, shifting the dividend accordingly */
+ divisor <<= sh;
+ dhighest = dhi >> (64 - sh);
+ dhi = (dhi << sh) | (dlo >> (64 - sh));
+ dlo <<= sh;
+
+ *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor);
+ } else {
+ /**
+ * dhi >= divisor
+ * Since the MSB of divisor is set (sh == 0),
+ * (dhi - divisor) < divisor
+ *
+ * Thus, the high part of the quotient is 1, and we can
+ * calculate the low part with a single call to udiv_qrnnd
+ * after subtracting divisor from dhi
+ */
+ dhi -= divisor;
+ *phigh = 1;
+ }
+
+ *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
}
- *plow = dlo;
- *phigh = dhi;
+ /*
+ * If the operands were normalized above, the remainder is scaled
+ * by the same 2^sh factor and has to be shifted back down.
+ */
+ return rem >> sh;
}
}
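
Normalizing scales both the divisor and the dividend by 2^sh, which leaves the quotient unchanged but multiplies the remainder by 2^sh; the final rem >> sh undoes that. The dhi >= divisor path is what makes quotients wider than 64 bits possible in this fallback at all. A hypothetical check of that new wide-quotient case (not part of the patch):

    #include <assert.h>
    #include "qemu/host-utils.h"

    static void check_wide_quotient(void)
    {
        uint64_t lo = 0, hi = 10;               /* dividend = 10 * 2^64 */
        uint64_t rem = divu128(&lo, &hi, 6);

        /* 10 * 2^64 / 6 = 2^64 + 0xAAAAAAAAAAAAAAAA, remainder 4 */
        assert(hi == 1);
        assert(lo == 0xAAAAAAAAAAAAAAAAULL);
        assert(rem == 4);
    }
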
/*
- * Signed 128-by-64 division. Returns quotient via plow and
- * remainder via phigh.
- * The result must fit in 64 bits (plow) - otherwise, the result
- * is undefined.
- * This function will cause a division by zero if passed a zero divisor.
+ * Signed 128-by-64 division.
+ * Returns the quotient via plow (low 64 bits) and phigh (high 64 bits).
+ * Returns the remainder via the function return value.
*/
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
{
- int sgn_dvdnd = *phigh < 0;
- int sgn_divsr = divisor < 0;
+ bool neg_quotient = false, neg_remainder = false;
+ uint64_t unsig_hi = *phigh, unsig_lo = *plow;
+ uint64_t rem;
- if (sgn_dvdnd) {
- *plow = ~(*plow);
- *phigh = ~(*phigh);
- if (*plow == (int64_t)-1) {
+ if (*phigh < 0) {
+ neg_quotient = !neg_quotient;
+ neg_remainder = !neg_remainder;
+
+ if (unsig_lo == 0) {
+ unsig_hi = -unsig_hi;
+ } else {
+ unsig_hi = ~unsig_hi;
+ unsig_lo = -unsig_lo;
+ }
+ }
+
+ if (divisor < 0) {
+ neg_quotient = !neg_quotient;
+
+ divisor = -divisor;
+ }
+
+ rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor);
+
+ if (neg_quotient) {
+ if (unsig_lo == 0) {
+ *phigh = -unsig_hi;
*plow = 0;
- (*phigh)++;
- } else {
- (*plow)++;
- }
+ } else {
+ *phigh = ~unsig_hi;
+ *plow = -unsig_lo;
+ }
+ } else {
+ *phigh = unsig_hi;
+ *plow = unsig_lo;
}
- if (sgn_divsr) {
- divisor = 0 - divisor;
- }
-
- divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
-
- if (sgn_dvdnd ^ sgn_divsr) {
- *plow = 0 - *plow;
+ if (neg_remainder) {
+ return -rem;
+ } else {
+ return rem;
}
}
#endif
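
The rewritten divs128() reduces to the unsigned routine: it negates a negative dividend (carefully, across the two 64-bit halves) and a negative divisor, divides, and then restores the signs so that the quotient is negative exactly when the operand signs differ and the remainder takes the sign of the dividend, matching C's truncating division. A hypothetical check (not part of the patch):

    #include <assert.h>
    #include "qemu/host-utils.h"

    static void check_divs128_signs(void)
    {
        uint64_t lo = (uint64_t)-7;    /* low half of the 128-bit value -7 */
        int64_t hi = -1;               /* sign extension into the high half */
        int64_t rem = divs128(&lo, &hi, 2);

        /* -7 / 2 truncates toward zero: quotient -3, remainder -1 */
        assert(hi == -1);              /* high half of the quotient (sign bits) */
        assert((int64_t)lo == -3);     /* low half of the quotient */
        assert(rem == -1);             /* remainder keeps the dividend's sign */
    }
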