Browse Source

Merge pull request #397 from AaronKutch/float_refactor

Amanieu d'Antras 4 years ago
parent
commit
73326c1727

+ 16 - 2
.github/workflows/main.yml

@@ -109,6 +109,20 @@ jobs:
     - uses: actions/checkout@v1
       with:
         submodules: true
-    - name: Install Rust
-      run: rustup update stable && rustup default stable && rustup component add rustfmt
+    - name: Install stable `rustfmt`
+      run: rustup set profile minimal && rustup default stable && rustup component add rustfmt
     - run: cargo fmt -- --check
+
+  clippy:
+    name: Clippy
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v1
+      with:
+        submodules: true
+    # Unlike rustfmt, stable clippy does not work on code with nightly features.
+    # This acquires the most recent nightly with a clippy component.
+    - name: Install nightly `clippy`
+      run: |
+        rustup set profile minimal && rustup default "nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/clippy)" && rustup component add clippy
+    - run: cargo clippy -- -D clippy::all

+ 1 - 1
crates/panic-handler/src/lib.rs

@@ -1,4 +1,4 @@
-// Hack of a crate until rust-lang/rust#51647 is fixed
+//! This is needed for tests on targets that require a `#[panic_handler]` function
 
 #![feature(no_core)]
 #![no_core]

+ 2 - 3
src/float/add.rs

@@ -137,9 +137,8 @@ where
             a_significand <<= shift;
             a_exponent -= shift;
         }
-    } else
-    /* addition */
-    {
+    } else {
+        // addition
         a_significand += b_significand;
 
         // If the addition carried up, we need to right-shift the result and

+ 9 - 12
src/float/cmp.rs

@@ -63,25 +63,22 @@ fn cmp<F: Float>(a: F, b: F) -> Result {
     // a and b as signed integers as we would with a floating-point compare.
     if a_srep & b_srep >= szero {
         if a_srep < b_srep {
-            return Result::Less;
+            Result::Less
         } else if a_srep == b_srep {
-            return Result::Equal;
+            Result::Equal
         } else {
-            return Result::Greater;
+            Result::Greater
         }
-    }
     // Otherwise, both are negative, so we need to flip the sense of the
     // comparison to get the correct result.  (This assumes a twos- or ones-
     // complement integer representation; if integers are represented in a
     // sign-magnitude representation, then this flip is incorrect).
-    else {
-        if a_srep > b_srep {
-            return Result::Less;
-        } else if a_srep == b_srep {
-            return Result::Equal;
-        } else {
-            return Result::Greater;
-        }
+    } else if a_srep > b_srep {
+        Result::Less
+    } else if a_srep == b_srep {
+        Result::Equal
+    } else {
+        Result::Greater
     }
 }
 

+ 144 - 155
src/float/conv.rs

@@ -1,90 +1,88 @@
 use float::Float;
-use int::Int;
-
-macro_rules! int_to_float {
-    ($i:expr, $ity:ty, $fty:ty) => {{
-        let i = $i;
-        if i == 0 {
-            return 0.0;
-        }
-
-        let mant_dig = <$fty>::SIGNIFICAND_BITS + 1;
-        let exponent_bias = <$fty>::EXPONENT_BIAS;
-
-        let n = <$ity as Int>::BITS;
-        let (s, a) = i.extract_sign();
-        let mut a = a;
-
-        // number of significant digits
-        let sd = n - a.leading_zeros();
-
-        // exponent
-        let mut e = sd - 1;
+use int::{CastInto, Int};
+
+fn int_to_float<I: Int, F: Float>(i: I) -> F
+where
+    F::Int: CastInto<u32>,
+    F::Int: CastInto<I>,
+    I::UnsignedInt: CastInto<F::Int>,
+    u32: CastInto<F::Int>,
+{
+    if i == I::ZERO {
+        return F::ZERO;
+    }
 
-        if <$ity as Int>::BITS < mant_dig {
-            return <$fty>::from_parts(
-                s,
-                (e + exponent_bias) as <$fty as Float>::Int,
-                (a as <$fty as Float>::Int) << (mant_dig - e - 1),
-            );
-        }
+    let two = I::UnsignedInt::ONE + I::UnsignedInt::ONE;
+    let four = two + two;
+    let sign = i < I::ZERO;
+    let mut x = Int::abs_diff(i, I::ZERO);
+
+    // number of significant digits in the integer
+    let i_sd = I::BITS - x.leading_zeros();
+    // significant digits for the float, including implicit bit
+    let f_sd = F::SIGNIFICAND_BITS + 1;
+
+    // exponent
+    let mut exp = i_sd - 1;
+
+    if I::BITS < f_sd {
+        return F::from_parts(
+            sign,
+            (exp + F::EXPONENT_BIAS).cast(),
+            x.cast() << (f_sd - exp - 1),
+        );
+    }
 
-        a = if sd > mant_dig {
-            /* start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
-             *  finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
-             *                                                12345678901234567890123456
-             *  1 = msb 1 bit
-             *  P = bit MANT_DIG-1 bits to the right of 1
-             *  Q = bit MANT_DIG bits to the right of 1
-             *  R = "or" of all bits to the right of Q
-             */
-            let mant_dig_plus_one = mant_dig + 1;
-            let mant_dig_plus_two = mant_dig + 2;
-            a = if sd == mant_dig_plus_one {
-                a << 1
-            } else if sd == mant_dig_plus_two {
-                a
-            } else {
-                (a >> (sd - mant_dig_plus_two)) as <$ity as Int>::UnsignedInt
-                    | ((a & <$ity as Int>::UnsignedInt::max_value())
-                        .wrapping_shl((n + mant_dig_plus_two) - sd)
-                        != 0) as <$ity as Int>::UnsignedInt
-            };
-
-            /* finish: */
-            a |= ((a & 4) != 0) as <$ity as Int>::UnsignedInt; /* Or P into R */
-            a += 1; /* round - this step may add a significant bit */
-            a >>= 2; /* dump Q and R */
-
-            /* a is now rounded to mant_dig or mant_dig+1 bits */
-            if (a & (1 << mant_dig)) != 0 {
-                a >>= 1;
-                e += 1;
-            }
-            a
-        /* a is now rounded to mant_dig bits */
+    x = if i_sd > f_sd {
+        // start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+        // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+        //                                               12345678901234567890123456
+        // 1 = the implicit bit
+        // P = bit f_sd-1 bits to the right of 1
+        // Q = bit f_sd bits to the right of 1
+        // R = "or" of all bits to the right of Q
+        let f_sd_add2 = f_sd + 2;
+        x = if i_sd == (f_sd + 1) {
+            x << 1
+        } else if i_sd == f_sd_add2 {
+            x
         } else {
-            a.wrapping_shl(mant_dig - sd)
-            /* a is now rounded to mant_dig bits */
+            (x >> (i_sd - f_sd_add2))
+                | Int::from_bool(
+                    (x & I::UnsignedInt::MAX).wrapping_shl((I::BITS + f_sd_add2) - i_sd)
+                        != Int::ZERO,
+                )
         };
 
-        <$fty>::from_parts(
-            s,
-            (e + exponent_bias) as <$fty as Float>::Int,
-            a as <$fty as Float>::Int,
-        )
-    }};
+        // R |= P
+        x |= Int::from_bool((x & four) != I::UnsignedInt::ZERO);
+        // round - this step may add a significant bit
+        x += Int::ONE;
+        // dump Q and R
+        x >>= 2;
+
+        // x is now rounded to f_sd or f_sd+1 bits
+        if (x & (I::UnsignedInt::ONE << f_sd)) != Int::ZERO {
+            x >>= 1;
+            exp += 1;
+        }
+        x
+    } else {
+        x.wrapping_shl(f_sd - i_sd)
+    };
+
+    F::from_parts(sign, (exp + F::EXPONENT_BIAS).cast(), x.cast())
 }
 
 intrinsics! {
     #[arm_aeabi_alias = __aeabi_i2f]
     pub extern "C" fn __floatsisf(i: i32) -> f32 {
-        int_to_float!(i, i32, f32)
+        int_to_float(i)
     }
 
     #[arm_aeabi_alias = __aeabi_i2d]
     pub extern "C" fn __floatsidf(i: i32) -> f64 {
-        int_to_float!(i, i32, f64)
+        int_to_float(i)
     }
 
     #[maybe_use_optimized_c_shim]
@@ -95,7 +93,7 @@ intrinsics! {
         if cfg!(target_arch = "x86_64") {
             i as f32
         } else {
-            int_to_float!(i, i64, f32)
+            int_to_float(i)
         }
     }
 
@@ -107,181 +105,172 @@ intrinsics! {
         if cfg!(target_arch = "x86_64") {
             i as f64
         } else {
-            int_to_float!(i, i64, f64)
+            int_to_float(i)
         }
     }
 
     #[unadjusted_on_win64]
     pub extern "C" fn __floattisf(i: i128) -> f32 {
-        int_to_float!(i, i128, f32)
+        int_to_float(i)
     }
 
     #[unadjusted_on_win64]
     pub extern "C" fn __floattidf(i: i128) -> f64 {
-        int_to_float!(i, i128, f64)
+        int_to_float(i)
     }
 
     #[arm_aeabi_alias = __aeabi_ui2f]
     pub extern "C" fn __floatunsisf(i: u32) -> f32 {
-        int_to_float!(i, u32, f32)
+        int_to_float(i)
     }
 
     #[arm_aeabi_alias = __aeabi_ui2d]
     pub extern "C" fn __floatunsidf(i: u32) -> f64 {
-        int_to_float!(i, u32, f64)
+        int_to_float(i)
     }
 
     #[maybe_use_optimized_c_shim]
     #[arm_aeabi_alias = __aeabi_ul2f]
     pub extern "C" fn __floatundisf(i: u64) -> f32 {
-        int_to_float!(i, u64, f32)
+        int_to_float(i)
     }
 
     #[maybe_use_optimized_c_shim]
     #[arm_aeabi_alias = __aeabi_ul2d]
     pub extern "C" fn __floatundidf(i: u64) -> f64 {
-        int_to_float!(i, u64, f64)
+        int_to_float(i)
     }
 
     #[unadjusted_on_win64]
     pub extern "C" fn __floatuntisf(i: u128) -> f32 {
-        int_to_float!(i, u128, f32)
+        int_to_float(i)
     }
 
     #[unadjusted_on_win64]
     pub extern "C" fn __floatuntidf(i: u128) -> f64 {
-        int_to_float!(i, u128, f64)
+        int_to_float(i)
     }
 }
 
-#[derive(PartialEq)]
-enum Sign {
-    Positive,
-    Negative,
-}
+fn float_to_int<F: Float, I: Int>(f: F) -> I
+where
+    F::ExpInt: CastInto<u32>,
+    u32: CastInto<F::ExpInt>,
+    F::Int: CastInto<I>,
+{
+    // converting NaNs is UB, so we don't consider them
+
+    let sign = f.sign();
+    let mut exp = f.exp();
 
-macro_rules! float_to_int {
-    ($f:expr, $fty:ty, $ity:ty) => {{
-        let f = $f;
-        let fixint_min = <$ity>::min_value();
-        let fixint_max = <$ity>::max_value();
-        let fixint_bits = <$ity as Int>::BITS as usize;
-        let fixint_unsigned = fixint_min == 0;
-
-        let sign_bit = <$fty>::SIGN_MASK;
-        let significand_bits = <$fty>::SIGNIFICAND_BITS as usize;
-        let exponent_bias = <$fty>::EXPONENT_BIAS as usize;
-        //let exponent_max = <$fty>::exponent_max() as usize;
-
-        // Break a into sign, exponent, significand
-        let a_rep = <$fty>::repr(f);
-        let a_abs = a_rep & !sign_bit;
-
-        // this is used to work around -1 not being available for unsigned
-        let sign = if (a_rep & sign_bit) == 0 {
-            Sign::Positive
+    // if less than one or unsigned & negative
+    if (exp < F::EXPONENT_BIAS.cast()) || (!I::SIGNED && sign) {
+        return I::ZERO;
+    }
+    exp -= F::EXPONENT_BIAS.cast();
+
+    // If the value is too large for `I`, saturate.
+    let bits: F::ExpInt = I::BITS.cast();
+    let max = if I::SIGNED {
+        bits - F::ExpInt::ONE
+    } else {
+        bits
+    };
+    if max <= exp {
+        return if sign {
+            // It happens that I::MIN is handled correctly
+            I::MIN
         } else {
-            Sign::Negative
+            I::MAX
         };
-        let mut exponent = (a_abs >> significand_bits) as usize;
-        let significand = (a_abs & <$fty>::SIGNIFICAND_MASK) | <$fty>::IMPLICIT_BIT;
+    };
 
-        // if < 1 or unsigned & negative
-        if exponent < exponent_bias || fixint_unsigned && sign == Sign::Negative {
-            return 0;
-        }
-        exponent -= exponent_bias;
-
-        // If the value is infinity, saturate.
-        // If the value is too large for the integer type, 0.
-        if exponent
-            >= (if fixint_unsigned {
-                fixint_bits
-            } else {
-                fixint_bits - 1
-            })
-        {
-            return if sign == Sign::Positive {
-                fixint_max
-            } else {
-                fixint_min
-            };
-        }
-        // If 0 <= exponent < significand_bits, right shift to get the result.
-        // Otherwise, shift left.
-        // (sign - 1) will never overflow as negative signs are already returned as 0 for unsigned
-        let r = if exponent < significand_bits {
-            (significand >> (significand_bits - exponent)) as $ity
+    // `0 <= exp < max`
+
+    // If 0 <= exponent < F::SIGNIFICAND_BITS, right shift to get the result. Otherwise, shift left.
+    let sig_bits: F::ExpInt = F::SIGNIFICAND_BITS.cast();
+    // The cast must be to the larger integer type, or else the shift overflows
+    let r: I = if F::Int::BITS < I::BITS {
+        let tmp: I = if exp < sig_bits {
+            f.imp_frac().cast() >> (sig_bits - exp).cast()
         } else {
-            (significand as $ity) << (exponent - significand_bits)
+            f.imp_frac().cast() << (exp - sig_bits).cast()
         };
-
-        if sign == Sign::Negative {
-            (!r).wrapping_add(1)
+        tmp
+    } else {
+        let tmp: F::Int = if exp < sig_bits {
+            f.imp_frac() >> (sig_bits - exp).cast()
         } else {
-            r
-        }
-    }};
+            f.imp_frac() << (exp - sig_bits).cast()
+        };
+        tmp.cast()
+    };
+
+    if sign {
+        r.wrapping_neg()
+    } else {
+        r
+    }
 }
 
 intrinsics! {
     #[arm_aeabi_alias = __aeabi_f2iz]
     pub extern "C" fn __fixsfsi(f: f32) -> i32 {
-        float_to_int!(f, f32, i32)
+        float_to_int(f)
     }
 
     #[arm_aeabi_alias = __aeabi_f2lz]
     pub extern "C" fn __fixsfdi(f: f32) -> i64 {
-        float_to_int!(f, f32, i64)
+        float_to_int(f)
     }
 
     #[unadjusted_on_win64]
     pub extern "C" fn __fixsfti(f: f32) -> i128 {
-        float_to_int!(f, f32, i128)
+        float_to_int(f)
     }
 
     #[arm_aeabi_alias = __aeabi_d2iz]
     pub extern "C" fn __fixdfsi(f: f64) -> i32 {
-        float_to_int!(f, f64, i32)
+        float_to_int(f)
     }
 
     #[arm_aeabi_alias = __aeabi_d2lz]
     pub extern "C" fn __fixdfdi(f: f64) -> i64 {
-        float_to_int!(f, f64, i64)
+        float_to_int(f)
     }
 
     #[unadjusted_on_win64]
     pub extern "C" fn __fixdfti(f: f64) -> i128 {
-        float_to_int!(f, f64, i128)
+        float_to_int(f)
     }
 
     #[arm_aeabi_alias = __aeabi_f2uiz]
     pub extern "C" fn __fixunssfsi(f: f32) -> u32 {
-        float_to_int!(f, f32, u32)
+        float_to_int(f)
     }
 
     #[arm_aeabi_alias = __aeabi_f2ulz]
     pub extern "C" fn __fixunssfdi(f: f32) -> u64 {
-        float_to_int!(f, f32, u64)
+        float_to_int(f)
     }
 
     #[unadjusted_on_win64]
     pub extern "C" fn __fixunssfti(f: f32) -> u128 {
-        float_to_int!(f, f32, u128)
+        float_to_int(f)
     }
 
     #[arm_aeabi_alias = __aeabi_d2uiz]
     pub extern "C" fn __fixunsdfsi(f: f64) -> u32 {
-        float_to_int!(f, f64, u32)
+        float_to_int(f)
     }
 
     #[arm_aeabi_alias = __aeabi_d2ulz]
     pub extern "C" fn __fixunsdfdi(f: f64) -> u64 {
-        float_to_int!(f, f64, u64)
+        float_to_int(f)
     }
 
     #[unadjusted_on_win64]
     pub extern "C" fn __fixunsdfti(f: f64) -> u128 {
-        float_to_int!(f, f64, u128)
+        float_to_int(f)
     }
 }

+ 4 - 0
src/float/div.rs

@@ -1,3 +1,7 @@
+// The functions are complex with many branches, and explicit
+// `return`s makes it clear where function exit points are
+#![allow(clippy::needless_return)]
+
 use float::Float;
 use int::{CastInto, DInt, HInt, Int};
 

+ 35 - 5
src/float/mod.rs

@@ -15,6 +15,7 @@ pub mod sub;
 #[doc(hidden)]
 pub trait Float:
     Copy
+    + core::fmt::Debug
     + PartialEq
     + PartialOrd
     + ops::AddAssign
@@ -30,6 +31,9 @@ pub trait Float:
     /// An int of the same width as the float
     type SignedInt: Int;
 
+    /// An int capable of containing the exponent bits plus a sign bit. This is signed.
+    type ExpInt: Int;
+
     const ZERO: Self;
     const ONE: Self;
 
@@ -71,6 +75,18 @@ pub trait Float:
     /// compared.
     fn eq_repr(self, rhs: Self) -> bool;
 
+    /// Returns the sign bit
+    fn sign(self) -> bool;
+
+    /// Returns the exponent with bias
+    fn exp(self) -> Self::ExpInt;
+
+    /// Returns the significand with no implicit bit (or the "fractional" part)
+    fn frac(self) -> Self::Int;
+
+    /// Returns the significand with implicit bit
+    fn imp_frac(self) -> Self::Int;
+
     /// Returns a `Self::Int` transmuted back to `Self`
     fn from_repr(a: Self::Int) -> Self;
 
@@ -81,14 +97,16 @@ pub trait Float:
     fn normalize(significand: Self::Int) -> (i32, Self::Int);
 
     /// Returns if `self` is subnormal
-    fn is_subnormal(&self) -> bool;
+    fn is_subnormal(self) -> bool;
 }
 
 macro_rules! float_impl {
-    ($ty:ident, $ity:ident, $sity:ident, $bits:expr, $significand_bits:expr) => {
+    ($ty:ident, $ity:ident, $sity:ident, $expty:ident, $bits:expr, $significand_bits:expr) => {
         impl Float for $ty {
             type Int = $ity;
             type SignedInt = $sity;
+            type ExpInt = $expty;
+
             const ZERO: Self = 0.0;
             const ONE: Self = 1.0;
 
@@ -113,6 +131,18 @@ macro_rules! float_impl {
                     self.repr() == rhs.repr()
                 }
             }
+            fn sign(self) -> bool {
+                self.signed_repr() < Self::SignedInt::ZERO
+            }
+            fn exp(self) -> Self::ExpInt {
+                ((self.to_bits() & Self::EXPONENT_MASK) >> Self::SIGNIFICAND_BITS) as Self::ExpInt
+            }
+            fn frac(self) -> Self::Int {
+                self.to_bits() & Self::SIGNIFICAND_MASK
+            }
+            fn imp_frac(self) -> Self::Int {
+                self.frac() | Self::IMPLICIT_BIT
+            }
             fn from_repr(a: Self::Int) -> Self {
                 Self::from_bits(a)
             }
@@ -132,12 +162,12 @@ macro_rules! float_impl {
                     significand << shift as Self::Int,
                 )
             }
-            fn is_subnormal(&self) -> bool {
+            fn is_subnormal(self) -> bool {
                 (self.repr() & Self::EXPONENT_MASK) == Self::Int::ZERO
             }
         }
     };
 }
 
-float_impl!(f32, u32, i32, 32, 23);
-float_impl!(f64, u64, i64, 64, 52);
+float_impl!(f32, u32, i32, i16, 32, 23);
+float_impl!(f64, u64, i64, i16, 64, 52);

+ 1 - 1
src/float/mul.rs

@@ -181,7 +181,7 @@ where
         product_high += product_high & one;
     }
 
-    return F::from_repr(product_high);
+    F::from_repr(product_high)
 }
 
 intrinsics! {

+ 21 - 25
src/float/pow.rs

@@ -1,40 +1,36 @@
 use float::Float;
 use int::Int;
 
-trait Pow: Float {
-    /// Returns `a` raised to the power `b`
-    fn pow(self, mut b: i32) -> Self {
-        let mut a = self;
-        let recip = b < 0;
-        let mut r = Self::ONE;
-        loop {
-            if (b & 1) != 0 {
-                r *= a;
-            }
-            b = b.aborting_div(2);
-            if b == 0 {
-                break;
-            }
-            a *= a;
+/// Returns `a` raised to the power `b`
+fn pow<F: Float>(a: F, b: i32) -> F {
+    let mut a = a;
+    let recip = b < 0;
+    let mut pow = i32::abs_diff(b, 0);
+    let mut mul = F::ONE;
+    loop {
+        if (pow & 1) != 0 {
+            mul *= a;
         }
-
-        if recip {
-            Self::ONE / r
-        } else {
-            r
+        pow >>= 1;
+        if pow == 0 {
+            break;
         }
+        a *= a;
     }
-}
 
-impl Pow for f32 {}
-impl Pow for f64 {}
+    if recip {
+        F::ONE / mul
+    } else {
+        mul
+    }
+}
 
 intrinsics! {
     pub extern "C" fn __powisf2(a: f32, b: i32) -> f32 {
-        a.pow(b)
+        pow(a, b)
     }
 
     pub extern "C" fn __powidf2(a: f64, b: i32) -> f64 {
-        a.pow(b)
+        pow(a, b)
     }
 }

+ 25 - 55
src/int/mod.rs

@@ -15,9 +15,11 @@ pub use self::leading_zeros::__clzsi2;
 #[doc(hidden)]
 pub trait Int:
     Copy
+    + core::fmt::Debug
     + PartialEq
     + PartialOrd
     + ops::AddAssign
+    + ops::SubAssign
     + ops::BitAndAssign
     + ops::BitOrAssign
     + ops::BitXorAssign
@@ -38,12 +40,16 @@ pub trait Int:
     /// Unsigned version of Self
     type UnsignedInt: Int;
 
+    /// If `Self` is a signed integer
+    const SIGNED: bool;
+
     /// The bitwidth of the int type
     const BITS: u32;
 
     const ZERO: Self;
     const ONE: Self;
     const MIN: Self;
+    const MAX: Self;
 
     /// LUT used for maximizing the space covered and minimizing the computational cost of fuzzing
     /// in `testcrate`. For example, Self = u128 produces [0,1,2,7,8,15,16,31,32,63,64,95,96,111,
@@ -52,18 +58,6 @@ pub trait Int:
     /// The number of entries of `FUZZ_LENGTHS` actually used. The maximum is 20 for u128.
     const FUZZ_NUM: usize;
 
-    /// Extracts the sign from self and returns a tuple.
-    ///
-    /// # Examples
-    ///
-    /// ```rust,ignore
-    /// let i = -25_i32;
-    /// let (sign, u) = i.extract_sign();
-    /// assert_eq!(sign, true);
-    /// assert_eq!(u, 25_u32);
-    /// ```
-    fn extract_sign(self) -> (bool, Self::UnsignedInt);
-
     fn unsigned(self) -> Self::UnsignedInt;
     fn from_unsigned(unsigned: Self::UnsignedInt) -> Self;
 
@@ -77,8 +71,6 @@ pub trait Int:
 
     // copied from primitive integers, but put in a trait
     fn is_zero(self) -> bool;
-    fn max_value() -> Self;
-    fn min_value() -> Self;
     fn wrapping_neg(self) -> Self;
     fn wrapping_add(self, other: Self) -> Self;
     fn wrapping_mul(self, other: Self) -> Self;
@@ -87,25 +79,18 @@ pub trait Int:
     fn wrapping_shr(self, other: u32) -> Self;
     fn rotate_left(self, other: u32) -> Self;
     fn overflowing_add(self, other: Self) -> (Self, bool);
-    fn aborting_div(self, other: Self) -> Self;
-    fn aborting_rem(self, other: Self) -> Self;
     fn leading_zeros(self) -> u32;
 }
 
-fn unwrap<T>(t: Option<T>) -> T {
-    match t {
-        Some(t) => t,
-        None => ::abort(),
-    }
-}
-
 macro_rules! int_impl_common {
     ($ty:ty) => {
-        const BITS: u32 = <Self>::BITS;
+        const BITS: u32 = <Self as Int>::ZERO.count_zeros();
+        const SIGNED: bool = Self::MIN != Self::ZERO;
 
         const ZERO: Self = 0;
         const ONE: Self = 1;
         const MIN: Self = <Self>::MIN;
+        const MAX: Self = <Self>::MAX;
 
         const FUZZ_LENGTHS: [u8; 20] = {
             let bits = <Self as Int>::BITS;
@@ -177,14 +162,6 @@ macro_rules! int_impl_common {
             self == Self::ZERO
         }
 
-        fn max_value() -> Self {
-            <Self>::max_value()
-        }
-
-        fn min_value() -> Self {
-            <Self>::min_value()
-        }
-
         fn wrapping_neg(self) -> Self {
             <Self>::wrapping_neg(self)
         }
@@ -217,14 +194,6 @@ macro_rules! int_impl_common {
             <Self>::overflowing_add(self, other)
         }
 
-        fn aborting_div(self, other: Self) -> Self {
-            unwrap(<Self>::checked_div(self, other))
-        }
-
-        fn aborting_rem(self, other: Self) -> Self {
-            unwrap(<Self>::checked_rem(self, other))
-        }
-
         fn leading_zeros(self) -> u32 {
             <Self>::leading_zeros(self)
         }
@@ -237,20 +206,22 @@ macro_rules! int_impl {
             type OtherSign = $ity;
             type UnsignedInt = $uty;
 
-            fn extract_sign(self) -> (bool, $uty) {
-                (false, self)
-            }
-
             fn unsigned(self) -> $uty {
                 self
             }
 
+            // It makes writing macros easier if this is implemented for both signed and unsigned
+            #[allow(clippy::wrong_self_convention)]
             fn from_unsigned(me: $uty) -> Self {
                 me
             }
 
             fn abs_diff(self, other: Self) -> Self {
-                (self.wrapping_sub(other) as $ity).wrapping_abs() as $uty
+                if self < other {
+                    other.wrapping_sub(self)
+                } else {
+                    self.wrapping_sub(other)
+                }
             }
 
             int_impl_common!($uty);
@@ -260,14 +231,6 @@ macro_rules! int_impl {
             type OtherSign = $uty;
             type UnsignedInt = $uty;
 
-            fn extract_sign(self) -> (bool, $uty) {
-                if self < 0 {
-                    (true, (!(self as $uty)).wrapping_add(1))
-                } else {
-                    (false, self as $uty)
-                }
-            }
-
             fn unsigned(self) -> $uty {
                 self as $uty
             }
@@ -391,13 +354,14 @@ impl_h_int!(
 );
 
 /// Trait to express (possibly lossy) casting of integers
-pub(crate) trait CastInto<T: Copy>: Copy {
+#[doc(hidden)]
+pub trait CastInto<T: Copy>: Copy {
     fn cast(self) -> T;
 }
 
 macro_rules! cast_into {
     ($ty:ty) => {
-        cast_into!($ty; usize, isize, u32, i32, u64, i64, u128, i128);
+        cast_into!($ty; usize, isize, u8, i8, u16, i16, u32, i32, u64, i64, u128, i128);
     };
     ($ty:ty; $($into:ty),*) => {$(
         impl CastInto<$into> for $ty {
@@ -408,6 +372,12 @@ macro_rules! cast_into {
     )*};
 }
 
+cast_into!(usize);
+cast_into!(isize);
+cast_into!(u8);
+cast_into!(i8);
+cast_into!(u16);
+cast_into!(i16);
 cast_into!(u32);
 cast_into!(i32);
 cast_into!(u64);

+ 7 - 0
src/int/specialized_div_rem/mod.rs

@@ -1,5 +1,12 @@
 // TODO: when `unsafe_block_in_unsafe_fn` is stabilized, remove this
 #![allow(unused_unsafe)]
+// The functions are complex with many branches, and explicit
+// `return`s makes it clear where function exit points are
+#![allow(clippy::needless_return)]
+#![allow(clippy::comparison_chain)]
+// Clippy is confused by the complex configuration
+#![allow(clippy::if_same_then_else)]
+#![allow(clippy::needless_bool)]
 
 //! This `specialized_div_rem` module is originally from version 1.0.0 of the
 //! `specialized-div-rem` crate. Note that `for` loops with ranges are not used in this

+ 2 - 5
src/lib.rs

@@ -16,6 +16,8 @@
 // compiler on ABIs and such, so we should be "good enough" for now and changes
 // to the `u128` ABI will be reflected here.
 #![allow(improper_ctypes, improper_ctypes_definitions)]
+// `mem::swap` cannot be used because it may generate references to memcpy in unoptimized code.
+#![allow(clippy::manual_swap)]
 
 // We disable #[no_mangle] for tests so that we can verify the test results
 // against the native compiler-rt implementations of the builtins.
@@ -30,11 +32,6 @@
 #[cfg(test)]
 extern crate core;
 
-#[allow(unused_unsafe)]
-fn abort() -> ! {
-    unsafe { core::intrinsics::abort() }
-}
-
 #[macro_use]
 mod macros;
 

+ 3 - 0
src/mem/mod.rs

@@ -1,3 +1,6 @@
+// Trying to satisfy clippy here is hopeless
+#![allow(clippy::style)]
+
 #[allow(warnings)]
 #[cfg(target_pointer_width = "16")]
 type c_int = i16;

+ 1 - 4
testcrate/Cargo.toml

@@ -8,14 +8,11 @@ edition = "2018"
 test = false
 doctest = false
 
-[build-dependencies]
-rand = "0.7"
-
 [dependencies]
 # For fuzzing tests we want a deterministic seedable RNG. We also eliminate potential
 # problems with system RNGs on the variety of platforms this crate is tested on.
 # `xoshiro128**` is used for its quality, size, and speed at generating `u32` shift amounts.
-rand_xoshiro = "0.4"
+rand_xoshiro = "0.6"
 
 [dependencies.compiler_builtins]
 path = ".."

+ 0 - 1443
testcrate/build.rs

@@ -1,1443 +0,0 @@
-use rand::seq::SliceRandom;
-use rand::Rng;
-use std::collections::HashMap;
-use std::fmt;
-use std::fmt::Write as FmtWrite;
-use std::fs::{self, OpenOptions};
-use std::hash::{Hash, Hasher};
-use std::io::Write;
-use std::path::PathBuf;
-use std::{env, mem};
-
-const NTESTS: usize = 1_000;
-
-fn main() {
-    let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap());
-    let out_file = out_dir.join("generated.rs");
-    drop(fs::remove_file(&out_file));
-
-    let target = env::var("TARGET").unwrap();
-    let target_arch_arm = target.contains("arm") || target.contains("thumb");
-    let target_arch_mips = target.contains("mips");
-
-    // TODO accept NaNs. We don't do that right now because we can't check
-    // for NaN-ness on the thumb targets (due to missing intrinsics)
-
-    // float/add.rs
-    gen(
-        |(a, b): (MyF64, MyF64)| {
-            let c = a.0 + b.0;
-            if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                None
-            } else {
-                Some(c)
-            }
-        },
-        "builtins::float::add::__adddf3(a, b)",
-    );
-    gen(
-        |(a, b): (MyF32, MyF32)| {
-            let c = a.0 + b.0;
-            if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                None
-            } else {
-                Some(c)
-            }
-        },
-        "builtins::float::add::__addsf3(a, b)",
-    );
-
-    if target_arch_arm {
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                let c = a.0 + b.0;
-                if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                    None
-                } else {
-                    Some(c)
-                }
-            },
-            "builtins::float::add::__adddf3vfp(a, b)",
-        );
-        gen(
-            |(a, b): (LargeF32, LargeF32)| {
-                let c = a.0 + b.0;
-                if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                    None
-                } else {
-                    Some(c)
-                }
-            },
-            "builtins::float::add::__addsf3vfp(a, b)",
-        );
-    }
-
-    // float/cmp.rs
-    gen(
-        |(a, b): (MyF64, MyF64)| {
-            let (a, b) = (a.0, b.0);
-            if a.is_nan() || b.is_nan() {
-                return None;
-            }
-
-            if a.is_nan() || b.is_nan() {
-                Some(-1)
-            } else if a < b {
-                Some(-1)
-            } else if a > b {
-                Some(1)
-            } else {
-                Some(0)
-            }
-        },
-        "builtins::float::cmp::__gedf2(a, b)",
-    );
-    gen(
-        |(a, b): (MyF32, MyF32)| {
-            let (a, b) = (a.0, b.0);
-            if a.is_nan() || b.is_nan() {
-                return None;
-            }
-
-            if a.is_nan() || b.is_nan() {
-                Some(-1)
-            } else if a < b {
-                Some(-1)
-            } else if a > b {
-                Some(1)
-            } else {
-                Some(0)
-            }
-        },
-        "builtins::float::cmp::__gesf2(a, b)",
-    );
-    gen(
-        |(a, b): (MyF64, MyF64)| {
-            let (a, b) = (a.0, b.0);
-            if a.is_nan() || b.is_nan() {
-                return None;
-            }
-
-            if a.is_nan() || b.is_nan() {
-                Some(1)
-            } else if a < b {
-                Some(-1)
-            } else if a > b {
-                Some(1)
-            } else {
-                Some(0)
-            }
-        },
-        "builtins::float::cmp::__ledf2(a, b)",
-    );
-    gen(
-        |(a, b): (MyF32, MyF32)| {
-            let (a, b) = (a.0, b.0);
-            if a.is_nan() || b.is_nan() {
-                return None;
-            }
-
-            if a.is_nan() || b.is_nan() {
-                Some(1)
-            } else if a < b {
-                Some(-1)
-            } else if a > b {
-                Some(1)
-            } else {
-                Some(0)
-            }
-        },
-        "builtins::float::cmp::__lesf2(a, b)",
-    );
-
-    gen(
-        |(a, b): (MyF32, MyF32)| {
-            let c = a.0.is_nan() || b.0.is_nan();
-            Some(c as i32)
-        },
-        "builtins::float::cmp::__unordsf2(a, b)",
-    );
-
-    gen(
-        |(a, b): (MyF64, MyF64)| {
-            let c = a.0.is_nan() || b.0.is_nan();
-            Some(c as i32)
-        },
-        "builtins::float::cmp::__unorddf2(a, b)",
-    );
-
-    if target_arch_arm {
-        gen(
-            |(a, b): (MyF32, MyF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                let c = (a.0 <= b.0) as i32;
-                Some(c)
-            },
-            "builtins::float::cmp::__aeabi_fcmple(a, b)",
-        );
-
-        gen(
-            |(a, b): (MyF32, MyF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                let c = (a.0 >= b.0) as i32;
-                Some(c)
-            },
-            "builtins::float::cmp::__aeabi_fcmpge(a, b)",
-        );
-
-        gen(
-            |(a, b): (MyF32, MyF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                let c = (a.0 == b.0) as i32;
-                Some(c)
-            },
-            "builtins::float::cmp::__aeabi_fcmpeq(a, b)",
-        );
-
-        gen(
-            |(a, b): (MyF32, MyF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                let c = (a.0 < b.0) as i32;
-                Some(c)
-            },
-            "builtins::float::cmp::__aeabi_fcmplt(a, b)",
-        );
-
-        gen(
-            |(a, b): (MyF32, MyF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                let c = (a.0 > b.0) as i32;
-                Some(c)
-            },
-            "builtins::float::cmp::__aeabi_fcmpgt(a, b)",
-        );
-
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                let c = (a.0 <= b.0) as i32;
-                Some(c)
-            },
-            "builtins::float::cmp::__aeabi_dcmple(a, b)",
-        );
-
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                let c = (a.0 >= b.0) as i32;
-                Some(c)
-            },
-            "builtins::float::cmp::__aeabi_dcmpge(a, b)",
-        );
-
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                let c = (a.0 == b.0) as i32;
-                Some(c)
-            },
-            "builtins::float::cmp::__aeabi_dcmpeq(a, b)",
-        );
-
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                let c = (a.0 < b.0) as i32;
-                Some(c)
-            },
-            "builtins::float::cmp::__aeabi_dcmplt(a, b)",
-        );
-
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                let c = (a.0 > b.0) as i32;
-                Some(c)
-            },
-            "builtins::float::cmp::__aeabi_dcmpgt(a, b)",
-        );
-
-        gen(
-            |(a, b): (LargeF32, LargeF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 >= b.0) as i32)
-            },
-            "builtins::float::cmp::__gesf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 >= b.0) as i32)
-            },
-            "builtins::float::cmp::__gedf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (LargeF32, LargeF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 > b.0) as i32)
-            },
-            "builtins::float::cmp::__gtsf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 > b.0) as i32)
-            },
-            "builtins::float::cmp::__gtdf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (LargeF32, LargeF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 < b.0) as i32)
-            },
-            "builtins::float::cmp::__ltsf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 < b.0) as i32)
-            },
-            "builtins::float::cmp::__ltdf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (LargeF32, LargeF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 <= b.0) as i32)
-            },
-            "builtins::float::cmp::__lesf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 <= b.0) as i32)
-            },
-            "builtins::float::cmp::__ledf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (LargeF32, LargeF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 != b.0) as i32)
-            },
-            "builtins::float::cmp::__nesf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 != b.0) as i32)
-            },
-            "builtins::float::cmp::__nedf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (LargeF32, LargeF32)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 == b.0) as i32)
-            },
-            "builtins::float::cmp::__eqsf2vfp(a, b)",
-        );
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if a.0.is_nan() || b.0.is_nan() {
-                    return None;
-                }
-                Some((a.0 == b.0) as i32)
-            },
-            "builtins::float::cmp::__eqdf2vfp(a, b)",
-        );
-    }
-
-    // float/extend.rs
-    gen(
-        |a: MyF32| {
-            if a.0.is_nan() {
-                return None;
-            }
-            Some(f64::from(a.0))
-        },
-        "builtins::float::extend::__extendsfdf2(a)",
-    );
-    if target_arch_arm {
-        gen(
-            |a: LargeF32| {
-                if a.0.is_nan() {
-                    return None;
-                }
-                Some(f64::from(a.0))
-            },
-            "builtins::float::extend::__extendsfdf2vfp(a)",
-        );
-    }
-
-    // float/conv.rs
-    gen(
-        |a: MyF64| i64::cast(a.0),
-        "builtins::float::conv::__fixdfdi(a)",
-    );
-    gen(
-        |a: MyF64| i32::cast(a.0),
-        "builtins::float::conv::__fixdfsi(a)",
-    );
-    gen(
-        |a: MyF32| i64::cast(a.0),
-        "builtins::float::conv::__fixsfdi(a)",
-    );
-    gen(
-        |a: MyF32| i32::cast(a.0),
-        "builtins::float::conv::__fixsfsi(a)",
-    );
-    gen(
-        |a: MyF32| i128::cast(a.0),
-        "builtins::float::conv::__fixsfti(a)",
-    );
-    gen(
-        |a: MyF64| i128::cast(a.0),
-        "builtins::float::conv::__fixdfti(a)",
-    );
-    gen(
-        |a: MyF64| u64::cast(a.0),
-        "builtins::float::conv::__fixunsdfdi(a)",
-    );
-    gen(
-        |a: MyF64| u32::cast(a.0),
-        "builtins::float::conv::__fixunsdfsi(a)",
-    );
-    gen(
-        |a: MyF32| u64::cast(a.0),
-        "builtins::float::conv::__fixunssfdi(a)",
-    );
-    gen(
-        |a: MyF32| u32::cast(a.0),
-        "builtins::float::conv::__fixunssfsi(a)",
-    );
-    gen(
-        |a: MyF32| u128::cast(a.0),
-        "builtins::float::conv::__fixunssfti(a)",
-    );
-    gen(
-        |a: MyF64| u128::cast(a.0),
-        "builtins::float::conv::__fixunsdfti(a)",
-    );
-    gen(
-        |a: MyI64| Some(a.0 as f64),
-        "builtins::float::conv::__floatdidf(a)",
-    );
-    gen(
-        |a: MyI32| Some(a.0 as f64),
-        "builtins::float::conv::__floatsidf(a)",
-    );
-    gen(
-        |a: MyI32| Some(a.0 as f32),
-        "builtins::float::conv::__floatsisf(a)",
-    );
-    gen(
-        |a: MyU64| Some(a.0 as f64),
-        "builtins::float::conv::__floatundidf(a)",
-    );
-    gen(
-        |a: MyU32| Some(a.0 as f64),
-        "builtins::float::conv::__floatunsidf(a)",
-    );
-    gen(
-        |a: MyU32| Some(a.0 as f32),
-        "builtins::float::conv::__floatunsisf(a)",
-    );
-    gen(
-        |a: MyU128| Some(a.0 as f32),
-        "builtins::float::conv::__floatuntisf(a)",
-    );
-    if !target_arch_mips {
-        gen(
-            |a: MyI128| Some(a.0 as f32),
-            "builtins::float::conv::__floattisf(a)",
-        );
-        gen(
-            |a: MyI128| Some(a.0 as f64),
-            "builtins::float::conv::__floattidf(a)",
-        );
-        gen(
-            |a: MyU128| Some(a.0 as f64),
-            "builtins::float::conv::__floatuntidf(a)",
-        );
-    }
-
-    // float/pow.rs
-    gen(
-        |(a, b): (MyF64, MyI32)| {
-            let c = a.0.powi(b.0);
-            if a.0.is_nan() || c.is_nan() {
-                None
-            } else {
-                Some(c)
-            }
-        },
-        "builtins::float::pow::__powidf2(a, b)",
-    );
-    gen(
-        |(a, b): (MyF32, MyI32)| {
-            let c = a.0.powi(b.0);
-            if a.0.is_nan() || c.is_nan() {
-                None
-            } else {
-                Some(c)
-            }
-        },
-        "builtins::float::pow::__powisf2(a, b)",
-    );
-
-    // float/sub.rs
-    gen(
-        |(a, b): (MyF64, MyF64)| {
-            let c = a.0 - b.0;
-            if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                None
-            } else {
-                Some(c)
-            }
-        },
-        "builtins::float::sub::__subdf3(a, b)",
-    );
-    gen(
-        |(a, b): (MyF32, MyF32)| {
-            let c = a.0 - b.0;
-            if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                None
-            } else {
-                Some(c)
-            }
-        },
-        "builtins::float::sub::__subsf3(a, b)",
-    );
-
-    if target_arch_arm {
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                let c = a.0 - b.0;
-                if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                    None
-                } else {
-                    Some(c)
-                }
-            },
-            "builtins::float::sub::__subdf3vfp(a, b)",
-        );
-        gen(
-            |(a, b): (LargeF32, LargeF32)| {
-                let c = a.0 - b.0;
-                if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                    None
-                } else {
-                    Some(c)
-                }
-            },
-            "builtins::float::sub::__subsf3vfp(a, b)",
-        );
-    }
-
-    // float/mul.rs
-    gen(
-        |(a, b): (MyF64, MyF64)| {
-            let c = a.0 * b.0;
-            if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                None
-            } else {
-                Some(c)
-            }
-        },
-        "builtins::float::mul::__muldf3(a, b)",
-    );
-    gen(
-        |(a, b): (LargeF32, LargeF32)| {
-            let c = a.0 * b.0;
-            if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                None
-            } else {
-                Some(c)
-            }
-        },
-        "builtins::float::mul::__mulsf3(a, b)",
-    );
-
-    if target_arch_arm {
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                let c = a.0 * b.0;
-                if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                    None
-                } else {
-                    Some(c)
-                }
-            },
-            "builtins::float::mul::__muldf3vfp(a, b)",
-        );
-        gen(
-            |(a, b): (LargeF32, LargeF32)| {
-                let c = a.0 * b.0;
-                if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
-                    None
-                } else {
-                    Some(c)
-                }
-            },
-            "builtins::float::mul::__mulsf3vfp(a, b)",
-        );
-    }
-
-    // float/div.rs
-    gen(
-        |(a, b): (MyF64, MyF64)| {
-            if b.0 == 0.0 {
-                return None;
-            }
-            let c = a.0 / b.0;
-            if a.0.is_nan()
-                || b.0.is_nan()
-                || c.is_nan()
-                || c.abs() <= f64::from_bits(4503599627370495u64)
-            {
-                None
-            } else {
-                Some(c)
-            }
-        },
-        "builtins::float::div::__divdf3(a, b)",
-    );
-    gen(
-        |(a, b): (LargeF32, LargeF32)| {
-            if b.0 == 0.0 {
-                return None;
-            }
-            let c = a.0 / b.0;
-            if a.0.is_nan() || b.0.is_nan() || c.is_nan() || c.abs() <= f32::from_bits(16777215u32)
-            {
-                None
-            } else {
-                Some(c)
-            }
-        },
-        "builtins::float::div::__divsf3(a, b)",
-    );
-
-    if target_arch_arm {
-        gen(
-            |(a, b): (MyF64, MyF64)| {
-                if b.0 == 0.0 {
-                    return None;
-                }
-                let c = a.0 / b.0;
-                if a.0.is_nan()
-                    || b.0.is_nan()
-                    || c.is_nan()
-                    || c.abs() <= f64::from_bits(4503599627370495u64)
-                {
-                    None
-                } else {
-                    Some(c)
-                }
-            },
-            "builtins::float::div::__divdf3vfp(a, b)",
-        );
-        gen(
-            |(a, b): (LargeF32, LargeF32)| {
-                if b.0 == 0.0 {
-                    return None;
-                }
-                let c = a.0 / b.0;
-                if a.0.is_nan()
-                    || b.0.is_nan()
-                    || c.is_nan()
-                    || c.abs() <= f32::from_bits(16777215u32)
-                {
-                    None
-                } else {
-                    Some(c)
-                }
-            },
-            "builtins::float::div::__divsf3vfp(a, b)",
-        );
-    }
-
-    // int/addsub.rs
-    gen(
-        |(a, b): (MyU128, MyU128)| Some(a.0.wrapping_add(b.0)),
-        "builtins::int::addsub::__rust_u128_add(a, b)",
-    );
-    gen(
-        |(a, b): (MyI128, MyI128)| Some(a.0.wrapping_add(b.0)),
-        "builtins::int::addsub::__rust_i128_add(a, b)",
-    );
-    gen(
-        |(a, b): (MyU128, MyU128)| Some(a.0.overflowing_add(b.0)),
-        "builtins::int::addsub::__rust_u128_addo(a, b)",
-    );
-    gen(
-        |(a, b): (MyI128, MyI128)| Some(a.0.overflowing_add(b.0)),
-        "builtins::int::addsub::__rust_i128_addo(a, b)",
-    );
-    gen(
-        |(a, b): (MyU128, MyU128)| Some(a.0.wrapping_sub(b.0)),
-        "builtins::int::addsub::__rust_u128_sub(a, b)",
-    );
-    gen(
-        |(a, b): (MyI128, MyI128)| Some(a.0.wrapping_sub(b.0)),
-        "builtins::int::addsub::__rust_i128_sub(a, b)",
-    );
-    gen(
-        |(a, b): (MyU128, MyU128)| Some(a.0.overflowing_sub(b.0)),
-        "builtins::int::addsub::__rust_u128_subo(a, b)",
-    );
-    gen(
-        |(a, b): (MyI128, MyI128)| Some(a.0.overflowing_sub(b.0)),
-        "builtins::int::addsub::__rust_i128_subo(a, b)",
-    );
-
-    // int/mul.rs
-    gen(
-        |(a, b): (MyU64, MyU64)| Some(a.0.wrapping_mul(b.0)),
-        "builtins::int::mul::__muldi3(a, b)",
-    );
-    gen(
-        |(a, b): (MyI64, MyI64)| Some(a.0.overflowing_mul(b.0)),
-        "{
-            let mut o = 2;
-            let c = builtins::int::mul::__mulodi4(a, b, &mut o);
-            (c, match o { 0 => false, 1 => true, _ => panic!() })
-        }",
-    );
-    gen(
-        |(a, b): (MyI32, MyI32)| Some(a.0.overflowing_mul(b.0)),
-        "{
-            let mut o = 2;
-            let c = builtins::int::mul::__mulosi4(a, b, &mut o);
-            (c, match o { 0 => false, 1 => true, _ => panic!() })
-        }",
-    );
-    gen(
-        |(a, b): (MyI128, MyI128)| Some(a.0.wrapping_mul(b.0)),
-        "builtins::int::mul::__multi3(a, b)",
-    );
-    gen(
-        |(a, b): (MyI128, MyI128)| Some(a.0.overflowing_mul(b.0)),
-        "{
-            let mut o = 2;
-            let c = builtins::int::mul::__muloti4(a, b, &mut o);
-            (c, match o { 0 => false, 1 => true, _ => panic!() })
-        }",
-    );
-
-    // int/sdiv.rs
-    gen(
-        |(a, b): (MyI64, MyI64)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 / b.0)
-            }
-        },
-        "builtins::int::sdiv::__divdi3(a, b)",
-    );
-    gen(
-        |(a, b): (MyI64, MyI64)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some((a.0 / b.0, a.0 % b.0))
-            }
-        },
-        "{
-            let mut r = 0;
-            (builtins::int::sdiv::__divmoddi4(a, b, &mut r), r)
-        }",
-    );
-    gen(
-        |(a, b): (MyI32, MyI32)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some((a.0 / b.0, a.0 % b.0))
-            }
-        },
-        "{
-            let mut r = 0;
-            (builtins::int::sdiv::__divmodsi4(a, b, &mut r), r)
-        }",
-    );
-    gen(
-        |(a, b): (MyI128, MyI128)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some((a.0 / b.0, a.0 % b.0))
-            }
-        },
-        "{
-            let mut r = 0;
-            (builtins::int::sdiv::__divmodti4(a, b, &mut r), r)
-        }",
-    );
-    gen(
-        |(a, b): (MyI32, MyI32)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 / b.0)
-            }
-        },
-        "builtins::int::sdiv::__divsi3(a, b)",
-    );
-    gen(
-        |(a, b): (MyI32, MyI32)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 % b.0)
-            }
-        },
-        "builtins::int::sdiv::__modsi3(a, b)",
-    );
-    gen(
-        |(a, b): (MyI64, MyI64)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 % b.0)
-            }
-        },
-        "builtins::int::sdiv::__moddi3(a, b)",
-    );
-    gen(
-        |(a, b): (MyI128, MyI128)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 / b.0)
-            }
-        },
-        "builtins::int::sdiv::__divti3(a, b)",
-    );
-    gen(
-        |(a, b): (MyI128, MyI128)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 % b.0)
-            }
-        },
-        "builtins::int::sdiv::__modti3(a, b)",
-    );
-
-    // int/shift.rs
-    gen(
-        |(a, b): (MyU32, MyU32)| Some(a.0 << (b.0 % 32)),
-        "builtins::int::shift::__ashlsi3(a, b % 32)",
-    );
-    gen(
-        |(a, b): (MyU64, MyU32)| Some(a.0 << (b.0 % 64)),
-        "builtins::int::shift::__ashldi3(a, b % 64)",
-    );
-    gen(
-        |(a, b): (MyU128, MyU32)| Some(a.0 << (b.0 % 128)),
-        "builtins::int::shift::__ashlti3(a, b % 128)",
-    );
-    gen(
-        |(a, b): (MyI32, MyU32)| Some(a.0 >> (b.0 % 32)),
-        "builtins::int::shift::__ashrsi3(a, b % 32)",
-    );
-    gen(
-        |(a, b): (MyI64, MyU32)| Some(a.0 >> (b.0 % 64)),
-        "builtins::int::shift::__ashrdi3(a, b % 64)",
-    );
-    gen(
-        |(a, b): (MyI128, MyU32)| Some(a.0 >> (b.0 % 128)),
-        "builtins::int::shift::__ashrti3(a, b % 128)",
-    );
-    gen(
-        |(a, b): (MyU32, MyU32)| Some(a.0 >> (b.0 % 32)),
-        "builtins::int::shift::__lshrsi3(a, b % 32)",
-    );
-    gen(
-        |(a, b): (MyU64, MyU32)| Some(a.0 >> (b.0 % 64)),
-        "builtins::int::shift::__lshrdi3(a, b % 64)",
-    );
-    gen(
-        |(a, b): (MyU128, MyU32)| Some(a.0 >> (b.0 % 128)),
-        "builtins::int::shift::__lshrti3(a, b % 128)",
-    );
-
-    // int/udiv.rs
-    gen(
-        |(a, b): (MyU64, MyU64)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 / b.0)
-            }
-        },
-        "builtins::int::udiv::__udivdi3(a, b)",
-    );
-    gen(
-        |(a, b): (MyU64, MyU64)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some((a.0 / b.0, a.0 % b.0))
-            }
-        },
-        "{
-            let mut r = 0;
-            (builtins::int::udiv::__udivmoddi4(a, b, Some(&mut r)), r)
-        }",
-    );
-    gen(
-        |(a, b): (MyU32, MyU32)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some((a.0 / b.0, a.0 % b.0))
-            }
-        },
-        "{
-            let mut r = 0;
-            (builtins::int::udiv::__udivmodsi4(a, b, Some(&mut r)), r)
-        }",
-    );
-    gen(
-        |(a, b): (MyU32, MyU32)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 / b.0)
-            }
-        },
-        "builtins::int::udiv::__udivsi3(a, b)",
-    );
-    gen(
-        |(a, b): (MyU32, MyU32)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 % b.0)
-            }
-        },
-        "builtins::int::udiv::__umodsi3(a, b)",
-    );
-    gen(
-        |(a, b): (MyU64, MyU64)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 % b.0)
-            }
-        },
-        "builtins::int::udiv::__umoddi3(a, b)",
-    );
-    gen(
-        |(a, b): (MyU128, MyU128)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 / b.0)
-            }
-        },
-        "builtins::int::udiv::__udivti3(a, b)",
-    );
-    gen(
-        |(a, b): (MyU128, MyU128)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some(a.0 % b.0)
-            }
-        },
-        "builtins::int::udiv::__umodti3(a, b)",
-    );
-    gen(
-        |(a, b): (MyU128, MyU128)| {
-            if b.0 == 0 {
-                None
-            } else {
-                Some((a.0 / b.0, a.0 % b.0))
-            }
-        },
-        "{
-            let mut r = 0;
-            (builtins::int::udiv::__udivmodti4(a, b, Some(&mut r)), r)
-        }",
-    );
-}
-
-macro_rules! gen_float {
-    ($name:ident,
-     $fty:ident,
-     $uty:ident,
-     $bits:expr,
-     $significand_bits:expr) => {
-        pub fn $name<R>(rng: &mut R) -> $fty
-        where
-            R: Rng + ?Sized,
-        {
-            const BITS: u8 = $bits;
-            const SIGNIFICAND_BITS: u8 = $significand_bits;
-
-            const SIGNIFICAND_MASK: $uty = (1 << SIGNIFICAND_BITS) - 1;
-            const SIGN_MASK: $uty = (1 << (BITS - 1));
-            const EXPONENT_MASK: $uty = !(SIGN_MASK | SIGNIFICAND_MASK);
-
-            fn mk_f32(sign: bool, exponent: $uty, significand: $uty) -> $fty {
-                unsafe {
-                    mem::transmute(
-                        ((sign as $uty) << (BITS - 1))
-                            | ((exponent & EXPONENT_MASK) << SIGNIFICAND_BITS)
-                            | (significand & SIGNIFICAND_MASK),
-                    )
-                }
-            }
-
-            if rng.gen_range(0, 10) == 1 {
-                // Special values
-                *[
-                    -0.0,
-                    0.0,
-                    ::std::$fty::MIN,
-                    ::std::$fty::MIN_POSITIVE,
-                    ::std::$fty::MAX,
-                    ::std::$fty::NAN,
-                    ::std::$fty::INFINITY,
-                    -::std::$fty::INFINITY,
-                ]
-                .choose(rng)
-                .unwrap()
-            } else if rng.gen_range(0, 10) == 1 {
-                // NaN patterns
-                mk_f32(rng.gen(), rng.gen(), 0)
-            } else if rng.gen() {
-                // Denormalized
-                mk_f32(rng.gen(), 0, rng.gen())
-            } else {
-                // Random anything
-                mk_f32(rng.gen(), rng.gen(), rng.gen())
-            }
-        }
-    };
-}
-
-gen_float!(gen_f32, f32, u32, 32, 23);
-gen_float!(gen_f64, f64, u64, 64, 52);
-
-macro_rules! gen_large_float {
-    ($name:ident,
-     $fty:ident,
-     $uty:ident,
-     $bits:expr,
-     $significand_bits:expr) => {
-        pub fn $name<R>(rng: &mut R) -> $fty
-        where
-            R: Rng + ?Sized,
-        {
-            const BITS: u8 = $bits;
-            const SIGNIFICAND_BITS: u8 = $significand_bits;
-
-            const SIGNIFICAND_MASK: $uty = (1 << SIGNIFICAND_BITS) - 1;
-            const SIGN_MASK: $uty = (1 << (BITS - 1));
-            const EXPONENT_MASK: $uty = !(SIGN_MASK | SIGNIFICAND_MASK);
-
-            fn mk_f32(sign: bool, exponent: $uty, significand: $uty) -> $fty {
-                unsafe {
-                    mem::transmute(
-                        ((sign as $uty) << (BITS - 1))
-                            | ((exponent & EXPONENT_MASK) << SIGNIFICAND_BITS)
-                            | (significand & SIGNIFICAND_MASK),
-                    )
-                }
-            }
-
-            if rng.gen_range(0, 10) == 1 {
-                // Special values
-                *[
-                    -0.0,
-                    0.0,
-                    ::std::$fty::MIN,
-                    ::std::$fty::MIN_POSITIVE,
-                    ::std::$fty::MAX,
-                    ::std::$fty::NAN,
-                    ::std::$fty::INFINITY,
-                    -::std::$fty::INFINITY,
-                ]
-                .choose(rng)
-                .unwrap()
-            } else if rng.gen_range(0, 10) == 1 {
-                // NaN patterns
-                mk_f32(rng.gen(), rng.gen(), 0)
-            } else if rng.gen() {
-                // Denormalized
-                mk_f32(rng.gen(), 0, rng.gen())
-            } else {
-                // Random anything
-                rng.gen::<$fty>()
-            }
-        }
-    };
-}
-
-gen_large_float!(gen_large_f32, f32, u32, 32, 23);
-gen_large_float!(gen_large_f64, f64, u64, 64, 52);
-
-trait TestInput: Hash + Eq + fmt::Debug {
-    fn ty_name() -> String;
-    fn generate_lets(container: &str, cnt: &mut u8) -> String;
-    fn generate_static(&self, dst: &mut String);
-}
-
-trait TestOutput {
-    fn ty_name() -> String;
-    fn generate_static(&self, dst: &mut String);
-    fn generate_expr(container: &str) -> String;
-}
-
-fn gen<F, A, R>(mut generate: F, test: &str)
-where
-    F: FnMut(A) -> Option<R>,
-    A: TestInput + Copy,
-    R: TestOutput,
-    rand::distributions::Standard: rand::distributions::Distribution<A>,
-{
-    let rng = &mut rand::thread_rng();
-    let testname = test.split("::").last().unwrap().split("(").next().unwrap();
-    let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap());
-    let out_file = out_dir.join("generated.rs");
-
-    let mut testcases = HashMap::new();
-    let mut n = NTESTS;
-    while n > 0 {
-        let input: A = rng.gen();
-        if testcases.contains_key(&input) {
-            continue;
-        }
-        let output = match generate(input) {
-            Some(o) => o,
-            None => continue,
-        };
-        testcases.insert(input, output);
-        n -= 1;
-    }
-
-    let mut contents = String::new();
-    contents.push_str(&format!("mod {} {{\nuse super::*;\n", testname));
-    contents.push_str("#[test]\n");
-    contents.push_str("fn test() {\n");
-    contents.push_str(&format!(
-        "static TESTS: [({}, {}); {}] = [\n",
-        A::ty_name(),
-        R::ty_name(),
-        NTESTS
-    ));
-    for (input, output) in testcases {
-        contents.push_str("    (");
-        input.generate_static(&mut contents);
-        contents.push_str(", ");
-        output.generate_static(&mut contents);
-        contents.push_str("),\n");
-    }
-    contents.push_str("];\n");
-
-    contents.push_str(&format!(
-        r#"
-        for &(inputs, output) in TESTS.iter() {{
-            {}
-            assert_eq!({}, {}, "inputs {{:?}}", inputs)
-        }}
-    "#,
-        A::generate_lets("inputs", &mut 0),
-        R::generate_expr("output"),
-        test,
-    ));
-    contents.push_str("\n}\n");
-    contents.push_str("\n}\n");
-
-    OpenOptions::new()
-        .write(true)
-        .append(true)
-        .create(true)
-        .open(out_file)
-        .unwrap()
-        .write_all(contents.as_bytes())
-        .unwrap();
-}
-
-macro_rules! my_float {
-    ($(struct $name:ident($inner:ident) = $gen:ident;)*) => ($(
-        #[derive(Debug, Clone, Copy)]
-        struct $name($inner);
-
-        impl TestInput for $name {
-            fn ty_name() -> String {
-                format!("u{}", &stringify!($inner)[1..])
-            }
-
-            fn generate_lets(container: &str, cnt: &mut u8) -> String {
-                let me = *cnt;
-                *cnt += 1;
-                format!("let {} = {}::from_bits({});\n",
-                        (b'a' + me) as char,
-                        stringify!($inner),
-                        container)
-            }
-
-            fn generate_static(&self, dst: &mut String) {
-                write!(dst, "{}", self.0.to_bits()).unwrap();
-            }
-        }
-
-        impl rand::distributions::Distribution<$name> for rand::distributions::Standard {
-            fn sample<R: rand::Rng + ?Sized >(&self, r: &mut R) -> $name {
-                $name($gen(r))
-            }
-        }
-
-        impl Hash for $name {
-            fn hash<H: Hasher>(&self, h: &mut H) {
-                self.0.to_bits().hash(h)
-            }
-        }
-
-        impl PartialEq for $name {
-            fn eq(&self, other: &$name) -> bool {
-                self.0.to_bits() == other.0.to_bits()
-            }
-        }
-
-        impl Eq for $name {}
-
-    )*)
-}
-
-my_float! {
-    struct MyF64(f64) = gen_f64;
-    struct LargeF64(f64) = gen_large_f64;
-    struct MyF32(f32) = gen_f32;
-    struct LargeF32(f32) = gen_large_f32;
-}
-
-macro_rules! my_integer {
-    ($(struct $name:ident($inner:ident);)*) => ($(
-        #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
-        struct $name($inner);
-
-        impl TestInput for $name {
-            fn ty_name() -> String {
-                stringify!($inner).to_string()
-            }
-
-            fn generate_lets(container: &str, cnt: &mut u8) -> String {
-                let me = *cnt;
-                *cnt += 1;
-                format!("let {} = {};\n",
-                        (b'a' + me) as char,
-                        container)
-            }
-
-            fn generate_static(&self, dst: &mut String) {
-                write!(dst, "{}", self.0).unwrap();
-            }
-        }
-
-        impl rand::distributions::Distribution<$name> for rand::distributions::Standard {
-            fn sample<R: rand::Rng + ?Sized >(&self, r: &mut R) -> $name {
-                let bits = (0 as $inner).count_zeros();
-                let mut mk = || {
-                    if r.gen_range(0, 10) == 1 {
-                        *[
-                            ::std::$inner::MAX >> (bits / 2),
-                            0,
-                            ::std::$inner::MIN >> (bits / 2),
-                        ].choose(r).unwrap()
-                    } else {
-                        r.gen::<$inner>()
-                    }
-                };
-                let a = mk();
-                let b = mk();
-                $name((a << (bits / 2)) | (b & (!0 << (bits / 2))))
-            }
-        }
-    )*)
-}
-
-my_integer! {
-    struct MyI32(i32);
-    struct MyI64(i64);
-    struct MyI128(i128);
-    struct MyU16(u16);
-    struct MyU32(u32);
-    struct MyU64(u64);
-    struct MyU128(u128);
-}
-
-impl<A, B> TestInput for (A, B)
-where
-    A: TestInput,
-    B: TestInput,
-{
-    fn ty_name() -> String {
-        format!("({}, {})", A::ty_name(), B::ty_name())
-    }
-
-    fn generate_lets(container: &str, cnt: &mut u8) -> String {
-        format!(
-            "{}{}",
-            A::generate_lets(&format!("{}.0", container), cnt),
-            B::generate_lets(&format!("{}.1", container), cnt)
-        )
-    }
-
-    fn generate_static(&self, dst: &mut String) {
-        dst.push_str("(");
-        self.0.generate_static(dst);
-        dst.push_str(", ");
-        self.1.generate_static(dst);
-        dst.push_str(")");
-    }
-}
-
-impl TestOutput for f64 {
-    fn ty_name() -> String {
-        "u64".to_string()
-    }
-
-    fn generate_static(&self, dst: &mut String) {
-        write!(dst, "{}", self.to_bits()).unwrap();
-    }
-
-    fn generate_expr(container: &str) -> String {
-        format!("f64::from_bits({})", container)
-    }
-}
-
-impl TestOutput for f32 {
-    fn ty_name() -> String {
-        "u32".to_string()
-    }
-
-    fn generate_static(&self, dst: &mut String) {
-        write!(dst, "{}", self.to_bits()).unwrap();
-    }
-
-    fn generate_expr(container: &str) -> String {
-        format!("f32::from_bits({})", container)
-    }
-}
-
-macro_rules! plain_test_output {
-    ($($i:tt)*) => ($(
-        impl TestOutput for $i {
-            fn ty_name() -> String {
-                stringify!($i).to_string()
-            }
-
-            fn generate_static(&self, dst: &mut String) {
-                write!(dst, "{}", self).unwrap();
-            }
-
-            fn generate_expr(container: &str) -> String {
-                container.to_string()
-            }
-        }
-    )*)
-}
-
-plain_test_output!(i32 i64 i128 u32 u64 u128 bool);
-
-impl<A, B> TestOutput for (A, B)
-where
-    A: TestOutput,
-    B: TestOutput,
-{
-    fn ty_name() -> String {
-        format!("({}, {})", A::ty_name(), B::ty_name())
-    }
-
-    fn generate_static(&self, dst: &mut String) {
-        dst.push_str("(");
-        self.0.generate_static(dst);
-        dst.push_str(", ");
-        self.1.generate_static(dst);
-        dst.push_str(")");
-    }
-
-    fn generate_expr(container: &str) -> String {
-        container.to_string()
-    }
-}
-
-trait FromFloat<T>: Sized {
-    fn cast(src: T) -> Option<Self>;
-}
-
-macro_rules! from_float {
-    ($($src:ident => $($dst:ident),+);+;) => {
-        $(
-            $(
-                impl FromFloat<$src> for $dst {
-                    fn cast(src: $src) -> Option<$dst> {
-                        use std::{$dst, $src};
-
-                        if src.is_nan() ||
-                            src.is_infinite() ||
-                            src < std::$dst::MIN as $src ||
-                            src > std::$dst::MAX as $src
-                        {
-                            None
-                        } else {
-                            Some(src as $dst)
-                        }
-                    }
-                }
-            )+
-        )+
-    }
-}
-
-from_float! {
-    f32 => i32, i64, i128, u32, u64, u128;
-    f64 => i32, i64, i128, u32, u64, u128;
-}

+ 17 - 0
testcrate/tests/addsub.rs

@@ -1,3 +1,5 @@
+#![allow(unused_macros)]
+
 use testcrate::*;
 
 macro_rules! sum {
@@ -107,3 +109,18 @@ fn float_addsub() {
         f64, __adddf3, __subdf3;
     );
 }
+
+#[cfg(target_arch = "arm")]
+#[test]
+fn float_addsub_arm() {
+    use compiler_builtins::float::{
+        add::{__adddf3vfp, __addsf3vfp},
+        sub::{__subdf3vfp, __subsf3vfp},
+        Float,
+    };
+
+    float_sum!(
+        f32, __addsf3vfp, __subsf3vfp;
+        f64, __adddf3vfp, __subdf3vfp;
+    );
+}

+ 60 - 0
testcrate/tests/cmp.rs

@@ -1,3 +1,5 @@
+#![allow(unused_macros)]
+
 use testcrate::*;
 
 macro_rules! cmp {
@@ -50,3 +52,61 @@ fn float_comparisons() {
         );
     });
 }
+
+macro_rules! cmp2 {
+    ($x:ident, $y:ident, $($unordered_val:expr, $fn_std:expr, $fn_builtins:ident);*;) => {
+        $(
+            let cmp0: i32 = if $x.is_nan() || $y.is_nan() {
+                $unordered_val
+            } else {
+                $fn_std as i32
+            };
+            let cmp1: i32 = $fn_builtins($x, $y);
+            if cmp0 != cmp1 {
+                panic!("{}({}, {}): std: {}, builtins: {}", stringify!($fn_builtins), $x, $y, cmp0, cmp1);
+            }
+        )*
+    };
+}
+
+#[cfg(target_arch = "arm")]
+#[test]
+fn float_comparisons_arm() {
+    use compiler_builtins::float::cmp::{
+        __aeabi_dcmpeq, __aeabi_dcmpge, __aeabi_dcmpgt, __aeabi_dcmple, __aeabi_dcmplt,
+        __aeabi_fcmpeq, __aeabi_fcmpge, __aeabi_fcmpgt, __aeabi_fcmple, __aeabi_fcmplt, __eqdf2vfp,
+        __eqsf2vfp, __gedf2vfp, __gesf2vfp, __gtdf2vfp, __gtsf2vfp, __ledf2vfp, __lesf2vfp,
+        __ltdf2vfp, __ltsf2vfp, __nedf2vfp, __nesf2vfp,
+    };
+
+    fuzz_float_2(N, |x: f32, y: f32| {
+        cmp2!(x, y,
+            0, x < y, __aeabi_fcmplt;
+            0, x <= y, __aeabi_fcmple;
+            0, x == y, __aeabi_fcmpeq;
+            0, x >= y, __aeabi_fcmpge;
+            0, x > y, __aeabi_fcmpgt;
+            0, x < y, __ltsf2vfp;
+            0, x <= y, __lesf2vfp;
+            0, x == y, __eqsf2vfp;
+            0, x >= y, __gesf2vfp;
+            0, x > y, __gtsf2vfp;
+            1, x != y, __nesf2vfp;
+        );
+    });
+    fuzz_float_2(N, |x: f64, y: f64| {
+        cmp2!(x, y,
+            0, x < y, __aeabi_dcmplt;
+            0, x <= y, __aeabi_dcmple;
+            0, x == y, __aeabi_dcmpeq;
+            0, x >= y, __aeabi_dcmpge;
+            0, x > y, __aeabi_dcmpgt;
+            0, x < y, __ltdf2vfp;
+            0, x <= y, __ledf2vfp;
+            0, x == y, __eqdf2vfp;
+            0, x >= y, __gedf2vfp;
+            0, x > y, __gtdf2vfp;
+            1, x != y, __nedf2vfp;
+        );
+    });
+}

+ 17 - 1
testcrate/tests/div_rem.rs

@@ -1,3 +1,5 @@
+#![allow(unused_macros)]
+
 use compiler_builtins::int::sdiv::{__divmoddi4, __divmodsi4, __divmodti4};
 use compiler_builtins::int::udiv::{__udivmoddi4, __udivmodsi4, __udivmodti4, u128_divide_sparc};
 use testcrate::*;
@@ -108,7 +110,7 @@ macro_rules! float {
                 let quo0 = x / y;
                 let quo1: $i = $fn(x, y);
                 // division of subnormals is not currently handled
-                if !(Float::is_subnormal(&quo0) || Float::is_subnormal(&quo1)) {
+                if !(Float::is_subnormal(quo0) || Float::is_subnormal(quo1)) {
                     if !Float::eq_repr(quo0, quo1) {
                         panic!(
                             "{}({}, {}): std: {}, builtins: {}",
@@ -134,3 +136,17 @@ fn float_div() {
         f64, __divdf3;
     );
 }
+
+#[cfg(target_arch = "arm")]
+#[test]
+fn float_div_arm() {
+    use compiler_builtins::float::{
+        div::{__divdf3vfp, __divsf3vfp},
+        Float,
+    };
+
+    float!(
+        f32, __divsf3vfp;
+        f64, __divdf3vfp;
+    );
+}

+ 0 - 37
testcrate/tests/generated.rs

@@ -1,37 +0,0 @@
-#![feature(lang_items)]
-#![allow(bad_style)]
-#![allow(unused_imports)]
-#![no_std]
-
-extern crate compiler_builtins as builtins;
-
-#[cfg(all(
-    target_arch = "arm",
-    not(any(target_env = "gnu", target_env = "musl")),
-    target_os = "linux",
-    test
-))]
-extern crate utest_cortex_m_qemu;
-
-#[cfg(all(
-    target_arch = "arm",
-    not(any(target_env = "gnu", target_env = "musl")),
-    target_os = "linux",
-    test
-))]
-#[macro_use]
-extern crate utest_macros;
-
-#[cfg(all(
-    target_arch = "arm",
-    not(any(target_env = "gnu", target_env = "musl")),
-    target_os = "linux",
-    test
-))]
-macro_rules! panic { // overrides `panic!`
-    ($($tt:tt)*) => {
-        upanic!($($tt)*);
-    };
-}
-
-include!(concat!(env!("OUT_DIR"), "/generated.rs"));

+ 67 - 22
testcrate/tests/misc.rs

@@ -1,3 +1,7 @@
+// makes configuration easier
+#![allow(unused_macros)]
+
+use compiler_builtins::float::Float;
 use testcrate::*;
 
 /// Make sure that the the edge case tester and randomized tester don't break, and list examples of
@@ -89,46 +93,87 @@ fn leading_zeros() {
     })
 }
 
+macro_rules! extend {
+    ($fX:ident, $fD:ident, $fn:ident) => {
+        fuzz_float(N, |x: $fX| {
+            let tmp0 = x as $fD;
+            let tmp1: $fD = $fn(x);
+            if !Float::eq_repr(tmp0, tmp1) {
+                panic!(
+                    "{}({}): std: {}, builtins: {}",
+                    stringify!($fn),
+                    x,
+                    tmp0,
+                    tmp1
+                );
+            }
+        });
+    };
+}
+
 #[test]
 fn float_extend() {
-    fuzz_float(N, |x: f32| {
-        let tmp0 = x as f64;
-        let tmp1: f64 = compiler_builtins::float::extend::__extendsfdf2(x);
-        if !compiler_builtins::float::Float::eq_repr(tmp0, tmp1) {
-            panic!("__extendsfdf2({}): std: {}, builtins: {}", x, tmp0, tmp1);
-        }
-    });
+    use compiler_builtins::float::extend::__extendsfdf2;
+
+    extend!(f32, f64, __extendsfdf2);
+}
+
+#[cfg(target_arch = "arm")]
+#[test]
+fn float_extend_arm() {
+    use compiler_builtins::float::extend::__extendsfdf2vfp;
+
+    extend!(f32, f64, __extendsfdf2vfp);
 }
 
-// This doesn't quite work because of issues related to
+// This is approximate because of issues related to
 // https://github.com/rust-lang/rust/issues/73920.
-// TODO how do we resolve this?
-/*
+// TODO how do we resolve this indeterminacy?
 macro_rules! pow {
-    ($($f:ty, $fn:ident);*;) => {
+    ($($f:ty, $tolerance:expr, $fn:ident);*;) => {
         $(
             fuzz_float_2(N, |x: $f, y: $f| {
-                let n = y as i32;
-                let tmp0: $f = x.powi(n);
-                let tmp1: $f = $fn(x, n);
-                if tmp0 != tmp1 {
-                    panic!(
-                        "{}({}, {}): std: {}, builtins: {}",
-                        stringify!($fn), x, y, tmp0, tmp1
-                    );
+                if !(Float::is_subnormal(x) || Float::is_subnormal(y) || x.is_nan()) {
+                    let n = y.to_bits() & !<$f as Float>::SIGNIFICAND_MASK;
+                    let n = (n as <$f as Float>::SignedInt) >> <$f as Float>::SIGNIFICAND_BITS;
+                    let n = n as i32;
+                    let tmp0: $f = x.powi(n);
+                    let tmp1: $f = $fn(x, n);
+                    let (a, b) = if tmp0 < tmp1 {
+                        (tmp0, tmp1)
+                    } else {
+                        (tmp1, tmp0)
+                    };
+                    let good = {
+                        if a == b {
+                            // handles infinity equality
+                            true
+                        } else if a < $tolerance {
+                            b < $tolerance
+                        } else {
+                            let quo = b / a;
+                            (quo < (1. + $tolerance)) && (quo > (1. - $tolerance))
+                        }
+                    };
+                    if !good {
+                        panic!(
+                            "{}({}, {}): std: {}, builtins: {}",
+                            stringify!($fn), x, n, tmp0, tmp1
+                        );
+                    }
                 }
             });
         )*
     };
 }
 
+#[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
 #[test]
 fn float_pow() {
     use compiler_builtins::float::pow::{__powidf2, __powisf2};
 
     pow!(
-        f32, __powisf2;
-        f64, __powidf2;
+        f32, 1e-4, __powisf2;
+        f64, 1e-12, __powidf2;
     );
 }
-*/

+ 17 - 1
testcrate/tests/mul.rs

@@ -1,3 +1,5 @@
+#![allow(unused_macros)]
+
 use testcrate::*;
 
 macro_rules! mul {
@@ -86,7 +88,7 @@ macro_rules! float_mul {
                 let mul0 = x * y;
                 let mul1: $f = $fn(x, y);
                 // multiplication of subnormals is not currently handled
-                if !(Float::is_subnormal(&mul0) || Float::is_subnormal(&mul1)) {
+                if !(Float::is_subnormal(mul0) || Float::is_subnormal(mul1)) {
                     if !Float::eq_repr(mul0, mul1) {
                         panic!(
                             "{}({}, {}): std: {}, builtins: {}",
@@ -112,3 +114,17 @@ fn float_mul() {
         f64, __muldf3;
     );
 }
+
+#[cfg(target_arch = "arm")]
+#[test]
+fn float_mul_arm() {
+    use compiler_builtins::float::{
+        mul::{__muldf3vfp, __mulsf3vfp},
+        Float,
+    };
+
+    float_mul!(
+        f32, __mulsf3vfp;
+        f64, __muldf3vfp;
+    );
+}