
Merge pull request #43 from mattico/add-add_f3-builtins

Add soft float addition builtins
Jorge Aparicio 8 years ago
parent
commit
ebadb12986
11 changed files with 474 additions and 80 deletions
  1. README.md (+2 -2)
  2. src/arm.rs (+2 -2)
  3. src/float/add.rs (+324 -0)
  4. src/float/mod.rs (+65 -0)
  5. src/int/mod.rs (+73 -0)
  6. src/int/mul.rs (+1 -1)
  7. src/int/sdiv.rs (+1 -1)
  8. src/int/shift.rs (+1 -1)
  9. src/int/udiv.rs (+1 -1)
  10. src/lib.rs (+3 -71)
  11. src/qc.rs (+1 -1)

+ 2 - 2
README.md

@@ -26,8 +26,8 @@ See [rust-lang/rust#35437][0].
 
 ## Progress
 
-- [ ] adddf3.c
-- [ ] addsf3.c
+- [x] adddf3.c
+- [x] addsf3.c
 - [ ] arm/adddf3vfp.S
 - [ ] arm/addsf3vfp.S
 - [ ] arm/aeabi_dcmp.S

+ 2 - 2
src/arm.rs

@@ -60,12 +60,12 @@ pub unsafe fn __aeabi_ldivmod() {
 // TODO: These two functions should be defined as aliases
 #[cfg_attr(not(test), no_mangle)]
 pub extern "C" fn __aeabi_uidiv(a: u32, b: u32) -> u32 {
-    ::udiv::__udivsi3(a, b)
+    ::int::udiv::__udivsi3(a, b)
 }
 
 #[cfg_attr(not(test), no_mangle)]
 pub extern "C" fn __aeabi_idiv(a: i32, b: i32) -> i32 {
-    ::sdiv::__divsi3(a, b)
+    ::int::sdiv::__divsi3(a, b)
 }
 
 extern "C" {

+ 324 - 0
src/float/add.rs

@@ -0,0 +1,324 @@
+use core::num::Wrapping;
+use float::Float;
+
+macro_rules! add {
+    ($intrinsic:ident: $ty:ty) => {
+        /// Returns `a + b`
+        #[allow(unused_parens)]
+        #[cfg_attr(not(test), no_mangle)]
+        pub extern fn $intrinsic(a: $ty, b: $ty) -> $ty {
+            let one = Wrapping(1 as <$ty as Float>::Int);
+            let zero = Wrapping(0 as <$ty as Float>::Int);
+
+            let bits =             Wrapping(<$ty>::bits() as <$ty as Float>::Int);
+            let significand_bits = Wrapping(<$ty>::significand_bits() as <$ty as Float>::Int);
+            let exponent_bits =    bits - significand_bits - one;
+            let max_exponent =     (one << exponent_bits.0 as usize) - one;
+
+            let implicit_bit =     one << significand_bits.0 as usize;
+            let significand_mask = implicit_bit - one;
+            let sign_bit =         one << (significand_bits + exponent_bits).0 as usize;
+            let abs_mask =         sign_bit - one;
+            let exponent_mask =    abs_mask ^ significand_mask;
+            let inf_rep =          exponent_mask;
+            let quiet_bit =        implicit_bit >> 1;
+            let qnan_rep =         exponent_mask | quiet_bit;
+
+            let mut a_rep = Wrapping(a.repr());
+            let mut b_rep = Wrapping(b.repr());
+            let a_abs = a_rep & abs_mask;
+            let b_abs = b_rep & abs_mask;
+
+            // Detect if a or b is zero, infinity, or NaN.
+            if a_abs - one >= inf_rep - one ||
+                b_abs - one >= inf_rep - one {
+                // NaN + anything = qNaN
+                if a_abs > inf_rep {
+                    return (<$ty as Float>::from_repr((a_abs | quiet_bit).0));
+                }
+                // anything + NaN = qNaN
+                if b_abs > inf_rep {
+                    return (<$ty as Float>::from_repr((b_abs | quiet_bit).0));
+                }
+
+                if a_abs == inf_rep {
+                    // +/-infinity + -/+infinity = qNaN
+                    if (a.repr() ^ b.repr()) == sign_bit.0 {
+                        return (<$ty as Float>::from_repr(qnan_rep.0));
+                    } else {
+                        // +/-infinity + anything remaining = +/- infinity
+                        return a;
+                    }
+                }
+
+                // anything remaining + +/-infinity = +/-infinity
+                if b_abs == inf_rep {
+                    return b;
+                }
+
+                // zero + anything = anything
+                if a_abs.0 == 0 {
+                    // but we need to get the sign right for zero + zero
+                    if b_abs.0 == 0 {
+                        return (<$ty as Float>::from_repr(a.repr() & b.repr()));
+                    } else {
+                        return b;
+                    }
+                }
+
+                // anything + zero = anything
+                if b_abs.0 == 0 {
+                     return a;
+                }
+            }
+
+            // Swap a and b if necessary so that a has the larger absolute value.
+            if b_abs > a_abs {
+                ::core::mem::swap(&mut a_rep, &mut b_rep);
+            }
+
+            // Extract the exponent and significand from the (possibly swapped) a and b.
+            let mut a_exponent = Wrapping((a_rep >> significand_bits.0 as usize & max_exponent).0 as i32);
+            let mut b_exponent = Wrapping((b_rep >> significand_bits.0 as usize & max_exponent).0 as i32);
+            let mut a_significand = a_rep & significand_mask;
+            let mut b_significand = b_rep & significand_mask;
+
+            // normalize any denormals, and adjust the exponent accordingly.
+            if a_exponent.0 == 0 {
+                let (exponent, significand) = <$ty>::normalize(a_significand.0);
+                a_exponent = Wrapping(exponent);
+                a_significand = Wrapping(significand); 
+            }
+            if b_exponent.0 == 0 {
+                let (exponent, significand) = <$ty>::normalize(b_significand.0);
+                b_exponent = Wrapping(exponent);
+                b_significand = Wrapping(significand); 
+            }
+
+            // The sign of the result is the sign of the larger operand, a.  If they
+            // have opposite signs, we are performing a subtraction; otherwise addition.
+            let result_sign = a_rep & sign_bit;
+            let subtraction = ((a_rep ^ b_rep) & sign_bit) != zero;
+
+            // Shift the significands to give us round, guard and sticky, and or in the
+            // implicit significand bit.  (If we fell through from the denormal path it
+            // was already set by normalize(), but setting it twice won't hurt
+            // anything.)
+            a_significand = (a_significand | implicit_bit) << 3;
+            b_significand = (b_significand | implicit_bit) << 3;
+
+            // Shift the significand of b by the difference in exponents, with a sticky
+            // bottom bit to get rounding correct.
+            let align = Wrapping((a_exponent - b_exponent).0 as <$ty as Float>::Int);
+            if align.0 != 0 {
+                if align < bits {
+                    let sticky = ((b_significand << (bits - align).0 as usize).0 != 0) as <$ty as Float>::Int;
+                    b_significand = (b_significand >> align.0 as usize) | Wrapping(sticky);
+                } else {
+                    b_significand = one; // sticky; b is known to be non-zero.
+                }
+            }
+            if subtraction {
+                a_significand -= b_significand;
+                // If a == -b, return +zero.
+                if a_significand.0 == 0 { 
+                    return (<$ty as Float>::from_repr(0)); 
+                }
+
+                // If partial cancellation occurred, we need to left-shift the result
+                // and adjust the exponent:
+                if a_significand < implicit_bit << 3 {
+                    let shift = a_significand.0.leading_zeros() as i32
+                        - (implicit_bit << 3).0.leading_zeros() as i32;
+                    a_significand <<= shift as usize;
+                    a_exponent -= Wrapping(shift);
+                }
+            } else /* addition */ {
+                a_significand += b_significand;
+
+                // If the addition carried up, we need to right-shift the result and
+                // adjust the exponent:
+                if (a_significand & implicit_bit << 4).0 != 0 {
+                    let sticky = ((a_significand & one).0 != 0) as <$ty as Float>::Int;
+                    a_significand = a_significand >> 1 | Wrapping(sticky);
+                    a_exponent += Wrapping(1);
+                }
+            }
+
+            // If we have overflowed the type, return +/- infinity:
+            if a_exponent >= Wrapping(max_exponent.0 as i32) { 
+                return (<$ty>::from_repr((inf_rep | result_sign).0));
+            }
+
+            if a_exponent.0 <= 0 {
+                // Result is denormal before rounding; the exponent is zero and we
+                // need to shift the significand.
+                let shift = Wrapping((Wrapping(1) - a_exponent).0 as <$ty as Float>::Int);
+                let sticky = ((a_significand << (bits - shift).0 as usize).0 != 0) as <$ty as Float>::Int;
+                a_significand = a_significand >> shift.0 as usize | Wrapping(sticky);
+                a_exponent = Wrapping(0);
+            }
+
+            // Low three bits are round, guard, and sticky.
+            let round_guard_sticky: i32 = (a_significand.0 & 0x7) as i32;
+
+            // Shift the significand into place, and mask off the implicit bit.
+            let mut result = a_significand >> 3 & significand_mask;
+
+            // Insert the exponent and sign.
+            result |= Wrapping(a_exponent.0 as <$ty as Float>::Int) << significand_bits.0 as usize;
+            result |= result_sign;
+
+            // Final rounding.  The result may overflow to infinity, but that is the
+            // correct result in that case.
+            if round_guard_sticky > 0x4 { result += one; }
+            if round_guard_sticky == 0x4 { result += result & one; }
+            return (<$ty>::from_repr(result.0));
+        }
+    }
+}
+
+add!(__addsf3: f32);
+add!(__adddf3: f64);
+
+// FIXME: Implement these using aliases
+#[cfg(target_arch = "arm")]
+#[cfg_attr(not(test), no_mangle)]
+pub extern fn __aeabi_dadd(a: f64, b: f64) -> f64 {
+    __adddf3(a, b)
+}
+
+#[cfg(target_arch = "arm")]
+#[cfg_attr(not(test), no_mangle)]
+pub extern fn __aeabi_fadd(a: f32, b: f32) -> f32 {
+    __addsf3(a, b)
+}
+
+#[cfg(test)]
+mod tests {
+    use core::{f32, f64};
+    use qc::{U32, U64};
+    use float::Float;
+
+    // NOTE The tests below have special handling for NaN values.
+    // Because NaN != NaN, the floating-point representations must be compared
+    // instead. Because there are many different values of NaN, and the
+    // implementation doesn't care about calculating the 'correct' one, the two
+    // values are considered equivalent if both are NaN.
+
+    // TODO: Add F32/F64 to qc so that they print the right values (at the very least)
+    quickcheck! {
+        fn addsf3(a: U32, b: U32) -> bool {
+            let (a, b) = (f32::from_repr(a.0), f32::from_repr(b.0));
+            let x = super::__addsf3(a, b);
+            let y = a + b;
+            if !(x.is_nan() && y.is_nan()) {
+                x.repr() == y.repr()
+            } else {
+                true
+            }
+        }
+
+        fn adddf3(a: U64, b: U64) -> bool {
+            let (a, b) = (f64::from_repr(a.0), f64::from_repr(b.0));
+            let x = super::__adddf3(a, b);
+            let y = a + b;
+            if !(x.is_nan() && y.is_nan()) {
+                x.repr() == y.repr()
+            } else {
+                true
+            }
+        }
+    }
+    
+    // More tests for special float values
+
+    #[test]
+    fn test_float_tiny_plus_tiny() {
+        let tiny = f32::from_repr(1);
+        let r = super::__addsf3(tiny, tiny);
+        assert_eq!(r, tiny + tiny);
+    }
+
+    #[test]
+    fn test_double_tiny_plus_tiny() {
+        let tiny = f64::from_repr(1);
+        let r = super::__adddf3(tiny, tiny);
+        assert_eq!(r, tiny + tiny);
+    }
+
+    #[test]
+    fn test_float_small_plus_small() {
+        let a = f32::from_repr(327);
+        let b = f32::from_repr(256);
+        let r = super::__addsf3(a, b);
+        assert_eq!(r, a + b);
+    }
+
+    #[test]
+    fn test_double_small_plus_small() {
+        let a = f64::from_repr(327);
+        let b = f64::from_repr(256);
+        let r = super::__adddf3(a, b);
+        assert_eq!(r, a + b);
+    }
+
+    #[test]
+    fn test_float_one_plus_one() {
+        let r = super::__addsf3(1f32, 1f32);
+        assert_eq!(r, 1f32 + 1f32);
+    }
+
+    #[test]
+    fn test_double_one_plus_one() {
+        let r = super::__adddf3(1f64, 1f64);
+        assert_eq!(r, 1f64 + 1f64);
+    }
+
+    #[test]
+    fn test_float_different_nan() {
+        let a = f32::from_repr(1);
+        let b = f32::from_repr(0b11111111100100010001001010101010);
+        let x = super::__addsf3(a, b);
+        let y = a + b;
+        if !(x.is_nan() && y.is_nan()) {
+            assert_eq!(x.repr(), y.repr());
+        }
+    }
+
+    #[test]
+    fn test_double_different_nan() {
+        let a = f64::from_repr(1);
+        let b = f64::from_repr(
+            0b1111111111110010001000100101010101001000101010000110100011101011);
+        let x = super::__adddf3(a, b);
+        let y = a + b;
+        if !(x.is_nan() && y.is_nan()) {
+            assert_eq!(x.repr(), y.repr());
+        }
+    }
+
+    #[test]
+    fn test_float_nan() {
+        let r = super::__addsf3(f32::NAN, 1.23);
+        assert_eq!(r.repr(), f32::NAN.repr());
+    }
+
+    #[test]
+    fn test_double_nan() {
+        let r = super::__adddf3(f64::NAN, 1.23);
+        assert_eq!(r.repr(), f64::NAN.repr());
+    }
+
+    #[test]
+    fn test_float_inf() {
+        let r = super::__addsf3(f32::INFINITY, -123.4);
+        assert_eq!(r, f32::INFINITY);
+    }
+
+    #[test]
+    fn test_double_inf() {
+        let r = super::__adddf3(f64::INFINITY, -123.4);
+        assert_eq!(r, f64::INFINITY);
+    }
+}
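
The final rounding step in the macro above folds the low three working bits (round, guard, sticky) into a round-to-nearest-even decision. Below is a minimal standalone sketch of just that step, not part of the commit and simplified to a bare significand (the real intrinsic applies it after the sign and exponent have already been inserted):

fn round_nearest_even(significand: u32) -> u32 {
    // Low three bits are round, guard, and sticky.
    let round_guard_sticky = significand & 0x7;
    // Shift the significand into place.
    let mut result = significand >> 3;
    // Above the halfway point: round up.
    if round_guard_sticky > 0x4 { result += 1; }
    // Exactly halfway: round up only if the result is currently odd (ties to even).
    if round_guard_sticky == 0x4 { result += result & 1; }
    result
}

fn main() {
    assert_eq!(round_nearest_even(0b1010_011), 0b1010); // below half: truncate
    assert_eq!(round_nearest_even(0b1010_101), 0b1011); // above half: round up
    assert_eq!(round_nearest_even(0b1010_100), 0b1010); // tie, result even: stays
    assert_eq!(round_nearest_even(0b1011_100), 0b1100); // tie, result odd: rounds up
}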

+ 65 - 0
src/float/mod.rs

@@ -0,0 +1,65 @@
+use core::mem;
+
+pub mod add;
+
+/// Trait for some basic operations on floats
+pub trait Float: Sized {
+    /// A uint of the same width as the float
+    type Int;
+    
+    /// Returns the bitwidth of the float type
+    fn bits() -> u32;
+
+    /// Returns the bitwidth of the significand
+    fn significand_bits() -> u32;
+
+    /// Returns `self` transmuted to `Self::Int`
+    fn repr(self) -> Self::Int;
+
+    /// Returns a `Self::Int` transmuted back to `Self` 
+    fn from_repr(a: Self::Int) -> Self;
+
+    /// Returns (normalized exponent, normalized significand)
+    fn normalize(significand: Self::Int) -> (i32, Self::Int);
+}
+
+impl Float for f32 {
+    type Int = u32;
+    fn bits() -> u32 {
+        32
+    }
+    fn significand_bits() -> u32 {
+        23
+    }
+    fn repr(self) -> Self::Int {
+        unsafe { mem::transmute(self) }
+    }
+    fn from_repr(a: Self::Int) -> Self {
+        unsafe { mem::transmute(a) }
+    }
+    fn normalize(significand: Self::Int) -> (i32, Self::Int) {
+        let shift = significand.leading_zeros()
+            .wrapping_sub((1u32 << Self::significand_bits()).leading_zeros());
+        (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int)
+    }
+}
+impl Float for f64 {
+    type Int = u64;
+    fn bits() -> u32 {
+        64
+    }
+    fn significand_bits() -> u32 {
+        52
+    }
+    fn repr(self) -> Self::Int {
+        unsafe { mem::transmute(self) }
+    }
+    fn from_repr(a: Self::Int) -> Self {
+        unsafe { mem::transmute(a) }
+    }
+    fn normalize(significand: Self::Int) -> (i32, Self::Int) {
+        let shift = significand.leading_zeros()
+            .wrapping_sub((1u64 << Self::significand_bits()).leading_zeros());
+        (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int)
+    }
+}
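
The Float trait above is what the add intrinsics lean on for bit-level access. A minimal usage sketch (not part of the commit, assuming it is compiled inside this crate so that `float::Float` is importable): repr()/from_repr() round-trip the bit pattern, and normalize() shifts a denormal significand up to the implicit-bit position while reporting the exponent adjustment.

use float::Float;

fn demo() {
    // Bit-pattern round trip.
    let x = 1.5f32;
    assert_eq!(f32::from_repr(x.repr()), x);

    // The smallest f32 denormal (repr == 1): the set bit moves up to bit 23
    // (the implicit-bit position) and the exponent adjustment is 1 - 23.
    let (exponent, significand) = f32::normalize(1);
    assert_eq!(significand, 1u32 << f32::significand_bits());
    assert_eq!(exponent, 1 - 23);
}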

+ 73 - 0
src/int/mod.rs

@@ -0,0 +1,73 @@
+
+pub mod mul;
+pub mod sdiv;
+pub mod shift;
+pub mod udiv;
+
+/// Trait for some basic operations on integers
+pub trait Int {
+    /// Returns the bitwidth of the int type
+    fn bits() -> u32;
+}
+
+// TODO: Once i128/u128 support lands, we'll want to add impls for those as well
+impl Int for u32 {
+    fn bits() -> u32 {
+        32
+    }
+}
+impl Int for i32 {
+    fn bits() -> u32 {
+        32
+    }
+}
+impl Int for u64 {
+    fn bits() -> u32 {
+        64
+    }
+}
+impl Int for i64 {
+    fn bits() -> u32 {
+        64
+    }
+}
+
+/// Trait to convert an integer to/from smaller parts
+pub trait LargeInt {
+    type LowHalf;
+    type HighHalf;
+
+    fn low(self) -> Self::LowHalf;
+    fn high(self) -> Self::HighHalf;
+    fn from_parts(low: Self::LowHalf, high: Self::HighHalf) -> Self;
+}
+
+// TODO: Once i128/u128 support lands, we'll want to add impls for those as well
+impl LargeInt for u64 {
+    type LowHalf = u32;
+    type HighHalf = u32;
+
+    fn low(self) -> u32 {
+        self as u32
+    }
+    fn high(self) -> u32 {
+        (self >> 32) as u32
+    }
+    fn from_parts(low: u32, high: u32) -> u64 {
+        low as u64 | ((high as u64) << 32)
+    }
+}
+impl LargeInt for i64 {
+    type LowHalf = u32;
+    type HighHalf = i32;
+
+    fn low(self) -> u32 {
+        self as u32
+    }
+    fn high(self) -> i32 {
+        (self >> 32) as i32
+    }
+    fn from_parts(low: u32, high: i32) -> i64 {
+        low as i64 | ((high as i64) << 32)
+    }
+}
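
The LargeInt trait added above gives the 64-bit intrinsics a uniform way to split a value into 32-bit halves and recombine it. A minimal usage sketch (not part of the commit, assuming it is compiled inside this crate so that `int::LargeInt` is importable):

use int::LargeInt;

fn demo() {
    // low()/high() decompose, from_parts() rebuilds.
    let x: u64 = 0x1234_5678_9ABC_DEF0;
    assert_eq!(x.low(), 0x9ABC_DEF0u32);
    assert_eq!(x.high(), 0x1234_5678u32);
    assert_eq!(u64::from_parts(x.low(), x.high()), x);

    // For i64 the high half is signed, so the sign bit lives there.
    let y: i64 = -1;
    assert_eq!(y.low(), 0xFFFF_FFFFu32);
    assert_eq!(y.high(), -1i32);
    assert_eq!(i64::from_parts(y.low(), y.high()), y);
}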

+ 1 - 1
src/mul.rs → src/int/mul.rs

@@ -1,4 +1,4 @@
-use {Int, LargeInt};
+use int::{Int, LargeInt};
 
 macro_rules! mul {
     ($intrinsic:ident: $ty:ty) => {

+ 1 - 1
src/sdiv.rs → src/int/sdiv.rs

@@ -1,4 +1,4 @@
-use Int;
+use int::Int;
 
 macro_rules! div {
     ($intrinsic:ident: $ty:ty, $uty:ty) => {

+ 1 - 1
src/shift.rs → src/int/shift.rs

@@ -1,4 +1,4 @@
-use {Int, LargeInt};
+use int::{Int, LargeInt};
 
 macro_rules! ashl {
     ($intrinsic:ident: $ty:ty) => {

+ 1 - 1
src/udiv.rs → src/int/udiv.rs

@@ -1,5 +1,5 @@
 use core::mem;
-use {Int, LargeInt};
+use int::{Int, LargeInt};
 
 /// Returns `n / d`
 #[cfg_attr(not(test), no_mangle)]

+ 3 - 71
src/lib.rs

@@ -20,83 +20,15 @@ extern crate core;
 #[cfg(all(not(windows), not(target_os = "macos")))]
 extern crate rlibc;
 
+pub mod int;
+pub mod float;
+
 #[cfg(target_arch = "arm")]
 pub mod arm;
 
 #[cfg(target_arch = "x86_64")]
 pub mod x86_64;
 
-pub mod udiv;
-pub mod sdiv;
-pub mod mul;
-pub mod shift;
-
 #[cfg(test)]
 mod qc;
 
-/// Trait for some basic operations on integers
-trait Int {
-    fn bits() -> u32;
-}
-
-// TODO: Once i128/u128 support lands, we'll want to add impls for those as well
-impl Int for u32 {
-    fn bits() -> u32 {
-        32
-    }
-}
-impl Int for i32 {
-    fn bits() -> u32 {
-        32
-    }
-}
-impl Int for u64 {
-    fn bits() -> u32 {
-        64
-    }
-}
-impl Int for i64 {
-    fn bits() -> u32 {
-        64
-    }
-}
-
-/// Trait to convert an integer to/from smaller parts
-trait LargeInt {
-    type LowHalf;
-    type HighHalf;
-
-    fn low(self) -> Self::LowHalf;
-    fn high(self) -> Self::HighHalf;
-    fn from_parts(low: Self::LowHalf, high: Self::HighHalf) -> Self;
-}
-
-// TODO: Once i128/u128 support lands, we'll want to add impls for those as well
-impl LargeInt for u64 {
-    type LowHalf = u32;
-    type HighHalf = u32;
-
-    fn low(self) -> u32 {
-        self as u32
-    }
-    fn high(self) -> u32 {
-        (self >> 32) as u32
-    }
-    fn from_parts(low: u32, high: u32) -> u64 {
-        low as u64 | ((high as u64) << 32)
-    }
-}
-impl LargeInt for i64 {
-    type LowHalf = u32;
-    type HighHalf = i32;
-
-    fn low(self) -> u32 {
-        self as u32
-    }
-    fn high(self) -> i32 {
-        (self >> 32) as i32
-    }
-    fn from_parts(low: u32, high: i32) -> i64 {
-        low as i64 | ((high as i64) << 32)
-    }
-}

+ 1 - 1
src/qc.rs

@@ -8,7 +8,7 @@ use std::fmt;
 
 use quickcheck::{Arbitrary, Gen};
 
-use LargeInt;
+use int::LargeInt;
 
 // Generates values in the full range of the integer type
 macro_rules! arbitrary {