瀏覽代碼

Auto merge of #231 - paoloteti:vfp, r=alexcrichton

Collection of VFP intrinsics

Nothing really exciting here, just a list of trivial VFP intrinsics.

First of all, set `mfloat-abi=hard` for all hard-float ARM targets (not only Thumb ones); then add support for the following intrinsics:

```
 __gesf2vfp
 __gedf2vfp
 __gtsf2vfp
 __gtdf2vfp
 __ltsf2vfp
 __ltdf2vfp
 __nesf2vfp
 __nedf2vfp
 __eqsf2vfp
 __eqdf2vfp
 __extendsfdf2vfp
```
The resulting implementation is really trivial thanks to the native code generated by LLVM on hard-float targets.
bors 7 年之前
父節點
當前提交
266ea0740a
共有 5 個文件被更改,包括 150 次插入和 31 次刪除
  1. 16 16
      README.md
  2. 6 15
      build.rs
  3. 43 0
      src/float/cmp.rs
  4. 5 0
      src/float/extend.rs
  5. 80 0
      testcrate/build.rs

+ 16 - 16
README.md

@@ -87,8 +87,8 @@ features = ["c"]
 - [x] addsf3.c
 - [x] arm/adddf3vfp.S
 - [x] arm/addsf3vfp.S
-- [ ] arm/aeabi_dcmp.S
-- [ ] arm/aeabi_fcmp.S
+- [x] arm/aeabi_dcmp.S
+- [x] arm/aeabi_fcmp.S
 - [x] arm/aeabi_idivmod.S
 - [x] arm/aeabi_ldivmod.S
 - [x] arm/aeabi_memcpy.S
@@ -100,9 +100,9 @@ features = ["c"]
 - [ ] arm/divmodsi4.S (generic version is done)
 - [x] arm/divsf3vfp.S
 - [ ] arm/divsi3.S (generic version is done)
-- [ ] arm/eqdf2vfp.S
-- [ ] arm/eqsf2vfp.S
-- [ ] arm/extendsfdf2vfp.S
+- [x] arm/eqdf2vfp.S
+- [x] arm/eqsf2vfp.S
+- [x] arm/extendsfdf2vfp.S
 - [ ] arm/fixdfsivfp.S
 - [ ] arm/fixsfsivfp.S
 - [ ] arm/fixunsdfsivfp.S
@@ -111,22 +111,22 @@ features = ["c"]
 - [ ] arm/floatsisfvfp.S
 - [ ] arm/floatunssidfvfp.S
 - [ ] arm/floatunssisfvfp.S
-- [ ] arm/gedf2vfp.S
-- [ ] arm/gesf2vfp.S
-- [ ] arm/gtdf2vfp.S
-- [ ] arm/gtsf2vfp.S
+- [x] arm/gedf2vfp.S
+- [x] arm/gesf2vfp.S
+- [x] arm/gtdf2vfp.S
+- [x] arm/gtsf2vfp.S
 - [ ] arm/ledf2vfp.S
 - [ ] arm/lesf2vfp.S
-- [ ] arm/ltdf2vfp.S
-- [ ] arm/ltsf2vfp.S
+- [x] arm/ltdf2vfp.S
+- [x] arm/ltsf2vfp.S
 - [ ] arm/modsi3.S (generic version is done)
 - [x] arm/muldf3vfp.S
 - [x] arm/mulsf3vfp.S
-- [ ] arm/nedf2vfp.S
+- [x] arm/nedf2vfp.S
 - [ ] arm/negdf2vfp.S
 - [ ] arm/negsf2vfp.S
-- [ ] arm/nesf2vfp.S
-- [ ] arm/softfloat-alias.list
+- [x] arm/nesf2vfp.S
+- [x] arm/softfloat-alias.list
 - [x] arm/subdf3vfp.S
 - [x] arm/subsf3vfp.S
 - [ ] arm/truncdfsf2vfp.S
@@ -183,8 +183,8 @@ features = ["c"]
 - [x] mulsf3.c
 - [x] powidf2.c
 - [x] powisf2.c
-- [ ] subdf3.c
-- [ ] subsf3.c
+- [x] subdf3.c
+- [x] subsf3.c
 - [ ] truncdfhf2.c
 - [ ] truncdfsf2.c
 - [ ] truncsfhf2.c

+ 6 - 15
build.rs

@@ -103,7 +103,9 @@ mod c {
         let target_env = env::var("CARGO_CFG_TARGET_ENV").unwrap();
         let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap();
         let target_vendor = env::var("CARGO_CFG_TARGET_VENDOR").unwrap();
-
+        let target_arch_arm =
+            target_arch.contains("arm") ||
+            target_arch.contains("thumb");
         let cfg = &mut cc::Build::new();
 
         cfg.warnings(false);
@@ -137,10 +139,10 @@ mod c {
         // the implementation is not valid for the arch, then gcc will error when compiling it.
         if llvm_target[0].starts_with("thumb") {
             cfg.flag("-mthumb");
+        }
 
-            if llvm_target.last() == Some(&"eabihf") {
-                cfg.flag("-mfloat-abi=hard");
-            }
+        if target_arch_arm && llvm_target.last() == Some(&"eabihf") {
+            cfg.flag("-mfloat-abi=hard");
         }
 
         if llvm_target[0] == "thumbv6m" {
@@ -374,9 +376,6 @@ mod c {
             if !llvm_target[0].starts_with("thumbv7em") {
                 sources.extend(
                     &[
-                        "arm/eqdf2vfp.S",
-                        "arm/eqsf2vfp.S",
-                        "arm/extendsfdf2vfp.S",
                         "arm/fixdfsivfp.S",
                         "arm/fixsfsivfp.S",
                         "arm/fixunsdfsivfp.S",
@@ -385,16 +384,8 @@ mod c {
                         "arm/floatsisfvfp.S",
                         "arm/floatunssidfvfp.S",
                         "arm/floatunssisfvfp.S",
-                        "arm/gedf2vfp.S",
-                        "arm/gesf2vfp.S",
-                        "arm/gtdf2vfp.S",
-                        "arm/gtsf2vfp.S",
                         "arm/ledf2vfp.S",
                         "arm/lesf2vfp.S",
-                        "arm/ltdf2vfp.S",
-                        "arm/ltsf2vfp.S",
-                        "arm/nedf2vfp.S",
-                        "arm/nesf2vfp.S",
                         "arm/restore_vfp_d8_d15_regs.S",
                         "arm/save_vfp_d8_d15_regs.S",
                     ],

+ 43 - 0
src/float/cmp.rs

@@ -212,4 +212,47 @@ intrinsics! {
     pub extern "aapcs" fn __aeabi_dcmpgt(a: f64, b: f64) -> i32 {
         (__gtdf2(a, b) > 0) as i32
     }
+
+    // On hard-float targets LLVM will use native instructions
+    // for all VFP intrinsics below
+
+    pub extern "C" fn __gesf2vfp(a: f32, b: f32) -> i32 {
+        (a >= b) as i32
+    }
+
+    pub extern "C" fn __gedf2vfp(a: f64, b: f64) -> i32 {
+        (a >= b) as i32
+    }
+
+    pub extern "C" fn __gtsf2vfp(a: f32, b: f32) -> i32 {
+        (a > b) as i32
+    }
+
+    pub extern "C" fn __gtdf2vfp(a: f64, b: f64) -> i32 {
+        (a > b) as i32
+    }
+
+    pub extern "C" fn __ltsf2vfp(a: f32, b: f32) -> i32 {
+        (a < b) as i32
+    }
+
+    pub extern "C" fn __ltdf2vfp(a: f64, b: f64) -> i32 {
+        (a < b) as i32
+    }
+
+    pub extern "C" fn __nesf2vfp(a: f32, b: f32) -> i32 {
+        (a != b) as i32
+    }
+
+    pub extern "C" fn __nedf2vfp(a: f64, b: f64) -> i32 {
+        (a != b) as i32
+    }
+
+    pub extern "C" fn __eqsf2vfp(a: f32, b: f32) -> i32 {
+        (a == b) as i32
+    }
+
+    pub extern "C" fn __eqdf2vfp(a: f64, b: f64) -> i32 {
+        (a == b) as i32
+    }
 }

+ 5 - 0
src/float/extend.rs

@@ -74,4 +74,9 @@ intrinsics! {
     pub extern "C" fn  __extendsfdf2(a: f32) -> f64 {
         extend(a)
     }
+
+    #[cfg(target_arch = "arm")]
+    pub extern "C" fn  __extendsfdf2vfp(a: f32) -> f64 {
+        a as f64 // LLVM generate 'fcvtds'
+    }
 }

+ 80 - 0
testcrate/build.rs

@@ -233,6 +233,77 @@ fn main() {
                 Some(c)
             },
             "compiler_builtins::float::cmp::__aeabi_dcmpgt(a, b)");
+
+        gen(|(a, b): (LargeF32, LargeF32)| {
+                if a.0.is_nan() || b.0.is_nan() {
+                    return None;
+                }
+                Some((a.0 >= b.0) as i32)
+            },
+            "compiler_builtins::float::cmp::__gesf2vfp(a, b)");
+        gen(|(a, b): (MyF64, MyF64)| {
+                if a.0.is_nan() || b.0.is_nan() {
+                    return None;
+                }
+                Some((a.0 >= b.0) as i32)
+            },
+            "compiler_builtins::float::cmp::__gedf2vfp(a, b)");
+        gen(|(a, b): (LargeF32, LargeF32)| {
+                if a.0.is_nan() || b.0.is_nan() {
+                    return None;
+                }
+                Some((a.0 > b.0) as i32)
+            },
+            "compiler_builtins::float::cmp::__gtsf2vfp(a, b)");
+        gen(|(a, b): (MyF64, MyF64)| {
+                if a.0.is_nan() || b.0.is_nan() {
+                    return None;
+                }
+                Some((a.0 > b.0) as i32)
+            },
+            "compiler_builtins::float::cmp::__gtdf2vfp(a, b)");
+        gen(|(a, b): (LargeF32, LargeF32)| {
+                if a.0.is_nan() || b.0.is_nan() {
+                    return None;
+                }
+                Some((a.0 < b.0) as i32)
+            },
+            "compiler_builtins::float::cmp::__ltsf2vfp(a, b)");
+        gen(|(a, b): (MyF64, MyF64)| {
+                if a.0.is_nan() || b.0.is_nan() {
+                    return None;
+                }
+                Some((a.0 < b.0) as i32)
+            },
+            "compiler_builtins::float::cmp::__ltdf2vfp(a, b)");
+        gen(|(a, b): (LargeF32, LargeF32)| {
+                if a.0.is_nan() || b.0.is_nan() {
+                    return None;
+                }
+                Some((a.0 != b.0) as i32)
+            },
+            "compiler_builtins::float::cmp::__nesf2vfp(a, b)");
+        gen(|(a, b): (MyF64, MyF64)| {
+                if a.0.is_nan() || b.0.is_nan() {
+                    return None;
+                }
+                Some((a.0 != b.0) as i32)
+            },
+            "compiler_builtins::float::cmp::__nedf2vfp(a, b)");
+        gen(|(a, b): (LargeF32, LargeF32)| {
+                if a.0.is_nan() || b.0.is_nan() {
+                    return None;
+                }
+                Some((a.0 == b.0) as i32)
+            },
+            "compiler_builtins::float::cmp::__eqsf2vfp(a, b)");
+        gen(|(a, b): (MyF64, MyF64)| {
+                if a.0.is_nan() || b.0.is_nan() {
+                    return None;
+                }
+                Some((a.0 == b.0) as i32)
+            },
+            "compiler_builtins::float::cmp::__eqdf2vfp(a, b)");
     }
 
     // float/extend.rs
@@ -243,6 +314,15 @@ fn main() {
             Some(f64(a.0))
         },
         "compiler_builtins::float::extend::__extendsfdf2(a)");
+    if target_arch_arm {
+        gen(|a: LargeF32| {
+            if a.0.is_nan() {
+                return None;
+            }
+            Some(f64(a.0))
+        },
+        "compiler_builtins::float::extend::__extendsfdf2vfp(a)");
+    }
 
     // float/conv.rs
     gen(|a: MyF64| i64(a.0).ok(),