Move from an "asm" flag to a "no-asm" feature flag (#386)

* Use a no-asm feature instead of an asm feature

This works better, as core/alloc/std have trouble supporting default
features in this crate.

Signed-off-by: Joe Richey <joerichey@google.com>

* Have no-asm disable arm assembly intrinsics

Signed-off-by: Joe Richey <joerichey@google.com>
Joseph Richey, 4 years ago
commit 63ccaf11f0
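
The practical effect is an inverted gate: code that previously had to be opted
into with #[cfg(feature = "asm")] is now compiled by default and only skipped
when the user opts out via the "no-asm" feature. Because Cargo features are
additive and core/alloc/std build this crate with default features disabled, a
default-on "asm" feature could not be relied upon, whereas a negative "no-asm"
feature keeps assembly enabled by default while still letting backends without
inline-assembly support (such as Cranelift) turn it off. A minimal sketch of
the pattern, with a hypothetical function name not taken from the crate:

    // Compiled by default; dropped when the `no-asm` feature is enabled.
    #[cfg(not(feature = "no-asm"))]
    fn fast_count_ones(x: u32) -> u32 {
        // an inline-assembly implementation would live here
        x.count_ones()
    }

    // Pure-Rust fallback, compiled only when `no-asm` is enabled.
    #[cfg(feature = "no-asm")]
    fn fast_count_ones(x: u32) -> u32 {
        x.count_ones()
    }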

+ 4 - 4
Cargo.toml

@@ -40,14 +40,14 @@ panic-handler = { path = 'crates/panic-handler' }
 [features]
 default = ["compiler-builtins"]
 
-# Some algorithms benefit from inline assembly, but some compiler backends do
-# not support it, so inline assembly is only enabled when this flag is set.
-asm = []
-
 # Enable compilation of C code in compiler-rt, filling in some more optimized
 # implementations and also filling in unimplemented intrinsics
 c = ["cc"]
 
+# Workaround for the Cranelift codegen backend. Disables any implementations
+# which use inline assembly, falling back to pure Rust versions (if available).
+no-asm = []
+
 # Flag this library as the unstable compiler-builtins lib
 compiler-builtins = []
 

+ 4 - 4
ci/run.sh

@@ -12,16 +12,16 @@ else
     $run --release
     $run --features c
     $run --features c --release
-    $run --features asm
-    $run --features asm --release
+    $run --features no-asm
+    $run --features no-asm --release
 fi
 
 cargo build --target $1
 cargo build --target $1 --release
 cargo build --target $1 --features c
 cargo build --target $1 --release --features c
-cargo build --target $1 --features asm
-cargo build --target $1 --release --features asm
+cargo build --target $1 --features no-asm
+cargo build --target $1 --release --features no-asm
 
 PREFIX=$(echo $1 | sed -e 's/unknown-//')-
 case $1 in

+ 2 - 0
src/arm.rs

@@ -1,3 +1,5 @@
+#![cfg(not(feature = "no-asm"))]
+
 use core::intrinsics;
 
 // NOTE This function and the ones below are implemented using assembly because they using a custom

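The new line in src/arm.rs is an inner attribute, so it applies to the whole
file: with "no-asm" enabled, every ARM assembly intrinsic in this module is
removed from the build, which is what the second bullet of the commit message
refers to. The same technique in miniature (module and function names here are
hypothetical):

    mod arm_intrinsics {
        // An inner cfg applies to the enclosing module, so every item in it
        // is compiled out when the `no-asm` feature is enabled.
        #![cfg(not(feature = "no-asm"))]

        pub fn aeabi_style_intrinsic() {
            // an assembly-backed body would live here
        }
    }
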
+ 8 - 8
src/int/specialized_div_rem/mod.rs

@@ -115,7 +115,7 @@ fn u64_by_u64_div_rem(duo: u64, div: u64) -> (u64, u64) {
 // microarchitecture can multiply and divide. We decide to be optimistic and assume `trifecta` is
 // faster if the target pointer width is at least 64.
 #[cfg(all(
-    not(all(feature = "asm", target_arch = "x86_64")),
+    not(all(not(feature = "no-asm"), target_arch = "x86_64")),
     not(any(target_pointer_width = "16", target_pointer_width = "32"))
 ))]
 impl_trifecta!(
@@ -131,7 +131,7 @@ impl_trifecta!(
 // If the pointer width less than 64, then the target architecture almost certainly does not have
 // the fast 64 to 128 bit widening multiplication needed for `trifecta` to be faster.
 #[cfg(all(
-    not(all(feature = "asm", target_arch = "x86_64")),
+    not(all(not(feature = "no-asm"), target_arch = "x86_64")),
     any(target_pointer_width = "16", target_pointer_width = "32")
 ))]
 impl_delegate!(
@@ -152,7 +152,7 @@ impl_delegate!(
 ///
 /// If the quotient does not fit in a `u64`, a floating point exception occurs.
 /// If `div == 0`, then a division by zero exception occurs.
-#[cfg(all(feature = "asm", target_arch = "x86_64"))]
+#[cfg(all(not(feature = "no-asm"), target_arch = "x86_64"))]
 #[inline]
 unsafe fn u128_by_u64_div_rem(duo: u128, div: u64) -> (u64, u64) {
     let duo_lo = duo as u64;
@@ -174,7 +174,7 @@ unsafe fn u128_by_u64_div_rem(duo: u128, div: u64) -> (u64, u64) {
 }
 
 // use `asymmetric` instead of `trifecta` on x86_64
-#[cfg(all(feature = "asm", target_arch = "x86_64"))]
+#[cfg(all(not(feature = "no-asm"), target_arch = "x86_64"))]
 impl_asymmetric!(
     u128_div_rem,
     zero_div_fn,
@@ -203,7 +203,7 @@ fn u32_by_u32_div_rem(duo: u32, div: u32) -> (u32, u32) {
 // When not on x86 and the pointer width is not 64, use `delegate` since the division size is larger
 // than register size.
 #[cfg(all(
-    not(all(feature = "asm", target_arch = "x86")),
+    not(all(not(feature = "no-asm"), target_arch = "x86")),
     not(target_pointer_width = "64")
 ))]
 impl_delegate!(
@@ -220,7 +220,7 @@ impl_delegate!(
 
 // When not on x86 and the pointer width is 64, use `binary_long`.
 #[cfg(all(
-    not(all(feature = "asm", target_arch = "x86")),
+    not(all(not(feature = "no-asm"), target_arch = "x86")),
     target_pointer_width = "64"
 ))]
 impl_binary_long!(
@@ -238,7 +238,7 @@ impl_binary_long!(
 ///
 /// If the quotient does not fit in a `u32`, a floating point exception occurs.
 /// If `div == 0`, then a division by zero exception occurs.
-#[cfg(all(feature = "asm", target_arch = "x86"))]
+#[cfg(all(not(feature = "no-asm"), target_arch = "x86"))]
 #[inline]
 unsafe fn u64_by_u32_div_rem(duo: u64, div: u32) -> (u32, u32) {
     let duo_lo = duo as u32;
@@ -260,7 +260,7 @@ unsafe fn u64_by_u32_div_rem(duo: u64, div: u32) -> (u32, u32) {
 }
 
 // use `asymmetric` instead of `delegate` on x86
-#[cfg(all(feature = "asm", target_arch = "x86"))]
+#[cfg(all(not(feature = "no-asm"), target_arch = "x86"))]
 impl_asymmetric!(
     u64_div_rem,
     zero_div_fn,

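Every cfg in this module that used to read all(feature = "asm", target_arch =
...) now reads all(not(feature = "no-asm"), target_arch = ...), and the
opposite branches negate exactly that same predicate, so for any combination
of features and target exactly one division implementation is selected. A
simplified sketch of that mutual exclusion (the bodies are placeholders, not
the crate's actual macro-generated algorithms):

    // x86_64 with assembly allowed: the `asymmetric` algorithm built on the
    // hardware divide instruction.
    #[cfg(all(not(feature = "no-asm"), target_arch = "x86_64"))]
    fn u128_div_rem(duo: u128, div: u128) -> (u128, u128) {
        (duo / div, duo % div) // placeholder for the asm-backed version
    }

    // Everything else (including x86_64 with `no-asm`): a pure-Rust algorithm.
    #[cfg(not(all(not(feature = "no-asm"), target_arch = "x86_64")))]
    fn u128_div_rem(duo: u128, div: u128) -> (u128, u128) {
        (duo / div, duo % div) // placeholder for the pure-Rust version
    }
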
+ 3 - 3
src/lib.rs

@@ -1,8 +1,8 @@
 #![cfg_attr(feature = "compiler-builtins", compiler_builtins)]
-#![cfg_attr(feature = "asm", feature(asm))]
+#![cfg_attr(not(feature = "no-asm"), feature(asm))]
 #![feature(abi_unadjusted)]
-#![feature(llvm_asm)]
-#![feature(global_asm)]
+#![cfg_attr(not(feature = "no-asm"), feature(llvm_asm))]
+#![cfg_attr(not(feature = "no-asm"), feature(global_asm))]
 #![feature(cfg_target_has_atomic)]
 #![feature(compiler_builtins)]
 #![feature(core_intrinsics)]

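In the crate root the unstable feature declarations themselves are wrapped in
cfg_attr, so a build with "no-asm" never even requests feature(asm),
feature(llvm_asm), or feature(global_asm) from the compiler; that is what lets
the crate compile on a backend that does not implement them. cfg_attr simply
attaches its attribute when the condition holds and does nothing otherwise, as
in this hypothetical item-level example:

    // `inline(always)` is applied only when assembly is enabled; the function
    // itself exists in every configuration.
    #[cfg_attr(not(feature = "no-asm"), inline(always))]
    fn helper() -> u8 {
        42
    }
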
+ 4 - 1
src/mem/mod.rs

@@ -10,7 +10,10 @@ use core::mem;
 use core::ops::{BitOr, Shl};
 
 // memcpy/memmove/memset have optimized implementations on some architectures
-#[cfg_attr(all(feature = "asm", target_arch = "x86_64"), path = "x86_64.rs")]
+#[cfg_attr(
+    all(not(feature = "no-asm"), target_arch = "x86_64"),
+    path = "x86_64.rs"
+)]
 mod impls;
 
 #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]

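Here the gate is attached to a path attribute rather than to the items
themselves: when assembly is allowed on x86_64, the impls module is loaded
from the hand-written x86_64.rs, and in every other configuration Rust's
normal module resolution falls back to the portable file next to it (impls.rs
or impls/mod.rs). A sketch of the pattern:

    // With the condition true, `mod impls;` reads the x86_64.rs file in this
    // directory; otherwise it resolves to the default impls module as usual.
    #[cfg_attr(
        all(not(feature = "no-asm"), target_arch = "x86_64"),
        path = "x86_64.rs"
    )]
    mod impls;
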
+ 2 - 0
src/probestack.rs

@@ -44,6 +44,8 @@
 #![cfg(not(feature = "mangled-names"))]
 // Windows already has builtins to do this.
 #![cfg(not(windows))]
+// All these builtins require assembly
+#![cfg(not(feature = "no-asm"))]
 // We only define stack probing for these architectures today.
 #![cfg(any(target_arch = "x86_64", target_arch = "x86"))]
 

+ 18 - 3
src/x86.rs

@@ -8,7 +8,12 @@ use core::intrinsics;
 // NOTE These functions are never mangled as they are not tested against compiler-rt
 // and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca
 
-#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
+#[cfg(all(
+    windows,
+    target_env = "gnu",
+    not(feature = "no-asm"),
+    not(feature = "mangled-names")
+))]
 #[naked]
 #[no_mangle]
 pub unsafe fn ___chkstk_ms() {
@@ -34,7 +39,12 @@ pub unsafe fn ___chkstk_ms() {
 }
 
 // FIXME: __alloca should be an alias to __chkstk
-#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
+#[cfg(all(
+    windows,
+    target_env = "gnu",
+    not(feature = "no-asm"),
+    not(feature = "mangled-names")
+))]
 #[naked]
 #[no_mangle]
 pub unsafe fn __alloca() {
@@ -43,7 +53,12 @@ pub unsafe fn __alloca() {
     intrinsics::unreachable();
 }
 
-#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
+#[cfg(all(
+    windows,
+    target_env = "gnu",
+    not(feature = "no-asm"),
+    not(feature = "mangled-names")
+))]
 #[naked]
 #[no_mangle]
 pub unsafe fn ___chkstk() {

+ 18 - 3
src/x86_64.rs

@@ -8,7 +8,12 @@ use core::intrinsics;
 // NOTE These functions are never mangled as they are not tested against compiler-rt
 // and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca
 
-#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
+#[cfg(all(
+    windows,
+    target_env = "gnu",
+    not(feature = "no-asm"),
+    not(feature = "mangled-names")
+))]
 #[naked]
 #[no_mangle]
 pub unsafe fn ___chkstk_ms() {
@@ -33,7 +38,12 @@ pub unsafe fn ___chkstk_ms() {
     intrinsics::unreachable();
 }
 
-#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
+#[cfg(all(
+    windows,
+    target_env = "gnu",
+    not(feature = "no-asm"),
+    not(feature = "mangled-names")
+))]
 #[naked]
 #[no_mangle]
 pub unsafe fn __alloca() {
@@ -43,7 +53,12 @@ pub unsafe fn __alloca() {
     intrinsics::unreachable();
 }
 
-#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
+#[cfg(all(
+    windows,
+    target_env = "gnu",
+    not(feature = "no-asm"),
+    not(feature = "mangled-names")
+))]
 #[naked]
 #[no_mangle]
 pub unsafe fn ___chkstk() {

+ 2 - 2
testcrate/Cargo.toml

@@ -28,8 +28,8 @@ utest-cortex-m-qemu = { default-features = false, git = "https://github.com/japa
 utest-macros = { git = "https://github.com/japaric/utest" }
 
 [features]
-default = ["asm", "mangled-names"]
-asm = ["compiler_builtins/asm"]
+default = ["mangled-names"]
 c = ["compiler_builtins/c"]
+no-asm = ["compiler_builtins/no-asm"]
 mem = ["compiler_builtins/mem"]
 mangled-names = ["compiler_builtins/mangled-names"]