
Run rustfmt over everything

Alex Crichton 6 years ago
parent
commit
c88c9502b7

+ 192 - 221
build.rs

@@ -22,8 +22,9 @@ fn main() {
 
     // Forcibly enable memory intrinsics on wasm32 & SGX as we don't have a libc to
     // provide them.
-    if (target.contains("wasm32") && !target.contains("wasi")) ||
-        (target.contains("sgx") && target.contains("fortanix")) {
+    if (target.contains("wasm32") && !target.contains("wasi"))
+        || (target.contains("sgx") && target.contains("fortanix"))
+    {
         println!("cargo:rustc-cfg=feature=\"mem\"");
     }
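
The `cargo:rustc-cfg` line above is what later gates the `#[no_mangle]` attributes on the memory intrinsics in src/mem.rs (visible further down in this diff). A minimal sketch of the consuming side, assuming the same cfg name; the `memset` body is illustrative, written in the style of the `memcpy` shown later:

#[cfg_attr(feature = "mem", no_mangle)]
pub unsafe extern "C" fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 {
    let mut i = 0;
    while i < n {
        *s.offset(i as isize) = c as u8; // fill byte by byte
        i += 1;
    }
    s
}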
 
@@ -85,7 +86,9 @@ mod c {
 
     impl Sources {
         fn new() -> Sources {
-            Sources { map: BTreeMap::new() }
+            Sources {
+                map: BTreeMap::new(),
+            }
         }
 
         fn extend(&mut self, sources: &[&'static str]) {
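
The full `Sources` type isn't shown in this diff, but its API (`new`, `extend`, `remove`) suggests a map from a symbol stem to its source path. A hedged reconstruction, assuming that shape; the real build.rs may differ in detail:

use std::collections::BTreeMap;

struct Sources {
    // maps a symbol stem like "divsi3" to its source path, e.g. "arm/divsi3.S"
    map: BTreeMap<String, String>,
}

impl Sources {
    fn new() -> Sources {
        Sources {
            map: BTreeMap::new(),
        }
    }

    fn extend(&mut self, sources: &[&'static str]) {
        for src in sources {
            // strip any directory prefix and the ".c"/".S" extension
            let file = src.rsplit('/').next().unwrap();
            let stem = file.split('.').next().unwrap();
            self.map.insert(stem.to_string(), src.to_string());
        }
    }

    fn remove(&mut self, names: &[&str]) {
        for name in names {
            self.map.remove(*name);
        }
    }
}

fn main() {
    let mut sources = Sources::new();
    sources.extend(&["arm/divsi3.S", "clzsi2.c"]);
    sources.remove(&["clzsi2"]);
    assert!(sources.map.contains_key("divsi3"));
}
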
@@ -151,163 +154,144 @@ mod c {
         }
 
         let mut sources = Sources::new();
-        sources.extend(
-            &[
-                "absvdi2.c",
-                "absvsi2.c",
-                "addvdi3.c",
-                "addvsi3.c",
-                "apple_versioning.c",
-                "clzdi2.c",
-                "clzsi2.c",
-                "cmpdi2.c",
-                "ctzdi2.c",
-                "ctzsi2.c",
-                "divdc3.c",
-                "divsc3.c",
-                "divxc3.c",
-                "extendhfsf2.c",
-                "int_util.c",
-                "muldc3.c",
-                "mulsc3.c",
-                "mulvdi3.c",
-                "mulvsi3.c",
-                "mulxc3.c",
-                "negdf2.c",
-                "negdi2.c",
-                "negsf2.c",
-                "negvdi2.c",
-                "negvsi2.c",
-                "paritydi2.c",
-                "paritysi2.c",
-                "popcountdi2.c",
-                "popcountsi2.c",
-                "powixf2.c",
-                "subvdi3.c",
-                "subvsi3.c",
-                "truncdfhf2.c",
-                "truncdfsf2.c",
-                "truncsfhf2.c",
-                "ucmpdi2.c",
-            ],
-        );
+        sources.extend(&[
+            "absvdi2.c",
+            "absvsi2.c",
+            "addvdi3.c",
+            "addvsi3.c",
+            "apple_versioning.c",
+            "clzdi2.c",
+            "clzsi2.c",
+            "cmpdi2.c",
+            "ctzdi2.c",
+            "ctzsi2.c",
+            "divdc3.c",
+            "divsc3.c",
+            "divxc3.c",
+            "extendhfsf2.c",
+            "int_util.c",
+            "muldc3.c",
+            "mulsc3.c",
+            "mulvdi3.c",
+            "mulvsi3.c",
+            "mulxc3.c",
+            "negdf2.c",
+            "negdi2.c",
+            "negsf2.c",
+            "negvdi2.c",
+            "negvsi2.c",
+            "paritydi2.c",
+            "paritysi2.c",
+            "popcountdi2.c",
+            "popcountsi2.c",
+            "powixf2.c",
+            "subvdi3.c",
+            "subvsi3.c",
+            "truncdfhf2.c",
+            "truncdfsf2.c",
+            "truncsfhf2.c",
+            "ucmpdi2.c",
+        ]);
 
         // When compiling in rustbuild (the rust-lang/rust repo) this library
         // also needs to satisfy intrinsics that jemalloc or C in general may
         // need, so include a few more that aren't typically needed by
         // LLVM/Rust.
         if cfg!(feature = "rustbuild") {
-            sources.extend(&[
-                "ffsdi2.c",
-            ]);
+            sources.extend(&["ffsdi2.c"]);
         }
 
         // On iOS and 32-bit OSX these are all just empty intrinsics, no need to
         // include them.
         if target_os != "ios" && (target_vendor != "apple" || target_arch != "x86") {
-            sources.extend(
-                &[
-                    "absvti2.c",
-                    "addvti3.c",
-                    "clzti2.c",
-                    "cmpti2.c",
-                    "ctzti2.c",
-                    "ffsti2.c",
-                    "mulvti3.c",
-                    "negti2.c",
-                    "negvti2.c",
-                    "parityti2.c",
-                    "popcountti2.c",
-                    "subvti3.c",
-                    "ucmpti2.c",
-                ],
-            );
+            sources.extend(&[
+                "absvti2.c",
+                "addvti3.c",
+                "clzti2.c",
+                "cmpti2.c",
+                "ctzti2.c",
+                "ffsti2.c",
+                "mulvti3.c",
+                "negti2.c",
+                "negvti2.c",
+                "parityti2.c",
+                "popcountti2.c",
+                "subvti3.c",
+                "ucmpti2.c",
+            ]);
         }
 
         if target_vendor == "apple" {
-            sources.extend(
-                &[
-                    "atomic_flag_clear.c",
-                    "atomic_flag_clear_explicit.c",
-                    "atomic_flag_test_and_set.c",
-                    "atomic_flag_test_and_set_explicit.c",
-                    "atomic_signal_fence.c",
-                    "atomic_thread_fence.c",
-                ],
-            );
+            sources.extend(&[
+                "atomic_flag_clear.c",
+                "atomic_flag_clear_explicit.c",
+                "atomic_flag_test_and_set.c",
+                "atomic_flag_test_and_set_explicit.c",
+                "atomic_signal_fence.c",
+                "atomic_thread_fence.c",
+            ]);
         }
 
         if target_env == "msvc" {
             if target_arch == "x86_64" {
-                sources.extend(
-                    &[
-                        "x86_64/floatdisf.c",
-                        "x86_64/floatdixf.c",
-                    ],
-                );
+                sources.extend(&["x86_64/floatdisf.c", "x86_64/floatdixf.c"]);
             }
         } else {
             // None of these seem to be used on x86_64 windows, and they've all
             // got the wrong ABI anyway, so we want to avoid them.
             if target_os != "windows" {
                 if target_arch == "x86_64" {
-                    sources.extend(
-                        &[
-                            "x86_64/floatdisf.c",
-                            "x86_64/floatdixf.c",
-                            "x86_64/floatundidf.S",
-                            "x86_64/floatundisf.S",
-                            "x86_64/floatundixf.S",
-                        ],
-                    );
+                    sources.extend(&[
+                        "x86_64/floatdisf.c",
+                        "x86_64/floatdixf.c",
+                        "x86_64/floatundidf.S",
+                        "x86_64/floatundisf.S",
+                        "x86_64/floatundixf.S",
+                    ]);
                 }
             }
 
             if target_arch == "x86" {
-                sources.extend(
-                    &[
-                        "i386/ashldi3.S",
-                        "i386/ashrdi3.S",
-                        "i386/divdi3.S",
-                        "i386/floatdidf.S",
-                        "i386/floatdisf.S",
-                        "i386/floatdixf.S",
-                        "i386/floatundidf.S",
-                        "i386/floatundisf.S",
-                        "i386/floatundixf.S",
-                        "i386/lshrdi3.S",
-                        "i386/moddi3.S",
-                        "i386/muldi3.S",
-                        "i386/udivdi3.S",
-                        "i386/umoddi3.S",
-                    ],
-                );
+                sources.extend(&[
+                    "i386/ashldi3.S",
+                    "i386/ashrdi3.S",
+                    "i386/divdi3.S",
+                    "i386/floatdidf.S",
+                    "i386/floatdisf.S",
+                    "i386/floatdixf.S",
+                    "i386/floatundidf.S",
+                    "i386/floatundisf.S",
+                    "i386/floatundixf.S",
+                    "i386/lshrdi3.S",
+                    "i386/moddi3.S",
+                    "i386/muldi3.S",
+                    "i386/udivdi3.S",
+                    "i386/umoddi3.S",
+                ]);
             }
         }
 
         if target_arch == "arm" && target_os != "ios" && target_env != "msvc" {
-            sources.extend(
-                &[
-                    "arm/aeabi_div0.c",
-                    "arm/aeabi_drsub.c",
-                    "arm/aeabi_frsub.c",
-                    "arm/bswapdi2.S",
-                    "arm/bswapsi2.S",
-                    "arm/clzdi2.S",
-                    "arm/clzsi2.S",
-                    "arm/divmodsi4.S",
-                    "arm/divsi3.S",
-                    "arm/modsi3.S",
-                    "arm/switch16.S",
-                    "arm/switch32.S",
-                    "arm/switch8.S",
-                    "arm/switchu8.S",
-                    "arm/sync_synchronize.S",
-                    "arm/udivmodsi4.S",
-                    "arm/udivsi3.S",
-                    "arm/umodsi3.S",
-                ],
-            );
+            sources.extend(&[
+                "arm/aeabi_div0.c",
+                "arm/aeabi_drsub.c",
+                "arm/aeabi_frsub.c",
+                "arm/bswapdi2.S",
+                "arm/bswapsi2.S",
+                "arm/clzdi2.S",
+                "arm/clzsi2.S",
+                "arm/divmodsi4.S",
+                "arm/divsi3.S",
+                "arm/modsi3.S",
+                "arm/switch16.S",
+                "arm/switch32.S",
+                "arm/switch8.S",
+                "arm/switchu8.S",
+                "arm/sync_synchronize.S",
+                "arm/udivmodsi4.S",
+                "arm/udivsi3.S",
+                "arm/umodsi3.S",
+            ]);
 
             if target_os == "freebsd" {
                 sources.extend(&["clear_cache.c"]);
@@ -316,100 +300,89 @@ mod c {
            // First of all, aeabi_cdcmp and aeabi_cfcmp are never called by LLVM.
            // Second, they are little-endian only, so builds fail on big-endian targets.
            // Temporary workaround: exclude these files for big-endian targets.
-            if !llvm_target[0].starts_with("thumbeb") &&
-               !llvm_target[0].starts_with("armeb") {
-                sources.extend(
-                    &[
-                        "arm/aeabi_cdcmp.S",
-                        "arm/aeabi_cdcmpeq_check_nan.c",
-                        "arm/aeabi_cfcmp.S",
-                        "arm/aeabi_cfcmpeq_check_nan.c",
-                    ],
-                );
+            if !llvm_target[0].starts_with("thumbeb") && !llvm_target[0].starts_with("armeb") {
+                sources.extend(&[
+                    "arm/aeabi_cdcmp.S",
+                    "arm/aeabi_cdcmpeq_check_nan.c",
+                    "arm/aeabi_cfcmp.S",
+                    "arm/aeabi_cfcmpeq_check_nan.c",
+                ]);
             }
         }
 
         if llvm_target[0] == "armv7" {
-            sources.extend(
-                &[
-                    "arm/sync_fetch_and_add_4.S",
-                    "arm/sync_fetch_and_add_8.S",
-                    "arm/sync_fetch_and_and_4.S",
-                    "arm/sync_fetch_and_and_8.S",
-                    "arm/sync_fetch_and_max_4.S",
-                    "arm/sync_fetch_and_max_8.S",
-                    "arm/sync_fetch_and_min_4.S",
-                    "arm/sync_fetch_and_min_8.S",
-                    "arm/sync_fetch_and_nand_4.S",
-                    "arm/sync_fetch_and_nand_8.S",
-                    "arm/sync_fetch_and_or_4.S",
-                    "arm/sync_fetch_and_or_8.S",
-                    "arm/sync_fetch_and_sub_4.S",
-                    "arm/sync_fetch_and_sub_8.S",
-                    "arm/sync_fetch_and_umax_4.S",
-                    "arm/sync_fetch_and_umax_8.S",
-                    "arm/sync_fetch_and_umin_4.S",
-                    "arm/sync_fetch_and_umin_8.S",
-                    "arm/sync_fetch_and_xor_4.S",
-                    "arm/sync_fetch_and_xor_8.S",
-                ],
-            );
+            sources.extend(&[
+                "arm/sync_fetch_and_add_4.S",
+                "arm/sync_fetch_and_add_8.S",
+                "arm/sync_fetch_and_and_4.S",
+                "arm/sync_fetch_and_and_8.S",
+                "arm/sync_fetch_and_max_4.S",
+                "arm/sync_fetch_and_max_8.S",
+                "arm/sync_fetch_and_min_4.S",
+                "arm/sync_fetch_and_min_8.S",
+                "arm/sync_fetch_and_nand_4.S",
+                "arm/sync_fetch_and_nand_8.S",
+                "arm/sync_fetch_and_or_4.S",
+                "arm/sync_fetch_and_or_8.S",
+                "arm/sync_fetch_and_sub_4.S",
+                "arm/sync_fetch_and_sub_8.S",
+                "arm/sync_fetch_and_umax_4.S",
+                "arm/sync_fetch_and_umax_8.S",
+                "arm/sync_fetch_and_umin_4.S",
+                "arm/sync_fetch_and_umin_8.S",
+                "arm/sync_fetch_and_xor_4.S",
+                "arm/sync_fetch_and_xor_8.S",
+            ]);
         }
 
         if llvm_target.last().unwrap().ends_with("eabihf") {
-            if !llvm_target[0].starts_with("thumbv7em") &&
-               !llvm_target[0].starts_with("thumbv8m.main") {
+            if !llvm_target[0].starts_with("thumbv7em")
+                && !llvm_target[0].starts_with("thumbv8m.main")
+            {
                // The FPU options chosen for these architectures in cc-rs, i.e.:
                //     -mfpu=fpv4-sp-d16 for thumbv7em
                //     -mfpu=fpv5-sp-d16 for thumbv8m.main
                // do not support double-precision floating-point conversions, so the files
                // that include such instructions are not included for these targets.
-                sources.extend(
-                    &[
-                        "arm/fixdfsivfp.S",
-                        "arm/fixunsdfsivfp.S",
-                        "arm/floatsidfvfp.S",
-                        "arm/floatunssidfvfp.S",
-                    ],
-                );
+                sources.extend(&[
+                    "arm/fixdfsivfp.S",
+                    "arm/fixunsdfsivfp.S",
+                    "arm/floatsidfvfp.S",
+                    "arm/floatunssidfvfp.S",
+                ]);
             }
 
-            sources.extend(
-                &[
-                    "arm/fixsfsivfp.S",
-                    "arm/fixunssfsivfp.S",
-                    "arm/floatsisfvfp.S",
-                    "arm/floatunssisfvfp.S",
-                    "arm/floatunssisfvfp.S",
-                    "arm/restore_vfp_d8_d15_regs.S",
-                    "arm/save_vfp_d8_d15_regs.S",
-                    "arm/negdf2vfp.S",
-                    "arm/negsf2vfp.S",
-                ]
-            );
-
+            sources.extend(&[
+                "arm/fixsfsivfp.S",
+                "arm/fixunssfsivfp.S",
+                "arm/floatsisfvfp.S",
+                "arm/floatunssisfvfp.S",
+                "arm/floatunssisfvfp.S",
+                "arm/restore_vfp_d8_d15_regs.S",
+                "arm/save_vfp_d8_d15_regs.S",
+                "arm/negdf2vfp.S",
+                "arm/negsf2vfp.S",
+            ]);
         }
 
         if target_arch == "aarch64" {
-            sources.extend(
-                &[
-                    "comparetf2.c",
-                    "extenddftf2.c",
-                    "extendsftf2.c",
-                    "fixtfdi.c",
-                    "fixtfsi.c",
-                    "fixtfti.c",
-                    "fixunstfdi.c",
-                    "fixunstfsi.c",
-                    "fixunstfti.c",
-                    "floatditf.c",
-                    "floatsitf.c",
-                    "floatunditf.c",
-                    "floatunsitf.c",
-                    "trunctfdf2.c",
-                    "trunctfsf2.c",
-                ],
-            );
+            sources.extend(&[
+                "comparetf2.c",
+                "extenddftf2.c",
+                "extendsftf2.c",
+                "fixtfdi.c",
+                "fixtfsi.c",
+                "fixtfti.c",
+                "fixunstfdi.c",
+                "fixunstfsi.c",
+                "fixunstfti.c",
+                "floatditf.c",
+                "floatsitf.c",
+                "floatunditf.c",
+                "floatunsitf.c",
+                "trunctfdf2.c",
+                "trunctfsf2.c",
+            ]);
 
             if target_os != "windows" {
                 sources.extend(&["multc3.c"]);
@@ -418,22 +391,20 @@ mod c {
 
         // Remove the assembly implementations that won't compile for the target
         if llvm_target[0] == "thumbv6m" || llvm_target[0] == "thumbv8m.base" {
-            sources.remove(
-                &[
-                    "clzdi2",
-                    "clzsi2",
-                    "divmodsi4",
-                    "divsi3",
-                    "modsi3",
-                    "switch16",
-                    "switch32",
-                    "switch8",
-                    "switchu8",
-                    "udivmodsi4",
-                    "udivsi3",
-                    "umodsi3",
-                ],
-            );
+            sources.remove(&[
+                "clzdi2",
+                "clzsi2",
+                "divmodsi4",
+                "divsi3",
+                "modsi3",
+                "switch16",
+                "switch32",
+                "switch8",
+                "switchu8",
+                "udivmodsi4",
+                "udivsi3",
+                "umodsi3",
+            ]);
 
             // But use some generic implementations where possible
             sources.extend(&["clzdi2.c", "clzsi2.c"])

+ 6 - 4
examples/intrinsics.rs

@@ -17,7 +17,7 @@ extern crate panic_handler;
 
 #[cfg(all(not(thumb), not(windows)))]
 #[link(name = "c")]
-extern {}
+extern "C" {}
 
 // Every function in this module will be lowered to an intrinsic by LLVM, if the platform
 // doesn't have native support for the operation used in the function. ARM has a naming convention
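
A hedged illustration of the kind of function that comment describes (not from this diff): on a 32-bit target without native 64-bit division, LLVM lowers the `/` below to a call to the `__udivdi3` intrinsic, following the usual libgcc naming convention.

#[no_mangle]
pub extern "C" fn udiv(a: u64, b: u64) -> u64 {
    a / b // becomes a call to __udivdi3 on 32-bit targets
}
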
@@ -340,11 +340,13 @@ fn run() {
 
     something_with_a_dtor(&|| assert_eq!(bb(1), 1));
 
-    extern {
+    extern "C" {
         fn rust_begin_unwind();
     }
     // if bb(false) {
-        unsafe { rust_begin_unwind(); }
+    unsafe {
+        rust_begin_unwind();
+    }
     // }
 }
 
@@ -377,7 +379,7 @@ pub fn _start() -> ! {
 #[cfg(windows)]
 #[link(name = "kernel32")]
 #[link(name = "msvcrt")]
-extern {}
+extern "C" {}
 
 // ARM targets need these symbols
 #[no_mangle]

+ 75 - 23
src/arm_linux.rs

@@ -4,11 +4,11 @@ use core::mem;
 // Kernel-provided user-mode helper functions:
 // https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
 unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool {
-    let f: extern "C" fn (u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32);
+    let f: extern "C" fn(u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32);
     f(oldval, newval, ptr) == 0
 }
 unsafe fn __kuser_memory_barrier() {
-    let f: extern "C" fn () = mem::transmute(0xffff0fa0u32);
+    let f: extern "C" fn() = mem::transmute(0xffff0fa0u32);
     f();
 }
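
These two helpers are all the hardware support the rest of this file needs. As a hedged sketch (the real `atomic_rmw` below may differ in detail, e.g. in how it handles sub-word accesses), a compare-and-swap retry loop built on `__kuser_cmpxchg` looks like:

unsafe fn atomic_rmw_sketch(ptr: *mut u32, f: impl Fn(u32) -> u32) -> u32 {
    loop {
        let oldval = *ptr;
        // __kuser_cmpxchg returns true only if *ptr still held `oldval` and
        // the new value was stored; otherwise another thread raced us, so retry.
        if __kuser_cmpxchg(oldval, f(oldval), ptr) {
            return oldval;
        }
    }
}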
 
@@ -94,7 +94,7 @@ macro_rules! atomic_rmw {
         pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty {
             atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty
         }
-    }
+    };
 }
 macro_rules! atomic_cmpxchg {
     ($name:ident, $ty:ty) => {
@@ -102,16 +102,20 @@ macro_rules! atomic_cmpxchg {
         pub unsafe extern "C" fn $name(ptr: *mut $ty, oldval: $ty, newval: $ty) -> $ty {
             atomic_cmpxchg(ptr, oldval as u32, newval as u32) as $ty
         }
-    }
+    };
 }
 
 atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
-atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a.wrapping_add(b));
-atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a.wrapping_add(b));
+atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a
+    .wrapping_add(b));
+atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a
+    .wrapping_add(b));
 
 atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
-atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a.wrapping_sub(b));
-atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a.wrapping_sub(b));
+atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a
+    .wrapping_sub(b));
+atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a
+    .wrapping_sub(b));
 
 atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b);
 atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b);
@@ -129,21 +133,69 @@ atomic_rmw!(__sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b));
 atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b));
 atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b));
 
-atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b { a } else { b });
-
-atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b { a } else { b });
-
-atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b { a } else { b });
-
-atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b { a } else { b });
+atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b {
+    a
+} else {
+    b
+});
+
+atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b {
+    a
+} else {
+    b
+});
+
+atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b {
+    a
+} else {
+    b
+});
+
+atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b {
+    a
+} else {
+    b
+});
 
 atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
 atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);

+ 29 - 21
src/float/add.rs

@@ -1,8 +1,9 @@
-use int::{Int, CastInto};
 use float::Float;
+use int::{CastInto, Int};
 
 /// Returns `a + b`
-fn add<F: Float>(a: F, b: F) -> F where
+fn add<F: Float>(a: F, b: F) -> F
+where
     u32: CastInto<F::Int>,
     F::Int: CastInto<u32>,
     i32: CastInto<F::Int>,
@@ -11,18 +12,18 @@ fn add<F: Float>(a: F, b: F) -> F where
     let one = F::Int::ONE;
     let zero = F::Int::ZERO;
 
-    let bits =             F::BITS.cast();
+    let bits = F::BITS.cast();
     let significand_bits = F::SIGNIFICAND_BITS;
-    let max_exponent =     F::EXPONENT_MAX;
+    let max_exponent = F::EXPONENT_MAX;
 
-    let implicit_bit =     F::IMPLICIT_BIT;
+    let implicit_bit = F::IMPLICIT_BIT;
     let significand_mask = F::SIGNIFICAND_MASK;
-    let sign_bit =         F::SIGN_MASK as F::Int;
-    let abs_mask =         sign_bit - one;
-    let exponent_mask =    F::EXPONENT_MASK;
-    let inf_rep =          exponent_mask;
-    let quiet_bit =        implicit_bit >> 1;
-    let qnan_rep =         exponent_mask | quiet_bit;
+    let sign_bit = F::SIGN_MASK as F::Int;
+    let abs_mask = sign_bit - one;
+    let exponent_mask = F::EXPONENT_MASK;
+    let inf_rep = exponent_mask;
+    let quiet_bit = implicit_bit >> 1;
+    let qnan_rep = exponent_mask | quiet_bit;
 
     let mut a_rep = a.repr();
     let mut b_rep = b.repr();
@@ -30,8 +31,7 @@ fn add<F: Float>(a: F, b: F) -> F where
     let b_abs = b_rep & abs_mask;
 
     // Detect if a or b is zero, infinity, or NaN.
-    if a_abs.wrapping_sub(one) >= inf_rep - one ||
-        b_abs.wrapping_sub(one) >= inf_rep - one {
+    if a_abs.wrapping_sub(one) >= inf_rep - one || b_abs.wrapping_sub(one) >= inf_rep - one {
         // NaN + anything = qNaN
         if a_abs > inf_rep {
             return F::from_repr(a_abs | quiet_bit);
@@ -68,7 +68,7 @@ fn add<F: Float>(a: F, b: F) -> F where
 
         // anything + zero = anything
         if b_abs == Int::ZERO {
-             return a;
+            return a;
         }
     }
 
@@ -115,7 +115,8 @@ fn add<F: Float>(a: F, b: F) -> F where
     let align = a_exponent.wrapping_sub(b_exponent).cast();
     if align != Int::ZERO {
         if align < bits {
-            let sticky = F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != Int::ZERO);
+            let sticky =
+                F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != Int::ZERO);
             b_significand = (b_significand >> align.cast()) | sticky;
         } else {
             b_significand = one; // sticky; b is known to be non-zero.
@@ -131,12 +132,14 @@ fn add<F: Float>(a: F, b: F) -> F where
        // If partial cancellation occurred, we need to left-shift the result
         // and adjust the exponent:
         if a_significand < implicit_bit << 3 {
-            let shift = a_significand.leading_zeros() as i32
-                - (implicit_bit << 3).leading_zeros() as i32;
+            let shift =
+                a_significand.leading_zeros() as i32 - (implicit_bit << 3).leading_zeros() as i32;
             a_significand <<= shift;
             a_exponent -= shift;
         }
-    } else /* addition */ {
+    } else
+    /* addition */
+    {
         a_significand += b_significand;
 
         // If the addition carried up, we need to right-shift the result and
@@ -157,7 +160,8 @@ fn add<F: Float>(a: F, b: F) -> F where
         // Result is denormal before rounding; the exponent is zero and we
         // need to shift the significand.
         let shift = (1 - a_exponent).cast();
-        let sticky = F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != Int::ZERO);
+        let sticky =
+            F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != Int::ZERO);
         a_significand = a_significand >> shift.cast() | sticky;
         a_exponent = 0;
     }
@@ -175,8 +179,12 @@ fn add<F: Float>(a: F, b: F) -> F where
 
     // Final rounding.  The result may overflow to infinity, but that is the
     // correct result in that case.
-    if round_guard_sticky > 0x4 { result += one; }
-    if round_guard_sticky == 0x4 { result += result & one; }
+    if round_guard_sticky > 0x4 {
+        result += one;
+    }
+    if round_guard_sticky == 0x4 {
+        result += result & one;
+    }
 
     F::from_repr(result)
 }
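
A worked instance of the round/guard/sticky logic at the end of `add`, with made-up values; `round_guard_sticky` holds the three bits shifted out of the significand:

fn main() {
    let mut result: u32 = 0b1011; // low bits of a truncated significand
    let round_guard_sticky = 0b100; // exactly half an ulp was shifted out
    if round_guard_sticky > 0x4 {
        result += 1; // more than half an ulp lost: round up
    }
    if round_guard_sticky == 0x4 {
        result += result & 1; // tie: round to even by adding the low bit
    }
    assert_eq!(result, 0b1100); // 11 rounds to 12, the even neighbor
}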

+ 34 - 33
src/float/cmp.rs

@@ -1,64 +1,65 @@
 #![allow(unreachable_code)]
 
-use int::{Int, CastInto};
 use float::Float;
+use int::{CastInto, Int};
 
 #[derive(Clone, Copy)]
 enum Result {
     Less,
     Equal,
     Greater,
-    Unordered
+    Unordered,
 }
 
 impl Result {
     fn to_le_abi(self) -> i32 {
         match self {
-            Result::Less      => -1,
-            Result::Equal     => 0,
-            Result::Greater   => 1,
-            Result::Unordered => 1
+            Result::Less => -1,
+            Result::Equal => 0,
+            Result::Greater => 1,
+            Result::Unordered => 1,
         }
     }
 
     fn to_ge_abi(self) -> i32 {
         match self {
-            Result::Less      => -1,
-            Result::Equal     => 0,
-            Result::Greater   => 1,
-            Result::Unordered => -1
+            Result::Less => -1,
+            Result::Equal => 0,
+            Result::Greater => 1,
+            Result::Unordered => -1,
         }
     }
 }
 
-fn cmp<F: Float>(a: F, b: F) -> Result where
+fn cmp<F: Float>(a: F, b: F) -> Result
+where
     u32: CastInto<F::Int>,
     F::Int: CastInto<u32>,
     i32: CastInto<F::Int>,
     F::Int: CastInto<i32>,
 {
-    let one   = F::Int::ONE;
-    let zero  = F::Int::ZERO;
+    let one = F::Int::ONE;
+    let zero = F::Int::ZERO;
     let szero = F::SignedInt::ZERO;
 
-    let sign_bit =      F::SIGN_MASK as F::Int;
-    let abs_mask =      sign_bit - one;
+    let sign_bit = F::SIGN_MASK as F::Int;
+    let abs_mask = sign_bit - one;
     let exponent_mask = F::EXPONENT_MASK;
-    let inf_rep =       exponent_mask;
+    let inf_rep = exponent_mask;
 
-    let a_rep  = a.repr();
-    let b_rep  = b.repr();
-    let a_abs  = a_rep & abs_mask;
-    let b_abs  = b_rep & abs_mask;
+    let a_rep = a.repr();
+    let b_rep = b.repr();
+    let a_abs = a_rep & abs_mask;
+    let b_abs = b_rep & abs_mask;
 
     // If either a or b is NaN, they are unordered.
     if a_abs > inf_rep || b_abs > inf_rep {
-        return Result::Unordered
+        return Result::Unordered;
     }
 
     // If a and b are both zeros, they are equal.
     if a_abs | b_abs == zero {
-        return Result::Equal
+        return Result::Equal;
     }
 
     let a_srep = a.signed_repr();
@@ -68,29 +69,29 @@ fn cmp<F: Float>(a: F, b: F) -> Result where
     // a and b as signed integers as we would with a floating-point compare.
     if a_srep & b_srep >= szero {
         if a_srep < b_srep {
-            return Result::Less
+            return Result::Less;
         } else if a_srep == b_srep {
-            return Result::Equal
+            return Result::Equal;
         } else {
-            return Result::Greater
+            return Result::Greater;
         }
     }
-
     // Otherwise, both are negative, so we need to flip the sense of the
     // comparison to get the correct result.  (This assumes a twos- or ones-
     // complement integer representation; if integers are represented in a
     // sign-magnitude representation, then this flip is incorrect).
     else {
         if a_srep > b_srep {
-            return Result::Less
+            return Result::Less;
         } else if a_srep == b_srep {
-            return Result::Equal
+            return Result::Equal;
         } else {
-            return Result::Greater
+            return Result::Greater;
         }
     }
 }
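
A quick demonstration of the trick `cmp` relies on: when the operands are not both negative, comparing raw bit patterns as signed integers matches the floating-point order, and when both are negative the sense flips:

fn main() {
    let (a, b) = (1.5f32, 2.25f32);
    assert_eq!(a < b, (a.to_bits() as i32) < (b.to_bits() as i32));

    let (c, d) = (-1.5f32, -2.25f32);
    // both negative: the integer comparison runs backwards
    assert_eq!(c < d, (c.to_bits() as i32) > (d.to_bits() as i32));
}
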
-fn unord<F: Float>(a: F, b: F) -> bool where
+fn unord<F: Float>(a: F, b: F) -> bool
+where
     u32: CastInto<F::Int>,
     F::Int: CastInto<u32>,
     i32: CastInto<F::Int>,
@@ -98,10 +99,10 @@ fn unord<F: Float>(a: F, b: F) -> bool where
 {
     let one = F::Int::ONE;
 
-    let sign_bit =      F::SIGN_MASK as F::Int;
-    let abs_mask =      sign_bit - one;
+    let sign_bit = F::SIGN_MASK as F::Int;
+    let abs_mask = sign_bit - one;
     let exponent_mask = F::EXPONENT_MASK;
-    let inf_rep =       exponent_mask;
+    let inf_rep = exponent_mask;
 
     let a_rep = a.repr();
     let b_rep = b.repr();

+ 47 - 27
src/float/conv.rs

@@ -2,10 +2,10 @@ use float::Float;
 use int::Int;
 
 macro_rules! int_to_float {
-    ($i:expr, $ity:ty, $fty:ty) => ({
+    ($i:expr, $ity:ty, $fty:ty) => {{
         let i = $i;
         if i == 0 {
-            return 0.0
+            return 0.0;
         }
 
         let mant_dig = <$fty>::SIGNIFICAND_BITS + 1;
@@ -22,20 +22,22 @@ macro_rules! int_to_float {
         let mut e = sd - 1;
 
         if <$ity>::BITS < mant_dig {
-            return <$fty>::from_parts(s,
+            return <$fty>::from_parts(
+                s,
                 (e + exponent_bias) as <$fty as Float>::Int,
-                (a as <$fty as Float>::Int) << (mant_dig - e - 1))
+                (a as <$fty as Float>::Int) << (mant_dig - e - 1),
+            );
         }
 
         a = if sd > mant_dig {
             /* start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
-            *  finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
-            *                                                12345678901234567890123456
-            *  1 = msb 1 bit
-            *  P = bit MANT_DIG-1 bits to the right of 1
-            *  Q = bit MANT_DIG bits to the right of 1
-            *  R = "or" of all bits to the right of Q
-            */
+             *  finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+             *                                                12345678901234567890123456
+             *  1 = msb 1 bit
+             *  P = bit MANT_DIG-1 bits to the right of 1
+             *  Q = bit MANT_DIG bits to the right of 1
+             *  R = "or" of all bits to the right of Q
+             */
             let mant_dig_plus_one = mant_dig + 1;
             let mant_dig_plus_two = mant_dig + 2;
             a = if sd == mant_dig_plus_one {
@@ -43,8 +45,10 @@ macro_rules! int_to_float {
             } else if sd == mant_dig_plus_two {
                 a
             } else {
-                (a >> (sd - mant_dig_plus_two)) as <$ity as Int>::UnsignedInt |
-                ((a & <$ity as Int>::UnsignedInt::max_value()).wrapping_shl((n + mant_dig_plus_two) - sd) != 0) as <$ity as Int>::UnsignedInt
+                (a >> (sd - mant_dig_plus_two)) as <$ity as Int>::UnsignedInt
+                    | ((a & <$ity as Int>::UnsignedInt::max_value())
+                        .wrapping_shl((n + mant_dig_plus_two) - sd)
+                        != 0) as <$ity as Int>::UnsignedInt
             };
 
             /* finish: */
@@ -54,19 +58,22 @@ macro_rules! int_to_float {
 
             /* a is now rounded to mant_dig or mant_dig+1 bits */
             if (a & (1 << mant_dig)) != 0 {
-                a >>= 1; e += 1;
+                a >>= 1;
+                e += 1;
             }
             a
-            /* a is now rounded to mant_dig bits */
+        /* a is now rounded to mant_dig bits */
         } else {
             a.wrapping_shl(mant_dig - sd)
             /* a is now rounded to mant_dig bits */
         };
 
-        <$fty>::from_parts(s,
+        <$fty>::from_parts(
+            s,
             (e + exponent_bias) as <$fty as Float>::Int,
-            a as <$fty as Float>::Int)
-    })
+            a as <$fty as Float>::Int,
+        )
+    }};
 }
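
The P/Q/R diagram above describes rounding to nearest-even when the integer needs more bits than the significand holds. A small, hedged sanity check of that behaviour against the compiler's own conversion: 16_777_217 = 2^24 + 1 needs 25 bits, one more than f32's 24-bit significand, and sits exactly halfway between two representable values, so it rounds to the even neighbor.

fn main() {
    let a: i64 = 16_777_217;
    assert_eq!(a as f32, 16_777_216.0); // tie broken toward the even value
}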
 
 intrinsics! {
@@ -160,11 +167,11 @@ intrinsics! {
 #[derive(PartialEq)]
 enum Sign {
     Positive,
-    Negative
+    Negative,
 }
 
 macro_rules! float_to_int {
-    ($f:expr, $fty:ty, $ity:ty) => ({
+    ($f:expr, $fty:ty, $ity:ty) => {{
         let f = $f;
         let fixint_min = <$ity>::min_value();
         let fixint_max = <$ity>::max_value();
@@ -181,21 +188,34 @@ macro_rules! float_to_int {
         let a_abs = a_rep & !sign_bit;
 
         // this is used to work around -1 not being available for unsigned
-        let sign = if (a_rep & sign_bit) == 0 { Sign::Positive } else { Sign::Negative };
+        let sign = if (a_rep & sign_bit) == 0 {
+            Sign::Positive
+        } else {
+            Sign::Negative
+        };
         let mut exponent = (a_abs >> significand_bits) as usize;
         let significand = (a_abs & <$fty>::SIGNIFICAND_MASK) | <$fty>::IMPLICIT_BIT;
 
         // if < 1 or unsigned & negative
-        if  exponent < exponent_bias ||
-            fixint_unsigned && sign == Sign::Negative {
-            return 0
+        if exponent < exponent_bias || fixint_unsigned && sign == Sign::Negative {
+            return 0;
         }
         exponent -= exponent_bias;
 
        // If the value is infinity, saturate.
        // If the value is too large for the integer type, saturate.
-        if exponent >= (if fixint_unsigned {fixint_bits} else {fixint_bits -1}) {
-            return if sign == Sign::Positive {fixint_max} else {fixint_min}
+        if exponent
+            >= (if fixint_unsigned {
+                fixint_bits
+            } else {
+                fixint_bits - 1
+            })
+        {
+            return if sign == Sign::Positive {
+                fixint_max
+            } else {
+                fixint_min
+            };
         }
         // If 0 <= exponent < significand_bits, right shift to get the result.
         // Otherwise, shift left.
@@ -211,7 +231,7 @@ macro_rules! float_to_int {
         } else {
             r
         }
-    })
+    }};
 }
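
The branches above give the conversion saturating semantics: clamp to the integer's range, and map negative inputs of unsigned conversions to zero. Rust's own float-to-int `as` casts behave the same way (since 1.45), which makes for an easy hedged check:

fn main() {
    assert_eq!(1e10_f32 as i32, i32::max_value()); // too large: clamp to max
    assert_eq!(-1e10_f32 as i32, i32::min_value()); // too small: clamp to min
    assert_eq!(-1.0_f32 as u32, 0); // unsigned & negative: zero
}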
 
 intrinsics! {

+ 1 - 5
src/float/div.rs

@@ -1,7 +1,5 @@
-use int::{CastInto, Int, WideInt};
 use float::Float;
-
-
+use int::{CastInto, Int, WideInt};
 
 fn div32<F: Float>(a: F, b: F) -> F
 where
@@ -398,7 +396,6 @@ where
     // operation in C, so we need to be a little bit fussy.
     let (mut quotient, _) = <F::Int as WideInt>::wide_mul(a_significand << 2, reciprocal.cast());
 
-
     // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
     // In either case, we are going to compute a residual of the form
     //
@@ -442,7 +439,6 @@ where
     }
 }
 
-
 intrinsics! {
     #[arm_aeabi_alias = __aeabi_fdiv]
     pub extern "C" fn __divsf3(a: f32, b: f32) -> f32 {

+ 4 - 3
src/float/extend.rs

@@ -1,8 +1,9 @@
-use int::{CastInto, Int};
 use float::Float;
+use int::{CastInto, Int};
 
 /// Generic conversion from a narrower to a wider IEEE-754 floating-point type
-fn extend<F: Float, R: Float>(a: F) -> R where
+fn extend<F: Float, R: Float>(a: F) -> R
+where
     F::Int: CastInto<u64>,
     u64: CastInto<F::Int>,
     u32: CastInto<R::Int>,
@@ -79,4 +80,4 @@ intrinsics! {
     pub extern "C" fn  __extendsfdf2vfp(a: f32) -> f64 {
         a as f64 // LLVM generate 'fcvtds'
     }
-}
+}

+ 26 - 20
src/float/mod.rs

@@ -3,26 +3,26 @@ use core::ops;
 
 use super::int::Int;
 
-pub mod conv;
-pub mod cmp;
 pub mod add;
-pub mod pow;
-pub mod sub;
-pub mod mul;
+pub mod cmp;
+pub mod conv;
 pub mod div;
 pub mod extend;
+pub mod mul;
+pub mod pow;
+pub mod sub;
 
 /// Trait for some basic operations on floats
 pub trait Float:
-    Copy +
-    PartialEq +
-    PartialOrd +
-    ops::AddAssign +
-    ops::MulAssign +
-    ops::Add<Output = Self> +
-    ops::Sub<Output = Self> +
-    ops::Div<Output = Self> +
-    ops::Rem<Output = Self> +
+    Copy
+    + PartialEq
+    + PartialOrd
+    + ops::AddAssign
+    + ops::MulAssign
+    + ops::Add<Output = Self>
+    + ops::Sub<Output = Self>
+    + ops::Div<Output = Self>
+    + ops::Rem<Output = Self>
 {
     /// A uint of the same width as the float
     type Int: Int;
@@ -118,17 +118,23 @@ macro_rules! float_impl {
                 unsafe { mem::transmute(a) }
             }
             fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self {
-                Self::from_repr(((sign as Self::Int) << (Self::BITS - 1)) |
-                    ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) |
-                    (significand & Self::SIGNIFICAND_MASK))
+                Self::from_repr(
+                    ((sign as Self::Int) << (Self::BITS - 1))
+                        | ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK)
+                        | (significand & Self::SIGNIFICAND_MASK),
+                )
             }
             fn normalize(significand: Self::Int) -> (i32, Self::Int) {
-                let shift = significand.leading_zeros()
+                let shift = significand
+                    .leading_zeros()
                     .wrapping_sub((Self::Int::ONE << Self::SIGNIFICAND_BITS).leading_zeros());
-                (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int)
+                (
+                    1i32.wrapping_sub(shift as i32),
+                    significand << shift as Self::Int,
+                )
             }
         }
-    }
+    };
 }
 
 float_impl!(f32, u32, i32, 32, 23);
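
`from_parts` packs the three fields with the masks defined earlier. As a hedged illustration using plain bit operations, 1.0f32 is sign 0, biased exponent 127, and an all-zero significand:

fn main() {
    let (sign, exponent, significand) = (0u32, 127u32, 0u32);
    let bits = (sign << 31) | (exponent << 23) | significand;
    assert_eq!(f32::from_bits(bits), 1.0);
}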

+ 1 - 1
src/float/mul.rs

@@ -1,5 +1,5 @@
-use int::{CastInto, Int, WideInt};
 use float::Float;
+use int::{CastInto, Int, WideInt};
 
 fn mul<F: Float>(a: F, b: F) -> F
 where

+ 1 - 1
src/float/pow.rs

@@ -1,5 +1,5 @@
-use int::Int;
 use float::Float;
+use int::Int;
 
 trait Pow: Float {
     /// Returns `a` raised to the power `b`

+ 2 - 2
src/float/sub.rs

@@ -1,6 +1,6 @@
-use float::Float;
-use float::add::__addsf3;
 use float::add::__adddf3;
+use float::add::__addsf3;
+use float::Float;
 
 intrinsics! {
     #[arm_aeabi_alias = __aeabi_fsub]

+ 17 - 6
src/int/addsub.rs

@@ -1,16 +1,24 @@
-use int::LargeInt;
 use int::Int;
+use int::LargeInt;
 
 trait UAddSub: LargeInt {
     fn uadd(self, other: Self) -> Self {
         let (low, carry) = self.low().overflowing_add(other.low());
         let high = self.high().wrapping_add(other.high());
-        let carry = if carry { Self::HighHalf::ONE } else { Self::HighHalf::ZERO };
+        let carry = if carry {
+            Self::HighHalf::ONE
+        } else {
+            Self::HighHalf::ZERO
+        };
         Self::from_parts(low, high.wrapping_add(carry))
     }
     fn uadd_one(self) -> Self {
         let (low, carry) = self.low().overflowing_add(Self::LowHalf::ONE);
-        let carry = if carry { Self::HighHalf::ONE } else { Self::HighHalf::ZERO };
+        let carry = if carry {
+            Self::HighHalf::ONE
+        } else {
+            Self::HighHalf::ZERO
+        };
         Self::from_parts(low, self.high().wrapping_add(carry))
     }
     fn usub(self, other: Self) -> Self {
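
The carry handling in `uadd` is schoolbook addition on halves. A concrete check of the same idea, treating a u128 as two u64 halves:

fn main() {
    let a: u128 = u64::max_value() as u128; // low half all ones, high half zero
    let b: u128 = 1;
    // the low half overflows to zero and the carry bumps the high half
    assert_eq!(a + b, 1u128 << 64);
}
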
@@ -22,7 +30,8 @@ trait UAddSub: LargeInt {
 impl UAddSub for u128 {}
 
 trait AddSub: Int
-    where <Self as Int>::UnsignedInt: UAddSub
+where
+    <Self as Int>::UnsignedInt: UAddSub,
 {
     fn add(self, other: Self) -> Self {
         Self::from_unsigned(self.unsigned().uadd(other.unsigned()))
@@ -36,7 +45,8 @@ impl AddSub for u128 {}
 impl AddSub for i128 {}
 
 trait Addo: AddSub
-    where <Self as Int>::UnsignedInt: UAddSub
+where
+    <Self as Int>::UnsignedInt: UAddSub,
 {
     fn addo(self, other: Self, overflow: &mut i32) -> Self {
         *overflow = 0;
@@ -58,7 +68,8 @@ impl Addo for i128 {}
 impl Addo for u128 {}
 
 trait Subo: AddSub
-    where <Self as Int>::UnsignedInt: UAddSub
+where
+    <Self as Int>::UnsignedInt: UAddSub,
 {
     fn subo(self, other: Self, overflow: &mut i32) -> Self {
         *overflow = 0;

+ 25 - 25
src/int/mod.rs

@@ -3,13 +3,13 @@ use core::ops;
 macro_rules! hty {
     ($ty:ty) => {
         <$ty as LargeInt>::HighHalf
-    }
+    };
 }
 
 macro_rules! os_ty {
     ($ty:ty) => {
         <$ty as Int>::OtherSign
-    }
+    };
 }
 
 pub mod addsub;
@@ -20,23 +20,23 @@ pub mod udiv;
 
 /// Trait for some basic operations on integers
 pub trait Int:
-    Copy +
-    PartialEq +
-    PartialOrd +
-    ops::AddAssign +
-    ops::BitAndAssign +
-    ops::BitOrAssign +
-    ops::ShlAssign<i32> +
-    ops::ShrAssign<u32> +
-    ops::Add<Output = Self> +
-    ops::Sub<Output = Self> +
-    ops::Div<Output = Self> +
-    ops::Shl<u32, Output = Self> +
-    ops::Shr<u32, Output = Self> +
-    ops::BitOr<Output = Self> +
-    ops::BitXor<Output = Self> +
-    ops::BitAnd<Output = Self> +
-    ops::Not<Output = Self> +
+    Copy
+    + PartialEq
+    + PartialOrd
+    + ops::AddAssign
+    + ops::BitAndAssign
+    + ops::BitOrAssign
+    + ops::ShlAssign<i32>
+    + ops::ShrAssign<u32>
+    + ops::Add<Output = Self>
+    + ops::Sub<Output = Self>
+    + ops::Div<Output = Self>
+    + ops::Shl<u32, Output = Self>
+    + ops::Shr<u32, Output = Self>
+    + ops::BitOr<Output = Self>
+    + ops::BitXor<Output = Self>
+    + ops::BitAnd<Output = Self>
+    + ops::Not<Output = Self>
 {
     /// Type with the same width but other signedness
     type OtherSign: Int;
@@ -182,7 +182,7 @@ macro_rules! int_impl {
 
             int_impl_common!($ity, $bits);
         }
-    }
+    };
 }
 
 int_impl!(i32, u32, 32);
@@ -223,7 +223,7 @@ macro_rules! large_int {
                 low as $ty | ((high as $ty) << $halfbits)
             }
         }
-    }
+    };
 }
 
 large_int!(u64, u32, u32, 32);
@@ -284,9 +284,9 @@ macro_rules! impl_wide_int {
                     let sticky = *low << ($bits - count);
                     *low = *self << ($bits - count) | *low >> count | sticky;
                     *self = *self >> count;
-                } else if count < 2*$bits {
-                    let sticky = *self << (2*$bits - count) | *low;
-                    *low = *self >> (count - $bits ) | sticky;
+                } else if count < 2 * $bits {
+                    let sticky = *self << (2 * $bits - count) | *low;
+                    *low = *self >> (count - $bits) | sticky;
                     *self = 0;
                 } else {
                     let sticky = *self | *low;
@@ -295,7 +295,7 @@ macro_rules! impl_wide_int {
                 }
             }
         }
-    }
+    };
 }
 
 impl_wide_int!(u32, u64, 32);

+ 5 - 4
src/int/mul.rs

@@ -1,7 +1,7 @@
 use core::ops;
 
-use int::LargeInt;
 use int::Int;
+use int::LargeInt;
 
 trait Mul: LargeInt {
     fn mul(self, other: Self) -> Self {
@@ -19,8 +19,9 @@ trait Mul: LargeInt {
         low += (t & lower_mask) << half_bits;
         high += Self::low_as_high(t >> half_bits);
         high += Self::low_as_high((self.low() >> half_bits).wrapping_mul(other.low() >> half_bits));
-        high = high.wrapping_add(self.high().wrapping_mul(Self::low_as_high(other.low())))
-                   .wrapping_add(Self::low_as_high(self.low()).wrapping_mul(other.high()));
+        high = high
+            .wrapping_add(self.high().wrapping_mul(Self::low_as_high(other.low())))
+            .wrapping_add(Self::low_as_high(self.low()).wrapping_mul(other.high()));
         Self::from_parts(low, high)
     }
 }
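
The bookkeeping above assembles a full double-width product from half-word pieces. As a smaller hedged illustration of the same decomposition, here is just the wrapping (low-half) 64-bit product built from 32-bit halves:

fn mul_via_halves(a: u64, b: u64) -> u64 {
    let (al, ah) = (a as u32 as u64, a >> 32);
    let (bl, bh) = (b as u32 as u64, b >> 32);
    // ah * bh contributes only above bit 64, so it drops out entirely
    let cross = al.wrapping_mul(bh).wrapping_add(ah.wrapping_mul(bl));
    (al * bl).wrapping_add(cross << 32)
}

fn main() {
    let (a, b) = (0x1234_5678_9abc_def0_u64, 0x0fed_cba9_8765_4321_u64);
    assert_eq!(mul_via_halves(a, b), a.wrapping_mul(b));
}
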
@@ -70,7 +71,7 @@ impl Mulo for i32 {}
 impl Mulo for i64 {}
 impl Mulo for i128 {}
 
-trait UMulo : Int {
+trait UMulo: Int {
     fn mulo(self, other: Self, overflow: &mut i32) -> Self {
         *overflow = 0;
         let result = self.wrapping_mul(other);

+ 2 - 1
src/int/sdiv.rs

@@ -43,7 +43,8 @@ impl Mod for i128 {}
 trait Divmod: Int {
     /// Returns `a / b` and sets `*rem = a % b`
     fn divmod<F>(self, other: Self, rem: &mut Self, div: F) -> Self
-        where F: Fn(Self, Self) -> Self,
+    where
+        F: Fn(Self, Self) -> Self,
     {
         let r = div(self, other);
         // NOTE won't overflow because it's using the result from the

+ 22 - 13
src/int/shift.rs

@@ -3,7 +3,8 @@ use int::{Int, LargeInt};
 trait Ashl: Int + LargeInt {
     /// Returns `a << b`, requires `b < Self::BITS`
     fn ashl(self, offset: u32) -> Self
-        where Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>,
+    where
+        Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>,
     {
         let half_bits = Self::BITS / 2;
         if offset & half_bits != 0 {
@@ -11,9 +12,10 @@ trait Ashl: Int + LargeInt {
         } else if offset == 0 {
             self
         } else {
-            Self::from_parts(self.low() << offset,
-                             (self.high() << offset) |
-                                (self.low() >> (half_bits - offset)))
+            Self::from_parts(
+                self.low() << offset,
+                (self.high() << offset) | (self.low() >> (half_bits - offset)),
+            )
         }
     }
 }
@@ -24,18 +26,23 @@ impl Ashl for u128 {}
 trait Ashr: Int + LargeInt {
     /// Returns arithmetic `a >> b`, requires `b < Self::BITS`
     fn ashr(self, offset: u32) -> Self
-        where Self: LargeInt<LowHalf = <<Self as LargeInt>::HighHalf as Int>::UnsignedInt>,
+    where
+        Self: LargeInt<LowHalf = <<Self as LargeInt>::HighHalf as Int>::UnsignedInt>,
     {
         let half_bits = Self::BITS / 2;
         if offset & half_bits != 0 {
-            Self::from_parts((self.high() >> (offset - half_bits)).unsigned(),
-                              self.high() >> (half_bits - 1))
+            Self::from_parts(
+                (self.high() >> (offset - half_bits)).unsigned(),
+                self.high() >> (half_bits - 1),
+            )
         } else if offset == 0 {
             self
         } else {
             let high_unsigned = self.high().unsigned();
-            Self::from_parts((high_unsigned << (half_bits - offset)) | (self.low() >> offset),
-                              self.high() >> offset)
+            Self::from_parts(
+                (high_unsigned << (half_bits - offset)) | (self.low() >> offset),
+                self.high() >> offset,
+            )
         }
     }
 }
@@ -46,7 +53,8 @@ impl Ashr for i128 {}
 trait Lshr: Int + LargeInt {
     /// Returns logical `a >> b`, requires `b < Self::BITS`
     fn lshr(self, offset: u32) -> Self
-        where Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>,
+    where
+        Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>,
     {
         let half_bits = Self::BITS / 2;
         if offset & half_bits != 0 {
@@ -54,9 +62,10 @@ trait Lshr: Int + LargeInt {
         } else if offset == 0 {
             self
         } else {
-            Self::from_parts((self.high() << (half_bits - offset)) |
-                                (self.low() >> offset),
-                             self.high() >> offset)
+            Self::from_parts(
+                (self.high() << (half_bits - offset)) | (self.low() >> offset),
+                self.high() >> offset,
+            )
         }
     }
 }
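
All three shifts follow the same pattern: a shift of at least half the width moves one half into the other, zero is special-cased (a `half_bits - offset` shift would overflow), and everything else splits across the halves. The same logic on concrete types, for a 128-bit left shift built from u64 halves:

fn ashl128(low: u64, high: u64, offset: u32) -> (u64, u64) {
    // requires offset < 128, as the trait documents
    if offset & 64 != 0 {
        (0, low << (offset - 64))
    } else if offset == 0 {
        (low, high)
    } else {
        (low << offset, (high << offset) | (low >> (64 - offset)))
    }
}

fn main() {
    let v: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
    let (l, h) = ashl128(v as u64, (v >> 64) as u64, 40);
    assert_eq!(((h as u128) << 64) | l as u128, v << 40);
}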

+ 21 - 13
src/lib.rs

@@ -3,11 +3,13 @@
 #![cfg_attr(feature = "compiler-builtins", compiler_builtins)]
 #![crate_name = "compiler_builtins"]
 #![crate_type = "rlib"]
-#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
-       html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
-       html_root_url = "https://doc.rust-lang.org/nightly/",
-       html_playground_url = "https://play.rust-lang.org/",
-       test(attr(deny(warnings))))]
+#![doc(
+    html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
+    html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
+    html_root_url = "https://doc.rust-lang.org/nightly/",
+    html_playground_url = "https://play.rust-lang.org/",
+    test(attr(deny(warnings)))
+)]
 #![feature(asm)]
 #![feature(compiler_builtins)]
 #![feature(core_intrinsics)]
@@ -19,10 +21,14 @@
 #![allow(unused_features)]
 #![no_builtins]
 #![cfg_attr(feature = "compiler-builtins", feature(staged_api))]
-#![cfg_attr(feature = "compiler-builtins",
-            unstable(feature = "compiler_builtins_lib",
-                     reason = "Compiler builtins. Will never become stable.",
-                     issue = "0"))]
+#![cfg_attr(
+    feature = "compiler-builtins",
+    unstable(
+        feature = "compiler_builtins_lib",
+        reason = "Compiler builtins. Will never become stable.",
+        issue = "0"
+    )
+)]
 
 // We disable #[no_mangle] for tests so that we can verify the test results
 // against the native compiler-rt implementations of the builtins.
@@ -44,12 +50,14 @@ fn abort() -> ! {
 #[macro_use]
 mod macros;
 
-pub mod int;
 pub mod float;
+pub mod int;
 
-#[cfg(any(all(target_arch = "wasm32", target_os = "unknown"),
-          all(target_arch = "arm", target_os = "none"),
-          all(target_vendor = "fortanix", target_env = "sgx")))]
+#[cfg(any(
+    all(target_arch = "wasm32", target_os = "unknown"),
+    all(target_arch = "arm", target_os = "none"),
+    all(target_vendor = "fortanix", target_env = "sgx")
+))]
 pub mod math;
 pub mod mem;
 

+ 1 - 1
src/macros.rs

@@ -261,7 +261,7 @@ macro_rules! intrinsics {
 
 // Hack for LLVM expectations for ABI on windows. This is used by the
 // `#[win64_128bit_abi_hack]` attribute recognized above
-#[cfg(all(windows, target_pointer_width="64"))]
+#[cfg(all(windows, target_pointer_width = "64"))]
 pub mod win64_128bit_abi_hack {
     #[repr(simd)]
     pub struct U64x2(u64, u64);

+ 2 - 8
src/mem.rs

@@ -6,10 +6,7 @@ type c_int = i16;
 type c_int = i32;
 
 #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
-pub unsafe extern "C" fn memcpy(dest: *mut u8,
-                                src: *const u8,
-                                n: usize)
-                                -> *mut u8 {
+pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
     let mut i = 0;
     while i < n {
         *dest.offset(i as isize) = *src.offset(i as isize);
@@ -19,10 +16,7 @@ pub unsafe extern "C" fn memcpy(dest: *mut u8,
 }
 
 #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
-pub unsafe extern "C" fn memmove(dest: *mut u8,
-                                 src: *const u8,
-                                 n: usize)
-                                 -> *mut u8 {
+pub unsafe extern "C" fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
     if src < dest as *const u8 {
         // copy from end
         let mut i = n;
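
The "copy from end" comment is the whole trick: when the destination overlaps the source and sits above it, copying backwards never clobbers bytes that are still waiting to be read. A safe-Rust sketch of the same idea (indices into one buffer stand in for the raw pointers):

fn memmove_sketch(buf: &mut [u8], src: usize, dest: usize, n: usize) {
    if src < dest {
        for i in (0..n).rev() {
            buf[dest + i] = buf[src + i]; // copy from end
        }
    } else {
        for i in 0..n {
            buf[dest + i] = buf[src + i];
        }
    }
}

fn main() {
    let mut buf = [1, 2, 3, 4, 0];
    memmove_sketch(&mut buf, 0, 1, 4); // overlapping shift right by one
    assert_eq!(buf, [1, 1, 2, 3, 4]);
}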

+ 2 - 2
src/probestack.rs

@@ -46,7 +46,7 @@
 #[naked]
 #[no_mangle]
 #[cfg(all(target_arch = "x86_64", not(feature = "mangled-names")))]
-pub unsafe extern fn __rust_probestack() {
+pub unsafe extern "C" fn __rust_probestack() {
     // Our goal here is to touch each page between %rsp+8 and %rsp+8-%rax,
     // ensuring that if any pages are unmapped we'll make a page fault.
     //
@@ -97,7 +97,7 @@ pub unsafe extern fn __rust_probestack() {
 #[naked]
 #[no_mangle]
 #[cfg(all(target_arch = "x86", not(feature = "mangled-names")))]
-pub unsafe extern fn __rust_probestack() {
+pub unsafe extern "C" fn __rust_probestack() {
     // This is the same as x86_64 above, only translated for 32-bit sizes. Note
     // that on Unix we're expected to restore everything as it was; this
     // function basically can't tamper with anything.

File diff suppressed because it is too large
+ 431 - 229
testcrate/build.rs


+ 1 - 1
testcrate/src/lib.rs

@@ -1 +1 @@
-#![no_std]
+#![no_std]

+ 6 - 4
testcrate/tests/aeabi_memclr.rs

@@ -1,7 +1,9 @@
-#![cfg(all(target_arch = "arm",
-           not(any(target_env = "gnu", target_env = "musl")),
-           target_os = "linux",
-           feature = "mem"))]
+#![cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    feature = "mem"
+))]
 #![feature(compiler_builtins_lib)]
 #![feature(lang_items)]
 #![no_std]

+ 6 - 4
testcrate/tests/aeabi_memcpy.rs

@@ -1,7 +1,9 @@
-#![cfg(all(target_arch = "arm",
-           not(any(target_env = "gnu", target_env = "musl")),
-           target_os = "linux",
-           feature = "mem"))]
+#![cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    feature = "mem"
+))]
 #![feature(compiler_builtins_lib)]
 #![feature(lang_items)]
 #![no_std]

+ 24 - 58
testcrate/tests/aeabi_memset.rs

@@ -1,7 +1,9 @@
-#![cfg(all(target_arch = "arm",
-           not(any(target_env = "gnu", target_env = "musl")),
-           target_os = "linux",
-           feature = "mem"))]
+#![cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    feature = "mem"
+))]
 #![feature(compiler_builtins_lib)]
 #![feature(lang_items)]
 #![no_std]
@@ -48,9 +50,7 @@ fn zero() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), 0, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), 0, c) }
 
     assert_eq!(*xs, [0; 8]);
 
@@ -59,9 +59,7 @@ fn zero() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), 0, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), 0, c) }
 
     assert_eq!(*xs, [1; 8]);
 }
@@ -74,9 +72,7 @@ fn one() {
     let n = 1;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0, 0, 0, 0, 0, 0, 0]);
 
@@ -85,9 +81,7 @@ fn one() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 1, 1, 1, 1, 1, 1, 1]);
 }
@@ -100,9 +94,7 @@ fn two() {
     let n = 2;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0, 0, 0, 0, 0, 0]);
 
@@ -111,9 +103,7 @@ fn two() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 1, 1, 1, 1, 1, 1]);
 }
@@ -126,9 +116,7 @@ fn three() {
     let n = 3;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0, 0, 0, 0, 0]);
 
@@ -137,9 +125,7 @@ fn three() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 1, 1, 1, 1, 1]);
 }
@@ -152,9 +138,7 @@ fn four() {
     let n = 4;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0, 0, 0, 0]);
 
@@ -163,9 +147,7 @@ fn four() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 1, 1, 1, 1]);
 }
@@ -178,9 +160,7 @@ fn five() {
     let n = 5;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0, 0, 0]);
 
@@ -189,9 +169,7 @@ fn five() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 1, 1, 1]);
 }
@@ -204,9 +182,7 @@ fn six() {
     let n = 6;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0, 0]);
 
@@ -215,9 +191,7 @@ fn six() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 1, 1]);
 }
@@ -230,9 +204,7 @@ fn seven() {
     let n = 7;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0]);
 
@@ -241,9 +213,7 @@ fn seven() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 1]);
 }
@@ -256,9 +226,7 @@ fn eight() {
     let n = 8;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef]);
 
@@ -267,9 +235,7 @@ fn eight() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;
 
-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
 
     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef]);
 }

+ 15 - 15
testcrate/tests/count_leading_zeros.rs

@@ -6,20 +6,20 @@ use compiler_builtins::int::__clzsi2;
 
 #[test]
 fn __clzsi2_test() {
-  let mut i: usize = core::usize::MAX;
-  // Check all values above 0
-  while i > 0 {
+    let mut i: usize = core::usize::MAX;
+    // Check all values above 0
+    while i > 0 {
+        assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
+        i >>= 1;
+    }
+    // check 0 also
+    i = 0;
     assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
-    i >>= 1;
-  }
-  // check 0 also
-  i = 0;
-  assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
-  // double check for bit patterns that aren't just solid 1s
-  i = 1;
-  for _ in 0..63 {
-    assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
-    i <<= 2;
-    i += 1;
-  }
+    // double check for bit patterns that aren't just solid 1s
+    i = 1;
+    for _ in 0..63 {
+        assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
+        i <<= 2;
+        i += 1;
+    }
 }

+ 18 - 12
testcrate/tests/generated.rs

@@ -6,23 +6,29 @@
 
 extern crate compiler_builtins as builtins;
 
-#[cfg(all(target_arch = "arm",
-          not(any(target_env = "gnu", target_env = "musl")),
-          target_os = "linux",
-          test))]
+#[cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    test
+))]
 extern crate utest_cortex_m_qemu;
 
-#[cfg(all(target_arch = "arm",
-          not(any(target_env = "gnu", target_env = "musl")),
-          target_os = "linux",
-          test))]
+#[cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    test
+))]
 #[macro_use]
 extern crate utest_macros;
 
-#[cfg(all(target_arch = "arm",
-          not(any(target_env = "gnu", target_env = "musl")),
-          target_os = "linux",
-          test))]
+#[cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    test
+))]
 macro_rules! panic { // overrides `panic!`
     ($($tt:tt)*) => {
         upanic!($($tt)*);

Some files were not shown because too many files changed in this diff