
Multi-level logging and user configuration.

This commit is a complete revision of `shim`. Anything configurable in
nature has been moved to `shim`, allowing complete customization of
`ralloc`.

But it doesn't end there!

The logs used to get quite messy due to the classic TMI
(too-much-information) problem. We solve this by adding multiple "log
levels", each describing a different class of importance.
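
For illustration, a rough sketch of how the new leveled `log!` macro
(defined in `src/log.rs` below) is used; the messages and variables
here are examples, not lines from this commit:

    // Verbose internal state, mostly of interest when debugging ralloc itself.
    log!(INTERNAL, "Copying {:?} to {:?}", source_block, destination_block);
    // Note-worthy events, such as allocator initialization.
    log!(NOTE, "Initializing the global allocator.");
    // Recoverable but suspicious conditions.
    log!(WARNING, "An old thread OOM handler was overridden.");

Messages whose level is below `shim::config::MIN_LOG_LEVEL` are
filtered out at runtime.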

- Add multi-level logging.
- Move each component of shim into distinct modules.
- Add allocator canonicalization to `shim`.
- Add adjustable constants to `shim`.
- Add default OOM handler to `shim`.
- Add logging to everything.
- Remove the `libc_write` module and add it to `log` instead.
- Add colourful OOM message.
- Tweak the allocator canonicalization strategy.
- Rename the `log!` macro to `bk_log!`, and use the old name for the
  generalized logging macro.
- Remove the `unsafe_no_brk_lock` feature, due to being unsafe in
  multithreaded environments (in singlethreaded ones,
  `unsafe_no_mutex_lock` is sufficient).
- Remove the `sys` module in favor of direct `shim` imports.
ticki 8 years ago
parent
commit
e3190491ac
18 changed files with 434 additions and 433 deletions
  1. Cargo.toml (+0 -1)
  2. shim/src/config.rs (+89 -0)
  3. shim/src/debug.rs (+17 -0)
  4. shim/src/lib.rs (+4 -115)
  5. shim/src/syscalls.rs (+17 -0)
  6. shim/src/thread_destructor.rs (+46 -0)
  7. src/allocator.rs (+21 -24)
  8. src/block.rs (+8 -4)
  9. src/bookkeeper.rs (+43 -43)
  10. src/brk.rs (+11 -36)
  11. src/fail.rs (+16 -5)
  12. src/lib.rs (+1 -4)
  13. src/log.rs (+151 -16)
  14. src/sync.rs (+3 -3)
  15. src/sys.rs (+0 -75)
  16. src/tls.rs (+5 -3)
  17. src/vec.rs (+2 -0)
  18. src/write.rs (+0 -104)

+ 0 - 1
Cargo.toml

@@ -43,6 +43,5 @@ no_log_lock = ["log"]
 security = []
 testing = ["log", "debugger"]
 tls = []
-unsafe_no_brk_lock = []
 unsafe_no_mutex_lock = []
 write = []

+ 89 - 0
shim/src/config.rs

@@ -0,0 +1,89 @@
+//! Configuration.
+//!
+//! This module contains anything which can be tweaked and customized to the user's preferences.
+
+use core::{intrinsics, cmp};
+
+/// The memtrim limit.
+///
+/// Whenever this is exceeded, the allocator will try to free as much memory to the system
+/// as it can.
+pub const OS_MEMTRIM_LIMIT: usize = 200000000;
+/// Minimum size before a block is worth memtrimming.
+pub const OS_MEMTRIM_WORTHY: usize = 4000;
+
+/// The fragmentation scale constant.
+///
+/// This is used for determining the minimum average block size before memtrimming locally.
+pub const FRAGMENTATION_SCALE: usize = 10;
+/// The local memtrim limit.
+///
+/// Whenever a local allocator has more free bytes than this value, it will be memtrimmed.
+pub const LOCAL_MEMTRIM_LIMIT: usize = 16384;
+
+/// The minimum log level.
+pub const MIN_LOG_LEVEL: u8 = 0;
+
+/// The default OOM handler.
+#[cold]
+pub fn default_oom_handler() -> ! {
+    // Log some message.
+    log("\x1b[31;1mThe application ran out of memory. Aborting.\x1b[m\n");
+
+    unsafe {
+        intrinsics::abort();
+    }
+}
+
+/// Write to the log.
+///
+/// This points to stderr, but could be changed arbitrarily.
+pub fn log(s: &str) -> usize {
+    unsafe { syscall!(WRITE, 2, s.as_ptr(), s.len()) }
+}
+
+/// Canonicalize a fresh allocation.
+///
+/// The return value specifies how much _more_ space is requested for the fresh allocator.
+// TODO: Move to shim.
+#[inline]
+pub fn extra_fresh(size: usize) -> usize {
+    /// The multiplier.
+    ///
+    /// The factor determining the linear dependence between the minimum segment, and the acquired
+    /// segment.
+    const MULTIPLIER: usize = 2;
+    /// The minimum extra size to be BRK'd.
+    const MIN_EXTRA: usize = 64;
+    /// The maximal amount of _extra_ bytes.
+    const MAX_EXTRA: usize = 1024;
+
+    cmp::max(MIN_EXTRA, cmp::min(MULTIPLIER * size, MAX_EXTRA))
+}
+
+/// Canonicalize a BRK request.
+///
+/// Syscalls can be expensive, which is why we would rather acquire more memory than necessary
+/// than make many syscalls acquiring memory stubs. Memory stubs are small blocks of memory,
+/// which are essentially useless until merged with another block.
+///
+/// To avoid many syscalls and accumulating memory stubs, we BRK a little more memory than
+/// necessary. This function calculates the memory to be BRK'd based on the necessary memory.
+///
+/// The return value specifies how much _more_ space is requested.
+// TODO: Move to shim.
+#[inline]
+pub fn extra_brk(size: usize) -> usize {
+    // TODO: Tweak this.
+    /// The BRK multiplier.
+    ///
+    /// The factor determining the linear dependence between the minimum segment, and the acquired
+    /// segment.
+    const MULTIPLIER: usize = 2;
+    /// The minimum extra size to be BRK'd.
+    const MIN_EXTRA: usize = 1024;
+    /// The maximal amount of _extra_ bytes.
+    const MAX_EXTRA: usize = 65536;
+
+    cmp::max(MIN_EXTRA, cmp::min(MULTIPLIER * size, MAX_EXTRA))
+}
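
For intuition, a worked sketch (not from the commit) of how `extra_brk`
clamps `MULTIPLIER * size` into the `[MIN_EXTRA, MAX_EXTRA]` range:

    assert_eq!(extra_brk(100),     1024);  // 2 * 100 = 200, clamped up to MIN_EXTRA.
    assert_eq!(extra_brk(4096),    8192);  // 2 * 4096 lies within the bounds.
    assert_eq!(extra_brk(100_000), 65536); // 2 * 100_000, clamped down to MAX_EXTRA.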

+ 17 - 0
shim/src/debug.rs

@@ -0,0 +1,17 @@
+//! Bindings to debuggers.
+
+extern {
+    /// Valgrind symbol to declare memory undefined.
+    fn valgrind_make_mem_undefined(ptr: *const u8, size: usize);
+    /// Valgrind symbol to declare memory freed.
+    fn valgrind_freelike_block(ptr: *const u8, size: usize);
+}
+
+/// Mark this segment undefined to the debugger.
+pub fn mark_undefined(ptr: *const u8, size: usize) {
+    unsafe { valgrind_make_mem_undefined(ptr, size) }
+}
+/// Mark this segment free to the debugger.
+pub fn mark_free(ptr: *const u8, size: usize) {
+    unsafe { valgrind_freelike_block(ptr, size) }
+}

+ 4 - 115
shim/src/lib.rs

@@ -14,118 +14,7 @@
 #[macro_use]
 extern crate syscall;
 
-use core::intrinsics;
-
-/// Voluntarily give a time slice to the scheduler.
-pub fn sched_yield() -> usize {
-    unsafe { syscall!(SCHED_YIELD) }
-}
-
-/// The default OOM handler.
-#[cold]
-pub fn default_oom_handler() -> ! {
-    // Log some message.
-    log("\x1b[31;1mThe application ran out of memory. Aborting.\n");
-
-    unsafe {
-        intrinsics::abort();
-    }
-}
-
-/// Change the data segment. See `man brk`.
-///
-/// # Note
-///
-/// This is the `brk` **syscall**, not the library function.
-pub unsafe fn brk(ptr: *const u8) -> *const u8 {
-    syscall!(BRK, ptr) as *const u8
-}
-
-/// Write to the log.
-///
-/// This points to stderr, but could be changed arbitrarily.
-pub fn log(s: &str) -> usize {
-    unsafe { syscall!(WRITE, 2, s.as_ptr(), s.len()) }
-}
-
-/// Thread destructors for Linux.
-#[cfg(target_os = "linux")]
-pub mod thread_destructor {
-    extern {
-        #[linkage = "extern_weak"]
-        static __dso_handle: *mut u8;
-        #[linkage = "extern_weak"]
-        static __cxa_thread_atexit_impl: *const u8;
-    }
-
-    /// Does this platform support thread destructors?
-    ///
-    /// This will return true, if and only if `__cxa_thread_atexit_impl` is non-null.
-    #[inline]
-    pub fn is_supported() -> bool {
-        !__cxa_thread_atexit_impl.is_null()
-    }
-
-    /// Register a thread destructor.
-    ///
-    /// # Safety
-    ///
-    /// This is unsafe due to accepting (and dereferencing) raw pointers, as well as running an
-    /// arbitrary unsafe function.
-    ///
-    /// On older system without the `__cxa_thread_atexit_impl` symbol, this is unsafe to call, and will
-    /// likely segfault.
-    // TODO: Due to rust-lang/rust#18804, make sure this is not generic!
-    pub unsafe fn register(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
-        use core::mem;
-
-        /// A thread destructor.
-        type Dtor = unsafe extern fn(dtor: unsafe extern fn(*mut u8), arg: *mut u8, dso_handle: *mut u8) -> i32;
-
-        mem::transmute::<*const u8, Dtor>(__cxa_thread_atexit_impl)(dtor, t, &__dso_handle as *const _ as *mut _);
-    }
-}
-
-/// Thread destructors for Mac OS.
-#[cfg(target_os = "macos")]
-pub mod thread_destructor {
-    /// Does this platform support thread destructors?
-    ///
-    /// This will always return true.
-    #[inline]
-    pub fn is_supported() -> bool { true }
-
-    /// Register a thread destructor.
-    ///
-    /// # Safety
-    ///
-    /// This is unsafe due to accepting (and dereferencing) raw pointers, as well as running an
-    /// arbitrary unsafe function.
-    #[cfg(target_os = "macos")]
-    pub unsafe fn register(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
-        extern {
-            fn _tlv_atexit(dtor: unsafe extern fn(*mut u8), arg: *mut u8);
-        }
-
-        _tlv_atexit(dtor, t);
-    }
-}
-
-/// Debugging.
-pub mod debug {
-    extern {
-        /// Valgrind symbol to declare memory undefined.
-        fn valgrind_make_mem_undefined(ptr: *const u8, size: usize);
-        /// Valgrind symbol to declare memory freed.
-        fn valgrind_freelike_block(ptr: *const u8, size: usize);
-    }
-
-    /// Mark this segment undefined to the debugger.
-    pub fn mark_undefined(ptr: *const u8, size: usize) {
-        unsafe { valgrind_make_mem_undefined(ptr, size) }
-    }
-    /// Mark this segment free to the debugger.
-    pub fn mark_free(ptr: *const u8, size: usize) {
-        unsafe { valgrind_freelike_block(ptr, size) }
-    }
-}
+pub mod config;
+pub mod thread_destructor;
+pub mod debug;
+pub mod syscalls;

+ 17 - 0
shim/src/syscalls.rs

@@ -0,0 +1,17 @@
+//! System calls.
+
+/// Change the data segment. See `man brk`.
+///
+/// On success, the new program break is returned. On failure, the old program break is returned.
+///
+/// # Note
+///
+/// This is the `brk` **syscall**, not the library function.
+pub unsafe fn brk(ptr: *const u8) -> *const u8 {
+    syscall!(BRK, ptr) as *const u8
+}
+
+/// Voluntarily give a time slice to the scheduler.
+pub fn sched_yield() -> usize {
+    unsafe { syscall!(SCHED_YIELD) }
+}
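
Since `brk` returns the old program break on failure, and a null
pointer is never a valid break, passing null is the usual way to query
the current break without moving it (this is what `current_brk` in
`src/brk.rs` below does):

    // The syscall fails on a null argument and hands back the unchanged break.
    let current = unsafe { syscalls::brk(core::ptr::null()) };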

+ 46 - 0
shim/src/thread_destructor.rs

@@ -0,0 +1,46 @@
+//! Thread destructors.
+//!
+//! This module supplies the ability to register destructors called upon thread exit.
+
+pub use self::arch::*;
+
+/// Thread destructors for Linux/BSD.
+#[cfg(not(target_os = "macos"))]
+pub mod arch {
+    extern {
+        #[linkage = "extern_weak"]
+        static __dso_handle: *mut u8;
+        #[linkage = "extern_weak"]
+        static __cxa_thread_atexit_impl: *const u8;
+    }
+
+    /// Register a thread destructor.
+    // TODO: Due to rust-lang/rust#18804, make sure this is not generic!
+    pub fn register(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
+        use core::mem;
+
+        /// A thread destructor.
+        type Dtor = unsafe extern fn(dtor: unsafe extern fn(*mut u8), arg: *mut u8, dso_handle: *mut u8) -> i32;
+
+        unsafe {
+            // Make sure the symbol exists; accessing an extern static is unsafe.
+            assert!(!__cxa_thread_atexit_impl.is_null());
+
+            mem::transmute::<*const u8, Dtor>(__cxa_thread_atexit_impl)
+                (dtor, t, &__dso_handle as *const _ as *mut _)
+        };
+    }
+}
+
+/// Thread destructors for Mac OS.
+#[cfg(target_os = "macos")]
+pub mod arch {
+    extern {
+        fn _tlv_atexit(dtor: unsafe extern fn(*mut u8), arg: *mut u8);
+    }
+
+    /// Register a thread destructor.
+    pub fn register(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
+        unsafe { _tlv_atexit(dtor, t); }
+    }
+}
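
A minimal registration sketch (hypothetical; `cleanup` and `data` are
made up for illustration), matching the `unsafe extern fn(*mut u8)`
destructor signature above:

    unsafe extern fn cleanup(data: *mut u8) {
        // Tear down whatever thread-local state `data` points to.
    }

    // Run `cleanup(data)` when the current thread exits.
    thread_destructor::register(data, cleanup);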

+ 21 - 24
src/allocator.rs

@@ -9,6 +9,8 @@ use core::{mem, ops};
 use {brk, sync};
 use bookkeeper::{self, Bookkeeper, Allocator};
 
+use shim::config;
+
 #[cfg(feature = "tls")]
 use tls;
 
@@ -58,6 +60,7 @@ macro_rules! get_allocator {
                 } else {
                     // The local allocator seems to have been deinitialized, for this reason we fallback to
                     // the global allocator.
+                    log!(WARNING, "Accessing the allocator after deinitialization of the local allocator.");
 
                     // Lock the global allocator.
                     let mut guard = GLOBAL_ALLOCATOR.lock();
@@ -116,6 +119,9 @@ struct GlobalAllocator {
 impl GlobalAllocator {
     /// Initialize the global allocator.
     fn init() -> GlobalAllocator {
+        // Logging...
+        log!(NOTE, "Initializing the global allocator.");
+
         // The initial acquired segment.
         let (aligner, initial_segment, excessive) =
             brk::lock().canonical_brk(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());
@@ -153,22 +159,14 @@ impl Allocator for GlobalAllocator {
     }
 
     fn on_new_memory(&mut self) {
-        /// The memtrim limit.
-        ///
-        /// Whenever this is exceeded, the allocator will try to free as much memory to the system
-        /// as it can.
-        const OS_MEMTRIM_LIMIT: usize = 200000000;
-        /// Minimum size before a block is worthy to memtrim.
-        const MEMTRIM_WORTHY: usize = 4000;
-
-        if self.total_bytes() > OS_MEMTRIM_LIMIT {
+        if self.total_bytes() > config::OS_MEMTRIM_LIMIT {
             // memtrim the fack outta 'em.
 
             // Pop the last block.
             let block = self.pop().expect("The byte count on the global allocator is invalid.");
 
             // Check if the memtrim is worth it.
-            if block.size() >= MEMTRIM_WORTHY {
+            if block.size() >= config::OS_MEMTRIM_WORTHY {
                 // Release the block to the OS.
                 if let Err(block) = brk::lock().release(block) {
                     // It failed, put the block back.
@@ -205,6 +203,9 @@ impl LocalAllocator {
         ///
         /// This will simply free everything to the global allocator.
         extern fn dtor(alloc: &ThreadLocalAllocator) {
+            // Logging...
+            log!(NOTE, "Deinitializing the local allocator.");
+
             // This is important! The thread destructors guarantee no particular order, and thus one could use the
             // allocator _after_ this destructor has finished. In fact, this is a real problem,
             // and happens when using `Arc` and terminating the main thread, for this reason we place
@@ -231,8 +232,7 @@ impl LocalAllocator {
 
         unsafe {
             // Register the thread destructor on the current thread.
-            THREAD_ALLOCATOR.register_thread_destructor(dtor)
-                .expect("Unable to register a thread destructor.");
+            THREAD_ALLOCATOR.register_thread_destructor(dtor);
 
             LocalAllocator {
                 inner: Bookkeeper::new(Vec::from_raw_parts(initial_segment, 0)),
@@ -245,18 +245,7 @@ impl LocalAllocator {
     /// The idea is to free memory to the global allocator to unify small stubs and avoid
     /// fragmentation and thread accumulation.
     fn should_memtrim(&self) -> bool {
-        // TODO: Tweak this.
-
-        /// The fragmentation scale constant.
-        ///
-        /// This is used for determining the minimum avarage block size before memtrimming.
-        const FRAGMENTATION_SCALE: usize = 10;
-        /// The local memtrim limit.
-        ///
-        /// Whenever an allocator has more free bytes than this value, it will be memtrimmed.
-        const LOCAL_MEMTRIM_LIMIT: usize = 16384;
-
-        self.total_bytes() < FRAGMENTATION_SCALE * self.len() || self.total_bytes() > LOCAL_MEMTRIM_LIMIT
+        self.total_bytes() < config::FRAGMENTATION_SCALE * self.len() || self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT
     }
 }
 
@@ -297,6 +286,8 @@ impl Allocator for LocalAllocator {
 /// The OOM handler handles out-of-memory conditions.
 #[inline]
 pub fn alloc(size: usize, align: usize) -> *mut u8 {
+    log!(CALL, "Allocating buffer of size {} (align {}).", size, align);
+
     get_allocator!(|alloc| *Pointer::from(alloc.alloc(size, align)))
 }
 
@@ -322,6 +313,8 @@ pub fn alloc(size: usize, align: usize) -> *mut u8 {
 /// Secondly, freeing an used buffer can introduce use-after-free.
 #[inline]
 pub unsafe fn free(ptr: *mut u8, size: usize) {
+    log!(CALL, "Freeing buffer of size {}.", size);
+
     get_allocator!(|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size)))
 }
 
@@ -345,6 +338,8 @@ pub unsafe fn free(ptr: *mut u8, size: usize) {
 /// this is marked unsafe.
 #[inline]
 pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
+    log!(CALL, "Reallocating buffer of size {} to new size {}.", old_size, size);
+
     get_allocator!(|alloc| {
         *Pointer::from(alloc.realloc(
             Block::from_raw_parts(Pointer::new(ptr), old_size),
@@ -365,6 +360,8 @@ pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize)
 /// Due to being able to shrink (and thus free) the buffer, this is marked unsafe.
 #[inline]
 pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
+    log!(CALL, "Inplace reallocating buffer of size {} to new size {}.", old_size, size);
+
     get_allocator!(|alloc| {
         if alloc.realloc_inplace(
             Block::from_raw_parts(Pointer::new(ptr), old_size),

+ 8 - 4
src/block.rs

@@ -9,8 +9,6 @@ use prelude::*;
 
 use core::{ptr, cmp, mem, fmt};
 
-use sys;
-
 /// A contiguous memory block.
 ///
 /// This provides a number of guarantees,
@@ -123,6 +121,8 @@ impl Block {
     /// This will panic if the target block is smaller than the source.
     #[inline]
     pub fn copy_to(&self, block: &mut Block) {
+        log!(INTERNAL, "Copying {:?} to {:?}", *self, *block);
+
         // Bound check.
         assert!(self.size <= block.size, "Block too small.");
 
@@ -136,6 +136,8 @@ impl Block {
         use core::intrinsics;
 
         if cfg!(feature = "security") {
+            log!(INTERNAL, "Zeroing {:?}", *self);
+
             unsafe {
                 intrinsics::volatile_set_memory(*self.ptr, 0, self.size);
             }
@@ -224,7 +226,8 @@ impl Block {
     /// the debugger that this block is freed.
     #[inline]
     pub fn mark_free(self) -> Block {
-        sys::mark_free(*self.ptr as *const u8, self.size);
+        #[cfg(feature = "debugger")]
+        ::shim::debug::mark_free(*self.ptr as *const u8, self.size);
 
         self
     }
@@ -234,7 +237,8 @@ impl Block {
     /// To detect use-after-free, the allocator needs to mark
     #[inline]
     pub fn mark_uninitialized(self) -> Block {
-        sys::mark_uninitialized(*self.ptr as *const u8, self.size);
+        #[cfg(feature = "debugger")]
+        ::shim::debug::mark_undefined(*self.ptr as *const u8, self.size);
 
         self
     }

+ 43 - 43
src/bookkeeper.rs

@@ -5,6 +5,8 @@ use prelude::*;
 use core::ops::Range;
 use core::{ptr, mem, ops};
 
+use shim::config;
+
 /// Elements required _more_ than the length as capacity.
 ///
 /// This represents how many elements that are needed to conduct a `reserve` without the
@@ -91,7 +93,7 @@ impl Bookkeeper {
             reserving: false,
         };
 
-        log!(res, "Bookkeeper created.");
+        bk_log!(res, "Bookkeeper created.");
         res.check();
 
         res
@@ -104,7 +106,7 @@ impl Bookkeeper {
     #[inline]
     fn find(&mut self, block: &Block) -> usize {
         // Logging.
-        log!(self, "Searching (exact) for {:?}.", block);
+        bk_log!(self, "Searching (exact) for {:?}.", block);
 
         let ind = match self.pool.binary_search(block) {
             Ok(x) | Err(x) => x,
@@ -126,7 +128,7 @@ impl Bookkeeper {
     #[inline]
     fn find_bound(&mut self, block: &Block) -> Range<usize> {
         // Logging.
-        log!(self, "Searching (bounds) for {:?}.", block);
+        bk_log!(self, "Searching (bounds) for {:?}.", block);
 
         let mut left_ind = match self.pool.binary_search(block) {
             Ok(x) | Err(x) => x,
@@ -160,7 +162,7 @@ impl Bookkeeper {
     /// slightly faster in some cases.
     pub fn for_each<F: FnMut(Block)>(mut self, mut f: F) {
         // Logging.
-        log!(self, "Iterating over the blocks of the bookkeeper...");
+        bk_log!(self, "Iterating over the blocks of the bookkeeper...");
 
         // Run over all the blocks in the pool.
         for i in self.pool.pop_iter() {
@@ -197,7 +199,7 @@ impl Bookkeeper {
     fn check(&self) {
         if cfg!(debug_assertions) {
             // Logging.
-            log!(self, "Checking...");
+            bk_log!(self, "Checking...");
 
             // The total number of bytes.
             let mut total_bytes = 0;
@@ -320,7 +322,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     /// A block representing the marked area is then returned.
     fn alloc(&mut self, size: usize, align: usize) -> Block {
         // Logging.
-        log!(self, "Allocating {} bytes with alignment {}.", size, align);
+        bk_log!(self, "Allocating {} bytes with alignment {}.", size, align);
 
         if let Some((n, b)) = self.pool.iter_mut().enumerate().filter_map(|(n, i)| {
             if i.size() >= size {
@@ -414,7 +416,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     #[inline]
     fn free(&mut self, block: Block) {
         // Just logging for the unlucky people debugging this shit. No problem.
-        log!(self, "Freeing {:?}...", block);
+        bk_log!(self, "Freeing {:?}...", block);
 
         // Binary search for the block.
         let bound = self.find_bound(&block);
@@ -459,7 +461,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         let ind = self.find_bound(&block);
 
         // Logging.
-        log!(self;ind, "Reallocating {:?} to size {} with align {}...", block, new_size, align);
+        bk_log!(self;ind, "Reallocating {:?} to size {} with align {}...", block, new_size, align);
 
         // Try to do an inplace reallocation.
         match self.realloc_inplace_bound(ind, block, new_size) {
@@ -501,7 +503,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     #[inline]
     fn realloc_inplace(&mut self, block: Block, new_size: usize) -> Result<Block, Block> {
         // Logging.
-        log!(self, "Reallocating {:?} inplace to {}...", block, new_size);
+        bk_log!(self, "Reallocating {:?} inplace to {}...", block, new_size);
 
         // Find the bounds of given block.
         let bound = self.find_bound(&block);
@@ -521,7 +523,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     /// See [`realloc_inplace`](#method.realloc_inplace.html) for more information.
     fn realloc_inplace_bound(&mut self, ind: Range<usize>, mut block: Block, new_size: usize) -> Result<Block, Block> {
         // Logging.
-        log!(self;ind, "Try inplace reallocating {:?} to size {}.", block, new_size);
+        bk_log!(self;ind, "Try inplace reallocating {:?} to size {}.", block, new_size);
 
         // Assertions...
         debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
@@ -529,7 +531,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
 
         if new_size <= block.size() {
             // Shrink the block.
-            log!(self;ind, "Shrinking {:?}.", block);
+            bk_log!(self;ind, "Shrinking {:?}.", block);
 
             // Split the block in two segments, the main segment and the excessive segment.
             let (block, excessive) = block.split(new_size);
@@ -555,7 +557,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
             // the current block.
             if mergable {
                 // Logging...
-                log!(self;ind, "Merging {:?} to the right.", block);
+                bk_log!(self;ind, "Merging {:?} to the right.", block);
 
                 // We'll merge it with the block at the end of the range.
                 block.merge_right(&mut self.remove_at(ind.end))
@@ -591,7 +593,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     #[inline]
     fn free_bound(&mut self, ind: Range<usize>, mut block: Block) {
         // Logging.
-        log!(self;ind, "Freeing {:?}.", block);
+        bk_log!(self;ind, "Freeing {:?}.", block);
 
         // Short circuit in case of empty block.
         if block.is_empty() { return; }
@@ -643,7 +645,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     /// The returned pointer is guaranteed to be aligned to `align`.
     fn alloc_external(&mut self, size: usize, align: usize) -> Block {
         // Logging.
-        log!(self, "Fresh allocation of size {} with alignment {}.", size, align);
+        bk_log!(self, "Fresh allocation of size {} with alignment {}.", size, align);
 
         // Break it to me!
         let res = self.alloc_fresh(size, align);
@@ -658,7 +660,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     // TODO: Make `push` and `free` one.
     fn push(&mut self, block: Block) {
         // Logging.
-        log!(self;self.pool.len(), "Pushing {:?}.", block);
+        bk_log!(self;self.pool.len(), "Pushing {:?}.", block);
 
         // Mark the block free.
         let mut block = block.mark_free();
@@ -732,11 +734,12 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     /// prior to call of this function, it should be too after it.
     fn reserve(&mut self, min_cap: usize) -> Option<Block> {
         // Logging.
-        log!(self;min_cap, "Reserving {}.", min_cap);
+        bk_log!(self;min_cap, "Reserving {}.", min_cap);
 
         if !self.reserving && (self.pool.capacity() < self.pool.len() + EXTRA_ELEMENTS || self.pool.capacity() < min_cap + EXTRA_ELEMENTS) {
             // Reserve a little extra for performance reasons.
-            let new_cap = (min_cap + EXTRA_ELEMENTS) * 2 + 16;
+            // TODO: This should be moved to some new method.
+            let new_cap = min_cap + EXTRA_ELEMENTS + config::extra_fresh(min_cap);
 
             // Catch 'em all.
             debug_assert!(new_cap > self.pool.capacity(), "Reserve shrinks?!");
@@ -826,7 +829,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     #[inline]
     fn insert(&mut self, ind: usize, block: Block) {
         // Logging.
-        log!(self;ind, "Inserting block {:?}...", block);
+        bk_log!(self;ind, "Inserting block {:?}...", block);
 
         // Bound check.
         assert!(self.pool.len() >= ind, "Insertion out of bounds.");
@@ -852,40 +855,37 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
             .map(|(n, _)| n);
 
         // Log the operation.
-        log!(self;ind, "Moving all blocks right to {} blocks to the right.",
+        bk_log!(self;ind, "Moving all blocks right to {} blocks to the right.",
              gap.unwrap_or_else(|| self.pool.len()));
 
         // The old vector's buffer.
         let mut old_buf = None;
 
-        // We will only extend the length if we were unable to fit it into the current length.
-        if gap.is_none() {
-            // Loooooooging...
-            log!(self;ind, "Block pool not long enough for shift. Extending.");
-
-            // Reserve space. This does not break order, due to the assumption that
-            // `reserve` never breaks order.
-            old_buf = unborrow!(self.reserve(self.pool.len() + 1));
-
-            // We will move a block into reserved memory but outside of the vec's bounds. For
-            // that reason, we push an uninitialized element to extend the length, which will
-            // be assigned in the memcpy.
-            let res = self.pool.push(unsafe { mem::uninitialized() });
-
-            // Just some assertions...
-            debug_assert!(res.is_ok(), "Push failed (buffer full).");
-        }
-
         unsafe {
             // Memmove the elements to make a gap to the new block.
             ptr::copy(self.pool.get_unchecked(ind) as *const Block,
                       self.pool.get_unchecked_mut(ind + 1) as *mut Block,
                       // The gap defaults to the end of the pool.
-                      gap.unwrap_or_else(|| self.pool.len() - 1) - ind);
-                      //                                    ^^^
-                      // We decrement to account for the push. Please note how we only push in the
-                      // `if` block above with the conditional that `gap` is `None`, which is the
-                      // case where the closure is evaluated.
+                      gap.unwrap_or_else(|| {
+                          // We will only extend the length if we were unable to fit it into the current length.
+
+                          // Loooooooging...
+                          bk_log!(self;ind, "Block pool not long enough for shift. Extending.");
+
+                          // Reserve space. This does not break order, due to the assumption that
+                          // `reserve` never breaks order.
+                          old_buf = unborrow!(self.reserve(self.pool.len() + 1));
+
+                          // We will move a block into reserved memory but outside of the vec's bounds. For
+                          // that reason, we push an uninitialized element to extend the length, which will
+                          // be assigned in the memcpy.
+                          let res = self.pool.push(mem::uninitialized());
+
+                          // Just some assertions...
+                          debug_assert!(res.is_ok(), "Push failed (buffer full).");
+
+                          self.pool.len() - 1
+                      }) - ind);
 
             // Update the pool byte count.
             self.total_bytes += block.size();
@@ -905,7 +905,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     /// Remove a block.
     fn remove_at(&mut self, ind: usize) -> Block {
         // Logging.
-        log!(self;ind, "Removing block at {}.", ind);
+        bk_log!(self;ind, "Removing block at {}.", ind);
 
         let res = if ind + 1 == self.pool.len() {
             let block = self.pool[ind].pop();

+ 11 - 36
src/brk.rs

@@ -4,10 +4,12 @@
 
 use prelude::*;
 
-use core::{cmp, ptr};
+use core::ptr;
 use core::convert::TryInto;
 
-use {sync, sys, fail};
+use shim::{syscalls, config};
+
+use {sync, fail};
 
 /// The BRK mutex.
 ///
@@ -37,12 +39,14 @@ impl BrkLock {
     ///
     /// Due to being able to shrink the program break, this method is unsafe.
     unsafe fn sbrk(&mut self, size: isize) -> Result<Pointer<u8>, ()> {
+        log!(NOTE, "BRKing new space.");
+
         // Calculate the new program break. To avoid making multiple syscalls, we make use of the
         // state cache.
         let expected_brk = self.current_brk().offset(size);
 
         // Break it to me, babe!
-        let old_brk = Pointer::new(sys::brk(*expected_brk as *const u8) as *mut u8);
+        let old_brk = Pointer::new(syscalls::brk(*expected_brk as *const u8) as *mut u8);
 
         if expected_brk == old_brk && size != 0 {
             // BRK failed. This syscall is rather weird, but whenever it fails (e.g. OOM) it
@@ -64,6 +68,8 @@ impl BrkLock {
     pub fn release(&mut self, block: Block) -> Result<(), Block> {
         // Check if we are actually next to the program break.
         if self.current_brk() == Pointer::from(block.empty_right()) {
+            log!(DEBUG, "Releasing {:?} to the OS.", block);
+
             // We are. Now, sbrk the memory back. Due to the condition above, this is safe.
             let res = unsafe { self.sbrk(-(block.size() as isize)) };
 
@@ -105,7 +111,7 @@ impl BrkLock {
     // TODO: This method is possibly unsafe.
     pub fn canonical_brk(&mut self, size: usize, align: usize) -> (Block, Block, Block) {
         // Calculate the canonical size (extra space is allocated to limit the number of system calls).
-        let brk_size = canonicalize_space(size) + align;
+        let brk_size = size + config::extra_brk(size) + align;
 
         // Use SBRK to allocate extra data segment. The alignment is used as precursor for our
         // allocated block. This ensures that it is properly memory aligned to the requested value.
@@ -152,38 +158,7 @@ pub unsafe extern fn sbrk(size: isize) -> *mut u8 {
 
 /// Get the current program break.
 fn current_brk() -> Pointer<u8> {
-    unsafe { Pointer::new(sys::brk(ptr::null()) as *mut u8) }
-}
-
-/// Canonicalize a BRK request.
-///
-/// Syscalls can be expensive, which is why we would rather accquire more memory than necessary,
-/// than having many syscalls acquiring memory stubs. Memory stubs are small blocks of memory,
-/// which are essentially useless until merge with another block.
-///
-/// To avoid many syscalls and accumulating memory stubs, we BRK a little more memory than
-/// necessary. This function calculate the memory to be BRK'd based on the necessary memory.
-///
-/// The return value is always greater than or equals to the argument.
-#[inline]
-fn canonicalize_space(min: usize) -> usize {
-    // TODO: Tweak this.
-    /// The BRK multiplier.
-    ///
-    /// The factor determining the linear dependence between the minimum segment, and the acquired
-    /// segment.
-    const BRK_MULTIPLIER: usize = 2;
-    /// The minimum size to be BRK'd.
-    const BRK_MIN: usize = 1024;
-    /// The maximal amount of _extra_ elements.
-    const BRK_MAX_EXTRA: usize = 65536;
-
-    let res = cmp::max(BRK_MIN, min + cmp::min(BRK_MULTIPLIER * min, BRK_MAX_EXTRA));
-
-    // Make some handy assertions.
-    debug_assert!(res >= min, "Canonicalized BRK space is smaller than the one requested.");
-
-    res
+    unsafe { Pointer::new(syscalls::brk(ptr::null()) as *mut u8) }
 }
 
 #[cfg(test)]
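
With the canonicalization moved to `shim`, a fresh BRK now acquires
`size + config::extra_brk(size) + align` bytes. As a worked example
(illustrative figures): a 4096-byte request with alignment 8 BRKs
4096 + 8192 + 8 = 12296 bytes, leaving roughly 8 KiB of excessive
space to serve later allocations without another syscall.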

+ 16 - 5
src/fail.rs

@@ -5,13 +5,13 @@ use prelude::*;
 use core::sync::atomic::{self, AtomicPtr};
 use core::mem;
 
-use sys;
+use shim::config;
 
 #[cfg(feature = "tls")]
 use tls;
 
 /// The global OOM handler.
-static OOM_HANDLER: AtomicPtr<()> = AtomicPtr::new(sys::default_oom_handler as *mut ());
+static OOM_HANDLER: AtomicPtr<()> = AtomicPtr::new(config::default_oom_handler as *mut ());
 #[cfg(feature = "tls")]
 tls! {
     /// The thread-local OOM handler.
@@ -35,10 +35,14 @@ pub fn oom() -> ! {
     #[cfg(feature = "tls")]
     {
         if let Some(handler) = THREAD_OOM_HANDLER.with(|x| x.replace(None)) {
+            log!(DEBUG, "Calling the local OOM handler.");
+
             handler();
         }
     }
 
+    log!(DEBUG, "Calling the global OOM handler.");
+
     unsafe {
         // Transmute the atomic pointer to a function pointer and call it.
         (mem::transmute::<_, fn() -> !>(OOM_HANDLER.load(atomic::Ordering::SeqCst)))()
@@ -50,6 +54,9 @@ pub fn oom() -> ! {
 /// This is called when the process is out-of-memory.
 #[inline]
 pub fn set_oom_handler(handler: fn() -> !) {
+    // Logging...
+    log!(NOTE, "Setting the global OOM handler.");
+
     OOM_HANDLER.store(handler as *mut (), atomic::Ordering::SeqCst);
 }
 
@@ -61,13 +68,17 @@ pub fn set_oom_handler(handler: fn() -> !) {
 #[inline]
 #[cfg(feature = "tls")]
 pub fn set_thread_oom_handler(handler: fn() -> !) {
+    // Logging...
+    log!(NOTE, "Setting the thread OOM handler.");
+
     THREAD_OOM_HANDLER.with(|thread_oom| {
         // Replace it with the new handler.
         let res = thread_oom.replace(Some(handler));
 
-        // Make sure that it doesn't override another handler.
-        // TODO: Make this a warning.
-        debug_assert!(res.is_none(), "Overriding the old handler. Is this intentional?");
+        // Throw a warning if it overrides another handler.
+        if cfg!(debug_assertions) && res.is_some() {
+            log!(WARNING, "An old thread OOM handler was overridden.");
+        }
     });
 }
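
A sketch (with a hypothetical handler name) of installing a custom
global OOM handler through the API above; note that the handler must
diverge:

    fn my_oom_handler() -> ! {
        // Report the condition, then abort; the handler may not return.
        log!(ERROR, "Out of memory.");
        unsafe { core::intrinsics::abort() }
    }

    fail::set_oom_handler(my_oom_handler);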
 

+ 1 - 4
src/lib.rs

@@ -26,10 +26,8 @@
 #[macro_use]
 #[no_link]
 extern crate unborrow;
+extern crate ralloc_shim as shim;
 
-#[cfg(feature = "write")]
-#[macro_use]
-mod write;
 #[macro_use]
 mod log;
 #[macro_use]
@@ -49,7 +47,6 @@ mod leak;
 mod prelude;
 mod ptr;
 mod sync;
-mod sys;
 mod vec;
 
 pub use allocator::{alloc, free, realloc, realloc_inplace};

+ 151 - 16
src/log.rs

@@ -4,6 +4,50 @@
 
 /// Log to the appropriate source.
 ///
+/// The first argument defines the log level, the rest of the arguments are just `write!`-like
+/// formatters.
+#[macro_export]
+macro_rules! log {
+    (INTERNAL, $( $x:tt )*) => {
+        log!(@["INTERNAL: ", 1], $( $x )*);
+    };
+    (DEBUG, $( $x:tt )*) => {
+        log!(@["DEBUG:    ", 2], $( $x )*);
+    };
+    (CALL, $( $x:tt )*) => {
+        log!(@["CALL:     ", 3], $( $x )*);
+    };
+    (NOTE, $( $x:tt )*) => {
+        log!(@["NOTE:     ", 5], $( $x )*);
+    };
+    (WARNING, $( $x:tt )*) => {
+        log!(@["WARNING:  ", 5], $( $x )*);
+    };
+    (ERROR, $( $x:tt )*) => {
+        log!(@["ERROR:    ", 6], $( $x )*);
+    };
+    (@[$kind:expr, $lv:expr], $( $arg:expr ),*) => {
+        #[cfg(feature = "log")]
+        {
+            use core::fmt::Write;
+
+            use log::internal::{LogWriter, level};
+
+            // Check the level.
+            if level($lv) {
+                // Create the log writer.
+                let mut log = LogWriter::new();
+                // Print the log message.
+                let _ = write!(log, $kind);
+                let _ = write!(log, $( $arg ),*);
+                let _ = writeln!(log, " (at {}:{})", file!(), line!());
+            }
+        }
+    };
+}
+
+/// Log with bookkeeper data to the appropriate source.
+///
 /// The first argument this takes is of the form `pool;cursor`, which is used to print the
 /// block pools state. `cursor` is what the operation "revolves around" to give a sense of
 /// position.
@@ -11,44 +55,128 @@
 /// If the `;cursor` part is left out, no cursor will be printed.
 ///
 /// The rest of the arguments are just normal formatters.
+///
+/// This logs at the `INTERNAL` level (level 1).
 #[macro_export]
-macro_rules! log {
+macro_rules! bk_log {
     ($pool:expr, $( $arg:expr ),*) => {
-        log!($pool;(), $( $arg ),*);
+        bk_log!($pool;(), $( $arg ),*);
     };
     ($bk:expr;$cur:expr, $( $arg:expr ),*) => {
         #[cfg(feature = "log")]
         {
-            use core::fmt::Write;
-
-            use {write, log};
-            use log::internal::IntoCursor;
+            use log::internal::{IntoCursor, BlockLogger};
 
-            // Print the pool state.
-            let mut log = write::LogWriter::new();
-            let _ = write!(log, "({:2})   {:10?} : ", $bk.id, log::internal::BlockLogger {
+            log!(INTERNAL, "({:2})   {:10?} : ", $bk.id, BlockLogger {
                 cur: $cur.clone().into_cursor(),
                 blocks: &$bk.pool,
             });
-
-            // Print the log message.
-            let _ = write!(log, $( $arg ),*);
-            let _ = writeln!(log, " (at {}:{})", file!(), line!());
         }
     };
 }
 
-/// Top secret place-holding module.
-#[macro_use]
+/// Make a runtime assertion.
+///
+/// The only way it differs from the one provided by `libcore` is the panicking strategy, which
+/// allows for aborting, non-allocating panics when running the tests.
+#[macro_export]
+#[cfg(feature = "write")]
+macro_rules! assert {
+    ($e:expr) => {
+        assert!($e, "No description.");
+    };
+    ($e:expr, $( $arg:expr ),*) => {{
+        use core::intrinsics;
+
+        if !$e {
+            log!(ERROR, $( $arg ),*);
+
+            #[allow(unused_unsafe)]
+            unsafe { intrinsics::abort() }
+        }
+    }}
+}
+
+/// Make a runtime assertion in debug mode.
+///
+/// The only way it differs from the one provided by `libcore` is the panicking strategy, which
+/// allows for aborting, non-allocating panics when running the tests.
+#[cfg(feature = "write")]
+#[macro_export]
+macro_rules! debug_assert {
+    // We force the programmer to provide explanation of their assertion.
+    ($first:expr, $( $arg:tt )*) => {{
+        if cfg!(debug_assertions) {
+            assert!($first, $( $arg )*);
+        }
+    }}
+}
+
+/// Make a runtime equality assertion in debug mode.
+///
+/// The only way it differs from the one provided by `libcore` is the panicking strategy, which
+/// allows for aborting, non-allocating panics when running the tests.
+#[cfg(feature = "write")]
+#[macro_export]
+macro_rules! assert_eq {
+    ($left:expr, $right:expr) => ({
+        // We evaluate _once_.
+        let left = &$left;
+        let right = &$right;
+
+        assert!(left == right, "(left: `{:?}`, right: `{:?}`)", left, right)
+    })
+}
+
+/// Top-secret module.
 #[cfg(feature = "log")]
 pub mod internal {
     use prelude::*;
 
     use core::fmt;
-
     use core::cell::Cell;
     use core::ops::Range;
 
+    use shim::config;
+
+    use sync;
+
+    /// The log lock.
+    ///
+    /// This lock is used to avoid bungling and intertwining the log.
+    #[cfg(not(feature = "no_log_lock"))]
+    pub static LOG_LOCK: Mutex<()> = Mutex::new(());
+
+    /// A log writer.
+    ///
+    /// This writes to the shim logger.
+    pub struct LogWriter {
+        /// The inner lock.
+        #[cfg(not(feature = "no_log_lock"))]
+        _lock: sync::MutexGuard<'static, ()>,
+    }
+
+    impl LogWriter {
+        /// Standard error output.
+        pub fn new() -> LogWriter {
+            #[cfg(feature = "no_log_lock")]
+            {
+                LogWriter {}
+            }
+
+            #[cfg(not(feature = "no_log_lock"))]
+            LogWriter {
+                _lock: LOG_LOCK.lock(),
+            }
+        }
+    }
+
+    impl fmt::Write for LogWriter {
+        fn write_str(&mut self, s: &str) -> fmt::Result {
+            if config::log(s) == !0 { Err(fmt::Error) } else { Ok(()) }
+        }
+    }
+
     /// A "cursor".
     ///
     /// Cursors represents a block or an interval in the log output. This trait is implemented for
@@ -205,4 +333,11 @@ pub mod internal {
             Ok(())
         }
     }
+
+    /// Check if this log level is enabled.
+    #[allow(absurd_extreme_comparisons)]
+    #[inline]
+    pub fn level(lv: u8) -> bool {
+        lv >= config::MIN_LOG_LEVEL
+    }
 }

+ 3 - 3
src/sync.rs

@@ -1,11 +1,11 @@
 //! Synchronization primitives.
 
-use sys;
-
 use core::cell::UnsafeCell;
 use core::sync::atomic::{self, AtomicBool};
 use core::ops;
 
+use shim;
+
 /// A mutual exclusive container.
 ///
 /// This assures that only one holds mutability of the inner value. To get the inner value, you
@@ -42,7 +42,7 @@ impl<T> Mutex<T> {
             // {O,o}
             // |)``)
             // SRSLY?
-            sys::yield_now();
+            shim::syscalls::sched_yield();
         }
 
         MutexGuard {
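
The mutex is a simple spinlock: on contention it calls
`shim::syscalls::sched_yield()` to hand its time slice back instead of
burning CPU. A usage sketch (hypothetical static):

    static COUNTER: Mutex<u32> = Mutex::new(0);

    // `lock` spins (yielding) until the mutex is free, then returns a
    // guard which unlocks on drop.
    *COUNTER.lock() += 1;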

+ 0 - 75
src/sys.rs

@@ -1,75 +0,0 @@
-//! System primitives.
-//!
-//! This mostly wraps the `ralloc_shim` crate but provides some additional error handling.
-
-extern crate ralloc_shim as shim;
-
-use core::mem;
-
-pub use self::shim::default_oom_handler;
-
-/// Set the program break.
-///
-/// On success, the new program break is returned. On failure, the old program break is returned.
-///
-/// # Safety
-///
-/// This is due to being able to invalidate safe addresses as well as breaking invariants for the
-/// [`brk`](../brk).
-#[inline]
-pub unsafe fn brk(ptr: *const u8) -> *const u8 {
-    shim::brk(ptr)
-}
-
-/// Cooperatively gives up a timeslice to the OS scheduler.
-pub fn yield_now() {
-    assert_eq!(shim::sched_yield(), 0);
-}
-
-/// Register a thread destructor.
-///
-/// This will add a thread destructor to _the current thread_, which will be executed when the
-/// thread exits.
-///
-/// The argument to the destructor is a pointer to the so-called "load", which is the data
-/// shipped with the destructor.
-// TODO: I haven't figured out a safe general solution yet. Libstd relies on devirtualization,
-// which, when missed, can make it quite expensive.
-pub fn register_thread_destructor<T>(load: *mut T, dtor: extern fn(*mut T)) -> Result<(), ()> {
-    // Check if thread dtors are supported.
-    if shim::thread_destructor::is_supported() {
-        unsafe {
-            // This is safe due to sharing memory layout.
-            shim::thread_destructor::register(load as *mut u8, mem::transmute(dtor));
-        }
-
-        Ok(())
-    } else {
-        Err(())
-    }
-}
-
-/// Write text to the log.
-///
-/// The log target is defined by the `shim` crate.
-// TODO: Find a better way to silence the warning than this attribute.
-#[allow(dead_code)]
-pub fn log(s: &str) -> Result<(), ()> {
-    if shim::log(s) == !0 { Err(()) } else { Ok(()) }
-}
-
-/// Tell the debugger that this segment is free.
-///
-/// If the `debugger` feature is disabled, this is a NOOP.
-pub fn mark_free(_ptr: *const u8, _size: usize) {
-    #[cfg(feature = "debugger")]
-    shim::debug::mark_free(_ptr, _size);
-}
-
-/// Tell the debugger that this segment is unaccessible.
-///
-/// If the `debugger` feature is disabled, this is a NOOP.
-pub fn mark_uninitialized(_ptr: *const u8, _size: usize) {
-    #[cfg(feature = "debugger")]
-    shim::debug::mark_free(_ptr, _size);
-}

+ 5 - 3
src/tls.rs

@@ -4,7 +4,7 @@
 
 use core::{marker, mem};
 
-use sys;
+use shim::thread_destructor;
 
 /// A thread-local container.
 pub struct Key<T: 'static> {
@@ -39,8 +39,10 @@ impl<T: 'static> Key<T> {
     /// Note that this has to be registered for every thread, it is needed for.
     // TODO: Make this automatic on `Drop`.
     #[inline]
-    pub fn register_thread_destructor(&'static self, dtor: extern fn(&T)) -> Result<(), ()> {
-        sys::register_thread_destructor(&self.inner as *const T as *mut T, unsafe { mem::transmute(dtor) })
+    pub fn register_thread_destructor(&'static self, dtor: extern fn(&T)) {
+        // This is safe due to sharing memory layout.
+        thread_destructor::register(&self.inner as *const T as *const u8 as *mut u8,
+                                    unsafe { mem::transmute(dtor) });
     }
 }
 

+ 2 - 0
src/vec.rs

@@ -48,6 +48,8 @@ impl<T: Leak> Vec<T> {
     ///
     /// This panics if the vector is bigger than the block.
     pub fn refill(&mut self, block: Block) -> Block {
+        log!(INTERNAL, "Refilling vector...");
+
         // Calculate the new capacity.
         let new_cap = block.size() / mem::size_of::<T>();
 

+ 0 - 104
src/write.rs

@@ -1,104 +0,0 @@
-//! Direct shim-based write for internal debugging.
-//!
-//! This will replace the assertion macros to avoid deadlocks in panics, by utilizing a
-//! non-allocating writing primitive.
-
-use prelude::*;
-
-use core::fmt;
-
-use {sys, sync};
-
-/// The log lock.
-///
-/// This lock is used to avoid bungling and intertwining the log.
-#[cfg(not(feature = "no_log_lock"))]
-pub static LOG_LOCK: Mutex<()> = Mutex::new(());
-
-/// A log writer.
-///
-/// This writes to  `sys::log`.
-pub struct LogWriter {
-    /// The inner lock.
-    #[cfg(not(feature = "no_log_lock"))]
-    _lock: sync::MutexGuard<'static, ()>,
-}
-
-impl LogWriter {
-    /// Standard error output.
-    pub fn new() -> LogWriter {
-        #[cfg(feature = "no_log_lock")]
-        {
-            LogWriter {}
-        }
-
-        #[cfg(not(feature = "no_log_lock"))]
-        LogWriter {
-            _lock: LOG_LOCK.lock(),
-        }
-    }
-}
-
-impl fmt::Write for LogWriter {
-    fn write_str(&mut self, s: &str) -> fmt::Result {
-        if sys::log(s).is_err() {
-            Err(fmt::Error)
-        } else { Ok(()) }
-    }
-}
-
-/// Make a runtime assertion.
-///
-/// The only way it differs from the one provided by `libcore` is the panicking strategy, which
-/// allows for aborting, non-allocating panics when running the tests.
-#[macro_export]
-macro_rules! assert {
-    ($e:expr) => {
-        assert!($e, "No description.");
-    };
-    ($e:expr, $( $arg:expr ),*) => {{
-        use write;
-
-        use core::intrinsics;
-        use core::fmt::Write;
-
-        if !$e {
-            let mut log = write::LogWriter::new();
-            let _ = write!(log, "assertion failed at {}:{}: `{}` - ", file!(),
-                           line!(), stringify!($e));
-            let _ = writeln!(log, $( $arg ),*);
-
-            #[allow(unused_unsafe)]
-            unsafe { intrinsics::abort() }
-        }
-    }}
-}
-
-/// Make a runtime assertion in debug mode.
-///
-/// The only way it differs from the one provided by `libcore` is the panicking strategy, which
-/// allows for aborting, non-allocating panics when running the tests.
-#[macro_export]
-macro_rules! debug_assert {
-    // We force the programmer to provide explanation of their assertion.
-    ($first:expr, $( $arg:tt )*) => {{
-        if cfg!(debug_assertions) {
-            assert!($first, $( $arg )*);
-        }
-    }}
-}
-
-/// Make a runtime equality assertion in debug mode.
-///
-/// The only way it differs from the one provided by `libcore` is the panicking strategy, which
-/// allows for aborting, non-allocating panics when running the tests.
-#[macro_export]
-macro_rules! assert_eq {
-    ($left:expr, $right:expr) => ({
-        // We evaluate _once_.
-        let left = &$left;
-        let right = &$right;
-
-        assert!(left == right, "(left: `{:?}`, right: `{:?}`)", left, right)
-    })
-}