Browse Source

Improve testing suite, improve security, improve logging, global OOM handler, prepare for performance pass

This patch has the main intent of preparing for the performance pass, along with a set of other changes:

- Add acid (generative) testing: Set up a chaotic and unusual environment for testing, hopefully detecting edge case bugs. This includes doing allocations in between tests and setting up multiple threads for detecting data races.

- Fix casts: Unchecked casts can - in some cases - lead to security issues. In particular, you might be able to shrink the data segment of the process through overflowing casts.

- Global OOM handler: The OOM handler is no longer allocator-specific. This is due to a planned new model, where we utilize global-to-local allocators through thread-local storage.

- Use ranges instead of tuples: Ranges describe the behavior in a more precise manner, and furthermore they are non-affine, allowing for detection of certain logic bugs.

- Better logging: Now range cursors can be given, with the output syntax []. Cursors are defined through the Cursor trait.

- General style improvement: Various cleanups of Nilset's code (shame, shame, shame! jk)

- Replace certain cfg-flags with cfg-macros: This allows all-path-checking, e.g. erroring even when the flag isn't set.

- Make SBRK unsafe: Due to breakage when shrinking the segment, SBRK is now marked unsafe.

- Enable more clippy lints: These include checking the casts and other things. Many false positives are generated; these need to be manually suppressed with allow-attributes.

- Update the README: Just a bunch of new rambling on the review process etc.

- New tests: This includes many new tests including tests for very big allocations and partial allocations.

Fix #27.
ticki 8 years ago
parent
commit
6e3426f3ca
28 changed files with 981 additions and 388 deletions
  1. 3 2
      Cargo.toml
  2. 90 21
      README.md
  3. 16 16
      src/allocator.rs
  4. 44 13
      src/block.rs
  5. 130 121
      src/bookkeeper.rs
  6. 42 0
      src/fail.rs
  7. 8 3
      src/lib.rs
  8. 151 40
      src/log.rs
  9. 13 17
      src/sys.rs
  10. 3 0
      src/vec.rs
  11. 7 4
      src/write.rs
  12. 11 8
      tests/box.rs
  13. 11 9
      tests/btreemap.rs
  14. 41 0
      tests/cross_thread_drop.rs
  15. 15 10
      tests/join.rs
  16. 49 0
      tests/manual.rs
  17. 28 14
      tests/mpsc.rs
  18. 0 32
      tests/multithreading.rs
  19. 60 0
      tests/partial_free.rs
  20. 27 0
      tests/partial_realloc.rs
  21. 26 7
      tests/realloc.rs
  22. 33 0
      tests/scaling.rs
  23. 0 22
      tests/send.rs
  24. 8 6
      tests/string.rs
  25. 39 0
      tests/too_many_threads.rs
  26. 78 0
      tests/util/mod.rs
  27. 25 22
      tests/vec.rs
  28. 23 21
      tests/vec_box.rs

+ 3 - 2
Cargo.toml

@@ -13,7 +13,7 @@ keywords = ["alloc", "malloc", "allocator", "ralloc", "redox"]
 license = "MIT"
 
 [dependencies]
-ralloc_shim = { path="shim" }
+ralloc_shim = { path = "shim" }
 
 [dependencies.clippy]
 git = "https://github.com/Manishearth/rust-clippy.git"
@@ -30,11 +30,12 @@ codegen-units = 1
 
 [features]
 default = ["allocator", "clippy"]
-
+# ---
 allocator = []
 debug_tools = []
 libc_write = []
 log = ["libc_write"]
 security = []
+testing = ["log", "libc_write", "debug_tools"]
 unsafe_no_brk_lock = []
 unsafe_no_mutex_lock = []

+ 90 - 21
README.md

@@ -1,18 +1,27 @@
 # ralloc
 
-Redox's fast & memory efficient userspace allocator.
+A fast & memory efficient userspace allocator.
+
+This allocator is used as the default in Redox.
 
 ## A note on its state.
 
 It fully works, although it is relatively slow, since it haven't been optimized
-yet. There is currently no known bugs, but it haven't been carefully reviewed
-yet, so avoid using it in security critical programs.
+yet.
 
 I consider the state of the code quality very good.
 
+## Platforms supported out-of-the-box
+
+- [x] BSD
+- [x] Linux
+- [x] Mac OS X
+- [x] Redox
+- [ ] Windows
+
 ## Using ralloc
 
-Add ralloc to `Cargo.toml`:
+Add `ralloc` to `Cargo.toml`:
 
 ```toml
 [dependencies.ralloc]
@@ -25,9 +34,9 @@ then import it in your main file:
 extern crate ralloc;
 ```
 
-ralloc is now ready to roll!
+`ralloc` is now ready to roll!
 
-Note that ralloc cannot coexist with another allocator, unless they're deliberately compatible.
+Note that `ralloc` cannot coexist with another allocator, unless they're deliberately compatible.
 
 ## Features
 
@@ -39,18 +48,18 @@ You can set custom OOM handlers, by:
 extern crate ralloc;
 
 fn my_handler() -> ! {
-    println!("Oh no. Blame somebody.");
+    println!("Oh no. Blame the Mexicans.");
 }
 
 fn main() {
-    ralloc::lock().set_oom_handler(my_handler);
+    ralloc::set_oom_handler(my_handler);
     // Do some stuff...
 }
 ```
 
 ### Debug check: double free
 
-Ooh, this one is a cool one. ralloc detects various memory bugs when compiled
+Ooh, this one is a cool one. `ralloc` detects various memory bugs when compiled
 with the `debug_tools` feature. These checks include double free checks:
 
 ```rust
@@ -60,9 +69,9 @@ fn main() {
     // We start by allocating some stuff.
     let a = Box::new(500u32);
     // Then we memcpy the pointer (this is UB).
-    let b = Box::from_raw(&a as *mut u32);
+    let b = unsafe { Box::from_raw(&*a as *mut u32) };
     // Now both destructors are called. First a, then b, which is a double
-    // free. Luckily, ralloc provides a nice message for you, when in debug
+    // free. Luckily, `ralloc` provides a nice message for you, when in debug
     // tools mode:
     //    Assertion failed: Double free.
 
@@ -73,7 +82,7 @@ fn main() {
 
 ### Debug check: memory leaks.
 
-ralloc got memleak superpowers too! Enable `debug_tools` and do:
+`ralloc` got memleak superpowers too! Enable `debug_tools` and do:
 
 ```rust
 extern crate ralloc;
@@ -99,7 +108,7 @@ fn main() {
 ### Partial deallocation
 
 Many allocators limits deallocations to be allocated block, that is, you cannot
-perform arithmetics or split it. ralloc does not have such a limitation:
+perform arithmetics or split it. `ralloc` does not have such a limitation:
 
 ```rust
 extern crate ralloc;
@@ -151,16 +160,43 @@ fn main() {
 ### Top notch security
 
 If you are willing to trade a little performance, for extra security you can
-compile ralloc with the `security` flag. This will, along with other things,
+compile `ralloc` with the `security` flag. This will, along with other things,
 make frees zeroing.
 
 In other words, an attacker cannot for example inject malicious code or data,
 which can be exploited when forgetting to initialize the data you allocate.
 
+### Code verification
+
+Allocators are extremely security critical. If the same address is allocated to
+two different callers, you risk all sorts of vulnerabilities. For this reason,
+it is important that the code is reviewed and verified.
+
+`ralloc` uses a multi-stage verification model:
+
+1. The type checker. A significant part of the verification is done entirely
+   statically, and enforced through the type checker. We make excessive use of
+   Rust's safety features and especially affine types.
+2. Unit testing. `ralloc` has full-coverage unit tests, even for private
+   interfaces.
+3. Integration testing suite. `ralloc` uses a form of generative testing, where
+   tests are "expanded" through a fixed set of functions. This allows
+   relatively few tests (e.g., a few hundreds of lines) to multiply and become
+   even more effective.
+4. Runtime checks. `ralloc` tries to avoid runtime tests, whenever it can, but
+   that is not always possible. When the security gain is determined to be
+   significant, and the performance loss is small, we use runtime checks (like
+   checks for buffer overflows).
+5. Debug assertions. `ralloc` contains numerous debug assertions, enabled in
+   debug mode. These allows for very careful testing for things like double
+   free, memory corruption, as well as leaks and alignment checks.
+6. Manual reviewing. One or more persons reviews patches to ensure high
+   security.
+
 ### Lock reuse
 
 Acquiring a lock sequentially multiple times can be expensive. Therefore,
-ralloc allows you to lock the allocator once, and reuse that:
+`ralloc` allows you to lock the allocator once, and reuse that:
 
 ```rust
 extern crate ralloc;
@@ -174,14 +210,14 @@ fn main() {
     let _ = lock.alloc(4, 2);
     let _ = lock.alloc(4, 2);
 
-    // It is automatically released through its destructor.
+    // The lock is automatically released through its destructor.
 }
 ```
 
 ### Security through the type system
 
-ralloc makes heavy use of Rust's type system, to make safety guarantees.
-Internally, ralloc has a primitive named `Block`. This is fairly simple,
+`ralloc` makes heavy use of Rust's type system, to make safety guarantees.
+Internally, `ralloc` has a primitive named `Block`. This is fairly simple,
 denoting a contagious segment of memory, but what is interesting is how it is
 checked at compile time to be unique. This is done through the affine type
 system.
@@ -190,7 +226,9 @@ This is just one of many examples.
 
 ### Platform agnostic
 
-ralloc is platform independent, with the only requirement of the following symbols:
+`ralloc` is platform independent. It depends on `ralloc_shim`, a minimal
+interface for platform dependent functions. The default implementation of
+`ralloc_shim` requires the following symbols:
 
 1. `sbrk`: For extending the data segment size.
 2. `sched_yield`: For the spinlock.
@@ -199,7 +237,7 @@ ralloc is platform independent, with the only requirement of the following symbo
 
 ### Local allocators
 
-ralloc allows you to create non-global allocators, for e.g. thread specific purposes:
+`ralloc` allows you to create non-global allocators, for e.g. thread specific purposes:
 
 ```rust
 extern crate ralloc;
@@ -217,7 +255,16 @@ fn main() {
 
 ### Safe SBRK
 
-ralloc provides a `sbrk`, which can be used safely without breaking the allocator.
+`ralloc` provides a `sbrk`, which can be used safely without breaking the allocator:
+
+```rust
+extern crate ralloc;
+
+fn main() {
+    // BRK'ing 20 bytes...
+    let ptr = unsafe { ralloc::sbrk(20) };
+}
+```
 
 ### Logging
 
@@ -239,3 +286,25 @@ To the left, you can see the state of the block pool. `x` denotes a non-empty
 block, `_` denotes an empty block, and `|` denotes the cursor.
 
 The `a[b]` is a syntax for block on address `a` with size `b`.
+
+### Useless alignments
+
+Alignments don't have to be a power of two.
+
+## Planned features
+
+### Failable allocations
+
+Often you are interested in handling OOM on a case-by-case basis. This is
+especially true when dealing with very big allocations.
+
+`ralloc` allows that:
+
+```rust
+extern crate ralloc;
+
+fn main() {
+    let buf = ralloc::lock().try_alloc(8, 4);
+    // `buf` is a Result: It is Err(()) if the allocation failed.
+}
+```

+ 16 - 16
src/allocator.rs

@@ -4,13 +4,14 @@
 
 use prelude::*;
 
-use bookkeeper::Bookkeeper;
 use sync;
+use bookkeeper::Bookkeeper;
 
 /// The global default allocator.
 static ALLOCATOR: sync::Mutex<Allocator> = sync::Mutex::new(Allocator::new());
 
 /// Lock the allocator.
+#[inline]
 pub fn lock<'a>() -> sync::MutexGuard<'a, Allocator> {
     ALLOCATOR.lock()
 }
@@ -26,6 +27,7 @@ pub struct Allocator {
 
 impl Allocator {
     /// Create a new, empty allocator.
+    #[inline]
     pub const fn new() -> Allocator {
         Allocator {
             inner: Bookkeeper::new(),
@@ -33,6 +35,10 @@ impl Allocator {
     }
 
     /// Allocate a block of memory.
+    ///
+    /// # Errors
+    ///
+    /// The OOM handler handles out-of-memory conditions.
     #[inline]
     pub fn alloc(&mut self, size: usize, align: usize) -> *mut u8 {
         *Pointer::from(self.inner.alloc(size, align))
@@ -42,23 +48,25 @@ impl Allocator {
     ///
     /// Note that this do not have to be a buffer allocated through ralloc. The only requirement is
     /// that it is not used after the free.
+    ///
+    /// # Errors
+    ///
+    /// The OOM handler handles out-of-memory conditions.
     #[inline]
     pub unsafe fn free(&mut self, ptr: *mut u8, size: usize) {
-        // When compiled with `security`, we zero this block.
-        #[cfg(feature = "security")]
-        block.zero();
-
-        // Lock the bookkeeper, and do a `free`.
-        self.inner.free(Block::from_raw_parts(Pointer::new(ptr), size));
+        self.inner.free(Block::from_raw_parts(Pointer::new(ptr), size))
     }
 
     /// Reallocate memory.
     ///
     /// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
     /// returned pointer with size `size`.
+    ///
+    /// # Errors
+    ///
+    /// The OOM handler handles out-of-memory conditions.
     #[inline]
     pub unsafe fn realloc(&mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
-        // Lock the bookkeeper, and do a `realloc`.
         *Pointer::from(self.inner.realloc(
             Block::from_raw_parts(Pointer::new(ptr), old_size),
             size,
@@ -73,7 +81,6 @@ impl Allocator {
     /// This can be used to shrink (truncate) a buffer as well.
     #[inline]
     pub unsafe fn realloc_inplace(&mut self, ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
-        // Lock the bookkeeper, and do a `realloc_inplace`.
         if self.inner.realloc_inplace(
             Block::from_raw_parts(Pointer::new(ptr), old_size),
             size
@@ -84,13 +91,6 @@ impl Allocator {
         }
     }
 
-    /// Set the OOM handler.
-    ///
-    /// This is called when the process is out-of-memory.
-    pub fn set_oom_handler(&mut self, handler: fn() -> !) {
-        self.inner.set_oom_handler(handler);
-    }
-
     /// Assert that no leaks are done.
     ///
     /// This should be run in the end of your program, after destructors have been run. It will then

+ 44 - 13
src/block.rs

@@ -5,9 +5,10 @@
 
 use prelude::*;
 
-use sys;
+use {sys, fail};
 
 use core::{ptr, cmp, mem, fmt};
+use core::convert::TryInto;
 
 /// A contiguous memory block.
 ///
@@ -45,13 +46,13 @@ impl Block {
 
     /// BRK allocate a block.
     #[inline]
-    pub fn brk(size: usize) -> Result<Block, ()> {
-        Ok(Block {
+    pub fn brk(size: usize) -> Block {
+        Block {
             size: size,
             ptr: unsafe {
-                Pointer::new(try!(sys::sbrk(size as isize)))
+                Pointer::new(sys::sbrk(size.try_into().unwrap()).unwrap_or_else(|()| fail::oom()))
             },
-        })
+        }
     }
 
     /// Create an empty block starting at `ptr`.
@@ -75,10 +76,15 @@ impl Block {
 
     /// Create an empty block representing the right edge of this block
     #[inline]
+    #[allow(cast_possible_wrap)]
     pub fn empty_right(&self) -> Block {
         Block {
             size: 0,
-            ptr: unsafe { Pointer::new(*self.ptr).offset(self.size as isize) },
+            ptr: unsafe {
+                // By the invariants of this type (the size is bounded by the address space), this
+                // conversion isn't overflowing.
+                Pointer::new(*self.ptr).offset(self.size as isize)
+            },
         }
     }
 
@@ -136,12 +142,15 @@ impl Block {
     }
 
     /// Volatile zero this memory.
-    #[cfg(feature = "security")]
-    pub fn zero(&mut self) {
+    ///
+    /// Note that this is a NOOP in release mode.
+    pub fn sec_zero(&mut self) {
         use core::intrinsics;
 
-        unsafe {
-            intrinsics::volatile_set_memory(*self.ptr, 0, self.size);
+        if cfg!(feature = "security") {
+            unsafe {
+                intrinsics::volatile_set_memory(*self.ptr, 0, self.size);
+            }
         }
     }
 
@@ -167,6 +176,7 @@ impl Block {
     ///
     /// Panics if `pos` is out of bound.
     #[inline]
+    #[allow(cast_possible_wrap)]
     pub fn split(self, pos: usize) -> (Block, Block) {
         assert!(pos <= self.size, "Split {} out of bound (size is {})!", pos, self.size);
 
@@ -177,7 +187,11 @@ impl Block {
             },
             Block {
                 size: self.size - pos,
-                ptr: unsafe { self.ptr.offset(pos as isize) },
+                ptr: unsafe {
+                    // This won't overflow due to the assertion above, ensuring that it is bounded
+                    // by the address space. See the `split_at_mut` source from libcore.
+                    self.ptr.offset(pos as isize)
+                },
             }
         )
     }
@@ -186,6 +200,7 @@ impl Block {
     ///
     /// Returns an `None` holding the intact block if `align` is out of bounds.
     #[inline]
+    #[allow(cast_possible_wrap)]
     pub fn align(&mut self, align: usize) -> Option<(Block, Block)> {
         // Calculate the aligner, which defines the smallest size required as precursor to align
         // the block to `align`.
@@ -206,7 +221,11 @@ impl Block {
                 },
                 Block {
                     size: old.size - aligner,
-                    ptr: unsafe { old.ptr.offset(aligner as isize) },
+                    ptr: unsafe {
+                        // The aligner is bounded by the size, which itself is bounded by the
+                        // address space. Therefore, this conversion cannot overflow.
+                        old.ptr.offset(aligner as isize)
+                    },
                 }
             ))
         } else { None }
@@ -290,7 +309,6 @@ mod test {
     }
 
     #[test]
-    #[cfg(not(feature = "libc_write"))]
     #[should_panic]
     fn test_oob() {
         let arr = b"lorem";
@@ -315,4 +333,17 @@ mod test {
 
         assert_eq!(arr, [0, 2, 0, 2, 255, 255]);
     }
+
+    #[test]
+    fn test_empty_lr() {
+        let arr = b"Lorem ipsum dolor sit amet";
+        let block = unsafe {
+            Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
+        };
+
+        assert!(block.empty_left().is_empty());
+        assert!(block.empty_right().is_empty());
+        assert_eq!(*Pointer::from(block.empty_left()) as *const u8, arr.as_ptr());
+        assert_eq!(block.empty_right(), block.split(arr.len()).1);
+    }
 }

+ 130 - 121
src/bookkeeper.rs

@@ -4,7 +4,8 @@ use prelude::*;
 
 use vec::Vec;
 
-use core::{ptr, cmp, mem, intrinsics};
+use core::ops::Range;
+use core::{ptr, cmp, mem};
 
 /// Canonicalize a BRK request.
 ///
@@ -36,15 +37,6 @@ fn canonicalize_brk(min: usize) -> usize {
     res
 }
 
-/// The default OOM handler.
-///
-/// This will simply abort the process.
-fn default_oom_handler() -> ! {
-    unsafe {
-        intrinsics::abort();
-    }
-}
-
 /// The memory bookkeeper.
 ///
 /// This is the main component of ralloc. Its job is to keep track of the free blocks in a
@@ -59,8 +51,7 @@ fn default_oom_handler() -> ! {
 pub struct Bookkeeper {
     /// The internal block pool.
     ///
-    /// Guarantees
-    /// ==========
+    /// # Guarantees
     ///
     /// Certain guarantees are made:
     ///
@@ -71,8 +62,6 @@ pub struct Bookkeeper {
     ///
     /// These are invariants assuming that only the public methods are used.
     pool: Vec<Block>,
-    /// The inner OOM handler.
-    oom_handler: fn() -> !,
     /// The number of bytes currently allocated.
     #[cfg(feature = "debug_tools")]
     allocated: usize,
@@ -87,7 +76,6 @@ impl Bookkeeper {
     pub const fn new() -> Bookkeeper {
         Bookkeeper {
             pool: Vec::new(),
-            oom_handler: default_oom_handler,
             allocated: 0,
         }
 
@@ -98,7 +86,6 @@ impl Bookkeeper {
     pub const fn new() -> Bookkeeper {
         Bookkeeper {
             pool: Vec::new(),
-            oom_handler: default_oom_handler,
         }
     }
 
@@ -147,7 +134,10 @@ impl Bookkeeper {
     /// A block representing the marked area is then returned.
     pub fn alloc(&mut self, size: usize, align: usize) -> Block {
         // TODO: scan more intelligently.
-        log!(self.pool;0, "Allocating {} with align {}", size, align);
+
+        // Logging.
+        log!(self.pool, "Allocating {} bytes with alignment {}.", size, align);
+
         if let Some((n, b)) = self.pool.iter_mut().enumerate().filter_map(|(n, i)| {
             if i.size() >= size {
                 // Try to split at the aligner.
@@ -157,7 +147,7 @@ impl Bookkeeper {
                         *i = a;
                         Some((n, b))
                     } else {
-                        // Put the split block back together and place it back in its spot
+                        // Put the split block back together and place it back in its spot.
                         a.merge_right(&mut b).unwrap();
                         *i = a;
                         None
@@ -168,7 +158,8 @@ impl Bookkeeper {
             }
         }).next() {
             if self.pool[n].is_empty() {
-                let _ = self.remove_at(n); //for empty alignment invariant
+                // For empty alignment invariant.
+                let _ = self.remove_at(n);
             }
 
             let (res, excessive) = b.split(size);
@@ -176,8 +167,8 @@ impl Bookkeeper {
             // Mark the excessive space as free.
             // There are many corner cases that make knowing where to insert it difficult
             // so we search instead.
-            let (l,r) = self.find_both(&excessive);
-            self.free_ind(l, r, excessive);
+            let bound = self.find_bound(&excessive);
+            self.free_ind(bound, excessive);
 
             // Check consistency.
             self.check();
@@ -189,6 +180,7 @@ impl Bookkeeper {
         } else {
             // No fitting block found. Allocate a new block.
             let res = self.alloc_fresh(size, align);
+
             // "Leave" the allocator.
             self.leave(res)
         }
@@ -238,14 +230,18 @@ impl Bookkeeper {
     /// See [`insert`](#method.insert) for details.
     #[inline]
     pub fn free(&mut self, block: Block) {
+        // Just logging for the unlucky people debugging this shit. No problem.
+        log!(self.pool, "Freeing {:?}...", block);
+
         // "Enter" the allocator.
-        log!(self.pool;0, "free");
         let block = self.enter(block);
         self.reserve_more(1);
 
-        let (l,r) = self.find_both(&block);
+        // Binary search for the block.
+        let bound = self.find_bound(&block);
 
-        self.free_ind(l, r, block);
+        // Free the given block.
+        self.free_ind(bound, block);
     }
 
     /// Reallocate memory.
@@ -260,8 +256,7 @@ impl Bookkeeper {
     ///
     /// The data will be truncated if `new_size` is smaller than `block`'s size.
     ///
-    /// Example
-    /// =======
+    /// # Example
     ///
     /// We will first try to perform an in-place reallocation, and if that fails, we will use
     /// memmove.
@@ -281,17 +276,16 @@ impl Bookkeeper {
     /// deallocate the old one, after which we use memmove to copy the data over to the newly
     /// allocated list.
     pub fn realloc(&mut self, block: Block, new_size: usize, align: usize) -> Block {
-        // Find the index.
-        log!(self.pool;0, "realloc");
-        let (ind, ind_right) = self.find_both(&block);
+        // Find the index bound.
+        let ind = self.find_bound(&block);
 
         // Logging.
-        log!(self.pool;ind, "Reallocating {:?} to size {} with align {}.", block, new_size, align);
+        log!(self.pool;ind, "Reallocating {:?} to size {} with align {}...", block, new_size, align);
 
         // "Leave" the allocator.
         let block = self.enter(block);
         // Try to do an inplace reallocation.
-        match self.realloc_inplace_ind(ind, ind_right, block, new_size) {
+        match self.realloc_inplace_ind(ind, block, new_size) {
             Ok(block) => self.leave(block),
             Err(block) => {
                 // Reallocation cannot be done inplace.
@@ -304,8 +298,8 @@ impl Bookkeeper {
 
                 // Free the old block.
                 // Allocation may have moved insertion so we search again.
-                let (ind, ind_right) = self.find_both(&block);
-                self.free_ind(ind, ind_right,  block);
+                let bound = self.find_bound(&block);
+                self.free_ind(bound, block);
 
                 // Check consistency.
                 self.check();
@@ -313,6 +307,7 @@ impl Bookkeeper {
                 debug_assert!(res.size() >= new_size, "Requested space does not match with the \
                               returned block.");
 
+                // Leave the allocator.
                 self.leave(res)
             },
         }
@@ -330,9 +325,14 @@ impl Bookkeeper {
     /// [`realloc_inplace_ind`](#method.realloc_inplace_ind.html).
     #[inline]
     pub fn realloc_inplace(&mut self, block: Block, new_size: usize) -> Result<Block, Block> {
-        log!(self.pool;0, "realloc_inplace");
-        let (ind, ind_right) = self.find_both(&block);
-        let res = self.realloc_inplace_ind(ind, ind_right, block, new_size);
+        // Logging.
+        log!(self.pool, "Reallocating {:?} inplace to {}...", block, new_size);
+
+        // Find the bounds of given block.
+        let bound = self.find_bound(&block);
+
+        // Go for it!
+        let res = self.realloc_inplace_ind(bound, block, new_size);
 
         // Check consistency.
         debug_assert!(res.as_ref().ok().map_or(true, |x| x.size() == new_size), "Requested space \
@@ -348,7 +348,9 @@ impl Bookkeeper {
     /// The returned pointer is guaranteed to be aligned to `align`.
     #[inline]
     fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
-        log!(self.pool;0, "alloc_fresh");
+        // Logging.
+        log!(self.pool, "Fresh allocation of size {} with alignment {}.", size, align);
+
         // To avoid shenanigans with unbounded recursion and other stuff, we pre-reserve the
         // buffer.
         self.reserve_more(2);
@@ -368,22 +370,23 @@ impl Bookkeeper {
 
     /// Reallocate a block on a know index inplace.
     ///
-    /// See [`realloc_inplace_ind`](#method.realloc_inplace.html) for more information.
-    fn realloc_inplace_ind(&mut self, ind: usize, ind_right: usize, mut block: Block, new_size: usize) -> Result<Block, Block> {
+    /// See [`realloc_inplace`](#method.realloc_inplace.html) for more information.
+    fn realloc_inplace_ind(&mut self, ind: Range<usize>, mut block: Block, new_size: usize) -> Result<Block, Block> {
         // Logging.
         log!(self.pool;ind, "Try inplace reallocating {:?} to size {}.", block, new_size);
 
         /// Assertions...
-        debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");
+        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
+                      index.");
 
         if new_size <= block.size() {
             // Shrink the block.
-            log!(self.pool;ind, "  Shrink.");
+            log!(self.pool;ind, "Shrinking {:?}.", block);
 
             // Split the block in two segments, the main segment and the excessive segment.
             let (block, excessive) = block.split(new_size);
             // Free the excessive segment.
-            self.free_ind(ind, ind_right, excessive);
+            self.free_ind(ind, excessive);
 
             // Make some assertions to avoid dumb bugs.
             debug_assert!(block.size() == new_size, "Block wasn't shrinked properly.");
@@ -396,24 +399,27 @@ impl Bookkeeper {
             // We check if `ind` is the end of the array.
         } else {
             let mut mergable = false;
-            if let Some(entry) = self.pool.get_mut(ind_right) {
+            if let Some(entry) = self.pool.get_mut(ind.end) {
                 mergable = entry.size() + block.size() >= new_size && block.left_to(entry);
             }
             // Note that we are sure that no segments in the array are adjacent (unless they have size
             // 0). This way we know that we will, at maximum, need one and only one block for extending
             // the current block.
             if mergable {
-                log!(self.pool;ind, "  Merge");
-                block.merge_right(&mut self.remove_at(ind_right)).unwrap();
+                // Logging...
+                log!(self.pool;ind, "Merging {:?} to the right.", block);
+
+                // We'll merge it with the block at the end of the range.
+                block.merge_right(&mut self.remove_at(ind.end)).unwrap();
                 // Merge succeeded.
 
                 // Place the excessive block back.
                 let (res, excessive) = block.split(new_size);
-                // remove_at may have shortened the vec
-                if ind == self.pool.len() {
+                // `remove_at` may have shortened the vector.
+                if ind.start == self.pool.len() {
                     self.push_no_reserve(excessive);
                 } else if !excessive.is_empty() {
-                    self.pool[ind] = excessive;
+                    self.pool[ind.start] = excessive;
                 }
                 // Block will still not be adjacent, due to `excessive` being guaranteed to not be
                 // adjacent to the next block.
@@ -429,37 +435,41 @@ impl Bookkeeper {
         Err(block)
     }
 
-    /// Free a block placed on some index.
+    /// Free a block placed in some index bound.
     ///
     /// This will at maximum insert one element.
     ///
     /// See [`free`](#method.free) for more information.
     #[inline]
-    fn free_ind(&mut self, ind: usize, right_ind: usize, mut block: Block) {
+    fn free_ind(&mut self, ind: Range<usize>, mut block: Block) {
         // Logging.
         log!(self.pool;ind, "Freeing {:?}.", block);
 
         // Short circuit in case of empty block.
         if block.is_empty() { return; }
 
-        if ind == self.pool.len() {
+        // When compiled with `security`, we zero this block.
+        block.sec_zero();
+
+        if ind.start == self.pool.len() {
             self.push_no_reserve(block);
             return;
         }
 
         // Assertions...
-        debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");
+        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
+                      index.");
 
         // Try to merge it with the block to the right.
-        if right_ind < self.pool.len() && block.left_to(&self.pool[right_ind]) {
-            block.merge_right(&mut self.remove_at(right_ind)).unwrap();
+        if ind.end < self.pool.len() && block.left_to(&self.pool[ind.end]) {
+            block.merge_right(&mut self.remove_at(ind.end)).unwrap();
             // The merging succeeded. We proceed to try to close in the possible gap.
-            if ind != 0 && self.pool[ind-1].merge_right(&mut block).is_ok() {
+            if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
                 self.check();
                 return;
             }
         // Dammit, let's try to merge left.
-        } else if ind != 0 && self.pool[ind - 1].merge_right(&mut block).is_ok() {
+        } else if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
             // Check consistency.
             self.check();
 
@@ -467,7 +477,7 @@ impl Bookkeeper {
         }
 
         // Well, it failed, so we insert it the old-fashioned way.
-        self.insert(ind, block);
+        self.insert(ind.start, block);
 
         // Check consistency.
         self.check();
@@ -480,14 +490,11 @@ impl Bookkeeper {
         log!(self.pool;self.pool.len(), "BRK'ing a block of size, {}, and alignment {}.", size, align);
 
         // Calculate the canonical size (extra space is allocated to limit the number of system calls).
-        let brk_size = canonicalize_brk(size).checked_add(align).unwrap_or_else(|| self.oom());
+        let brk_size = canonicalize_brk(size).checked_add(align).expect("Alignment addition overflowed.");
 
         // Use SBRK to allocate extra data segment. The alignment is used as precursor for our
         // allocated block. This ensures that it is properly memory aligned to the requested value.
-        let (alignment_block, rest) = Block::brk(brk_size)
-            .unwrap_or_else(|_| self.oom())
-            .align(align)
-            .unwrap();
+        let (alignment_block, rest) = Block::brk(brk_size).align(align).unwrap();
 
         // Split the block to leave the excessive space.
         let (res, excessive) = rest.split(size);
@@ -523,8 +530,10 @@ impl Bookkeeper {
 
     /// Push an element without reserving.
     fn push_no_reserve(&mut self, mut block: Block) {
+        // Logging.
+        log!(self.pool;self.pool.len(), "Pushing {:?}.", block);
+
         // Short-circuit in case on empty block.
-        log!(self.pool;self.pool.len(), "Pushing {:?}", block);
         if !block.is_empty() {
             // We will try to simply merge it with the last block.
             if let Some(x) = self.pool.last_mut() {
@@ -542,6 +551,7 @@ impl Bookkeeper {
             // Make some assertions.
             debug_assert!(res.is_ok(), "Push failed (buffer full).");
         }
+
         self.check();
     }
 
@@ -551,7 +561,10 @@ impl Bookkeeper {
     /// potentially reallocating the block pool.
     #[inline]
     fn reserve_more(&mut self, needed: usize) {
-        log!(self.pool;self.pool.len(), "reserving {} past {}, cap {}", needed, self.pool.len(), self.pool.capacity());
+        // Logging.
+        log!(self.pool;self.pool.len(), "Reserving {} past {}, currently has capacity {}.", needed,
+             self.pool.len(), self.pool.capacity());
+
         let needed = self.pool.len() + needed;
         if needed > self.pool.capacity() {
             // TODO allow BRK-free non-inplace reservations.
@@ -591,8 +604,9 @@ impl Bookkeeper {
     #[inline]
     fn find(&mut self, block: &Block) -> usize {
         // TODO optimize this function.
+        // Logging.
+        log!(self.pool, "Searching (exact) for {:?}.", block);
 
-        log!(self.pool;0, "find");
         let ind = match self.pool.binary_search(block) {
             Ok(x) | Err(x) => x,
         };
@@ -606,15 +620,16 @@ impl Bookkeeper {
             .count()
     }
 
-    /// Perform a binary search to find the appropriate place where the block can be insert or is
+    /// Perform a binary search to find the appropriate bound where the block can be inserted or is
     /// located.
     ///
     /// It is guaranteed that no block left to the returned value, satisfy the above condition.
     #[inline]
-    fn find_both(&mut self, block: &Block) -> (usize, usize) {
+    fn find_bound(&mut self, block: &Block) -> Range<usize> {
         // TODO optimize this function.
+        // Logging.
+        log!(self.pool, "Searching (bounds) for {:?}.", block);
 
-        log!(self.pool;0, "find_both");
         let mut left_ind = match self.pool.binary_search(block) {
             Ok(x) | Err(x) => x,
         };
@@ -637,7 +652,8 @@ impl Bookkeeper {
             .skip(right_ind)
             .take_while(|x| x.is_empty())
             .count();
-        (left_ind, right_ind)
+
+        left_ind..right_ind
     }
 
     /// Insert a block entry at some index.
@@ -704,14 +720,14 @@ impl Bookkeeper {
     #[inline]
     fn insert(&mut self, ind: usize, block: Block) {
         // Logging.
-        log!(self.pool;ind, "Inserting block {:?}.", block);
+        log!(self.pool;ind, "Inserting block {:?}...", block);
 
         // Bound check.
         assert!(self.pool.len() >= ind, "Insertion out of bounds.");
 
         // Some assertions...
-        debug_assert!(self.pool.len() <= ind || block <= self.pool[ind],
-                      "Inserting at {} will make the list unsorted.", ind);
+        debug_assert!(self.pool.len() <= ind || block <= self.pool[ind], "Inserting at {} will make \
+                      the list unsorted.", ind);
         debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");
         debug_assert!(!block.is_empty(), "Inserting an empty block.");
 
@@ -736,7 +752,7 @@ impl Bookkeeper {
         };
 
         // Log the operation.
-        log!(self.pool;ind, "Moving {}", n);
+        log!(self.pool;ind, "Moving {} blocks to the right.", n);
 
         unsafe {
             // TODO clean this mess up.
@@ -765,21 +781,25 @@ impl Bookkeeper {
 
     /// Remove a block.
     fn remove_at(&mut self, ind: usize) -> Block {
+        // Logging.
+        log!(self.pool;ind, "Removing block.");
+
         if ind == self.pool.len() - 1 {
-            let ret = self.pool[ind].pop();
+            let res = self.pool[ind].pop();
             // Make sure there are no trailing empty blocks.
             let new_len = self.pool.len() - self.pool.iter().rev().take_while(|x| x.is_empty()).count();
 
             // Truncate the vector.
             self.pool.truncate(new_len);
-            ret
+            res
         } else {
             // Calculate the upper and lower bound
             let empty = self.pool[ind + 1].empty_left();
             let empty2 = empty.empty_left();
 
             // Replace the block at `ind` with the left empty block from `ind + 1`.
-            let ret = mem::replace(&mut self.pool[ind], empty);
+            let res = mem::replace(&mut self.pool[ind], empty);
+
             // Iterate over the pool from `ind` and down.
             let skip = self.pool.len() - ind;
             for place in self.pool.iter_mut().rev().skip(skip).take_while(|x| x.is_empty()) {
@@ -787,26 +807,10 @@ impl Bookkeeper {
                 *place = empty2.empty_left();
             }
 
-            ret
+            res
         }
     }
 
-    /// Call the OOM handler.
-    ///
-    /// This is used one out-of-memory errors, and will never return. Usually, it simply consists
-    /// of aborting the process.
-    fn oom(&self) -> ! {
-        (self.oom_handler)()
-    }
-
-    /// Set the OOM handler.
-    ///
-    /// This is called when the process is out-of-memory.
-    #[inline]
-    pub fn set_oom_handler(&mut self, handler: fn() -> !) {
-        self.oom_handler = handler;
-    }
-
     /// Leave the allocator.
     ///
     /// A block should be "registered" through this function when it leaves the allocated (e.g., is
@@ -838,44 +842,49 @@ impl Bookkeeper {
         block
     }
 
-    /// No-op in release mode.
-    #[cfg(not(debug_assertions))]
-    #[inline]
-    fn check(&self) {}
-
     /// Perform consistency checks.
     ///
     /// This will check for the following conditions:
     ///
     /// 1. The list is sorted.
     /// 2. No blocks are adjacent.
-    #[cfg(debug_assertions)]
+    ///
+    /// This is a no-op in release mode.
     fn check(&self) {
-        log!(self.pool;0, "Checking...");
-        let mut it = self.pool.iter().enumerate().rev();
-        if let Some((_,x)) = it.next() {
-            // Make sure there are no trailing empty blocks
-            assert!(!x.is_empty());
-            let mut next = x;
-            for (n, i) in it {
-                // Check if sorted.
-                assert!(next >= i, "The block pool is not sorted at index, {} ({:?} < {:?})", n, next,
-                        i);
-                // Make sure no blocks are adjacent.
-                assert!(!i.left_to(next) || i.is_empty(), "Adjacent blocks at index, {} ({:?} and \
-                        {:?})", n, i, next);
-                // Make sure an empty block has the same address as its right neighbor
-                assert!(!i.is_empty() || i == next, "Empty block not aligned to right neighbor \
-                        at index {} ({:?} and {:?})", n, i, next);
-
-                // Set the variable tracking the previous block.
-                next = i;
+        if cfg!(debug_assertions) {
+            // Logging.
+            log!(self.pool, "Checking...");
+
+            // Reverse iterator over the blocks.
+            let mut it = self.pool.iter().enumerate().rev();
+
+            if let Some((_, x)) = it.next() {
+                // Make sure there are no leading empty blocks.
+                assert!(!x.is_empty());
+
+                let mut next = x;
+                for (n, i) in it {
+                    // Check if sorted.
+                    assert!(next >= i, "The block pool is not sorted at index, {} ({:?} < {:?})", n, next,
+                            i);
+                    // Make sure no blocks are adjacent.
+                    assert!(!i.left_to(next) || i.is_empty(), "Adjacent blocks at index, {} ({:?} and \
+                            {:?})", n, i, next);
+                    // Make sure an empty block has the same address as its right neighbor.
+                    assert!(!i.is_empty() || i == next, "Empty block not adjacent to right neighbor \
+                            at index {} ({:?} and {:?})", n, i, next);
+
+                    // Set the variable tracking the previous block.
+                    next = i;
+                }
+
+                // Check for trailing empty blocks.
+                assert!(!self.pool.last().unwrap().is_empty(), "Trailing empty blocks.");
             }
 
-            // Check for trailing empty blocks.
-            assert!(!self.pool.last().unwrap().is_empty(), "Trailing empty blocks.");
+            // Logging...
+            log!(self.pool, "Check OK!");
         }
-        log!(self.pool;0, "Check ok!");
     }
 
     /// Check for memory leaks.

+ 42 - 0
src/fail.rs

@@ -0,0 +1,42 @@
+//! General error handling.
+
+use core::sync::atomic::{self, AtomicPtr};
+use core::{mem, intrinsics};
+
+static OOM_HANDLER: AtomicPtr<()> = AtomicPtr::new(default_oom_handler as *mut ());
+
+/// The default OOM handler.
+///
+/// This will simply abort the process.
+#[cold]
+fn default_oom_handler() -> ! {
+    unsafe {
+        intrinsics::abort();
+    }
+}
+
+/// Call the OOM handler.
+///
+/// This is used on out-of-memory errors, and will never return. Usually, it simply consists
+/// of aborting the process.
+///
+/// # An important note
+///
+/// This is for OOM conditions — not malformed or too-big allocations — i.e., for when the system
+/// is unable to gather memory for the allocation (SBRK fails).
+///
+/// The rule of thumb is that this should be called, if and only if unwinding (which allocates)
+/// will hit the same error.
+pub fn oom() -> ! {
+    unsafe {
+        (mem::transmute::<_, fn() -> !>(OOM_HANDLER.load(atomic::Ordering::SeqCst)))()
+    }
+}
+
+/// Set the OOM handler.
+///
+/// This is called when the process is out-of-memory.
+#[inline]
+pub fn set_oom_handler(handler: fn() -> !) {
+    OOM_HANDLER.store(handler as *mut (), atomic::Ordering::SeqCst);
+}

+ 8 - 3
src/lib.rs

@@ -10,8 +10,11 @@
 #![no_std]
 
 #![feature(allocator, const_fn, core_intrinsics, stmt_expr_attributes, drop_types_in_const,
-           nonzero, optin_builtin_traits, type_ascription, question_mark)]
-#![warn(missing_docs)]
+           nonzero, optin_builtin_traits, type_ascription, question_mark, try_from)]
+#![warn(missing_docs, cast_precision_loss, cast_sign_loss, cast_possible_wrap,
+        cast_possible_truncation, filter_map, if_not_else, items_after_statements,
+        invalid_upcast_comparisons, mutex_integer, nonminimal_bool, shadow_same, shadow_unrelated,
+        single_match_else, string_add, string_add_assign, wrong_pub_self_convention)]
 
 #[cfg(feature = "libc_write")]
 #[macro_use]
@@ -19,17 +22,19 @@ mod write;
 #[macro_use]
 mod log;
 
+mod allocator;
 mod block;
 mod bookkeeper;
+mod fail;
 mod leak;
 mod prelude;
 mod ptr;
 mod sync;
 mod sys;
 mod vec;
-mod allocator;
 
 pub use allocator::{lock, Allocator};
+pub use fail::set_oom_handler;
 pub use sys::sbrk;
 
 /// Rust allocation symbol.

+ 151 - 40
src/log.rs

@@ -2,46 +2,166 @@
 //!
 //! This allows for detailed logging for `ralloc`.
 
-/// NO-OP.
+/// Log to the appropriate source.
+///
+/// The first argument this takes is of the form `pool;cursor`, which is used to print the
+/// block pools state. `cursor` is what the operation "revolves around" to give a sense of
+/// position.
+///
+/// If the `;cursor` part is left out, no cursor will be printed.
+///
+/// The rest of the arguments are just normal formatters.
 #[macro_export]
-#[cfg(not(feature = "log"))]
 macro_rules! log {
-    ($( $arg:tt )*) => {};
+    ($pool:expr, $( $arg:expr ),*) => {
+        log!($pool;(), $( $arg ),*);
+    };
+    ($pool:expr;$cur:expr, $( $arg:expr ),*) => {{
+        #[cfg(feature = "log")]
+        {
+            use core::fmt::Write;
+
+            use {write, log};
+            use log::internal::IntoCursor;
+
+            // Print the pool state.
+            let mut stderr = write::Writer::stderr();
+            let _ = write!(stderr, "{:10?} : ", log::internal::BlockLogger {
+                cur: $cur.clone().into_cursor(),
+                blocks: &$pool,
+            });
+
+            // Print the log message.
+            let _ = write!(stderr, $( $arg ),*);
+            let _ = writeln!(stderr, " (at {}:{})", file!(), line!());
+        }
+    }};
 }
 
 /// Top secret place-holding module.
-#[cfg(feature = "log")]
 #[macro_use]
+#[cfg(feature = "log")]
 pub mod internal {
     use prelude::*;
 
     use core::fmt;
 
-    /// Log to the appropriate source.
+    use core::cell::Cell;
+    use core::ops::Range;
+
+    /// A "cursor".
     ///
-    /// The first argument this takes is of the form `pool;number`, which is used to print the
-    /// block pools state. `number` is what the operation "revolves around" to give a sense of
-    /// position.
+    /// Cursors represent a block or an interval in the log output. This trait is implemented for
+    /// various types that can represent a cursor.
+    pub trait Cursor {
+        /// Iteration at n.
+        ///
+        /// This is called in the logging loop. The cursor should then write, what it needs, to the
+        /// formatter if the underlying condition is true.
+        ///
+        /// For example, a plain position cursor will write `"|"` when `n == self.pos`.
+        fn at(&self, f: &mut fmt::Formatter, n: usize) -> fmt::Result;
+
+        /// The after hook.
+        ///
+        /// This is run when the loop is over. The aim is to e.g. catch up if the cursor wasn't
+        /// printed (i.e. is out of range).
+        fn after(&self, f: &mut fmt::Formatter) -> fmt::Result;
+    }
+
+    /// Types that can be converted into a cursor.
+    pub trait IntoCursor {
+        /// The end result.
+        type Cursor: Cursor;
+
+        /// Convert this value into its equivalent cursor.
+        fn into_cursor(self) -> Self::Cursor;
+    }
+
+    /// A single-point cursor.
+    pub struct UniCursor {
+        /// The position where this cursor will be placed.
+        pos: usize,
+        /// Is this cursor printed?
+        ///
+        /// This is used for the after hook.
+        is_printed: Cell<bool>,
+    }
+
+    impl Cursor for UniCursor {
+        fn at(&self, f: &mut fmt::Formatter, n: usize) -> fmt::Result {
+            if self.pos == n {
+                self.is_printed.set(true);
+                write!(f, "|")?;
+            }
+
+            Ok(())
+        }
+
+        fn after(&self, f: &mut fmt::Formatter) -> fmt::Result {
+            if !self.is_printed.get() {
+                write!(f, "…|")?;
+            }
+
+            Ok(())
+        }
+    }
+
+    impl IntoCursor for usize {
+        type Cursor = UniCursor;
+
+        fn into_cursor(self) -> UniCursor {
+            UniCursor {
+                pos: self,
+                is_printed: Cell::new(false),
+            }
+        }
+    }
+
+    impl Cursor for () {
+        fn at(&self, _: &mut fmt::Formatter, _: usize) -> fmt::Result { Ok(()) }
+
+        fn after(&self, _: &mut fmt::Formatter) -> fmt::Result { Ok(()) }
+    }
+
+    impl IntoCursor for () {
+        type Cursor = ();
+
+        fn into_cursor(self) -> () {
+            ()
+        }
+    }
+
+    /// An interval/range cursor.
     ///
-    /// The rest of the arguments are just normal formatters.
-    #[macro_export]
-    macro_rules! log {
-        ($pool:expr;$n:expr, $( $arg:expr ),*) => {{
-            use {write, log};
+    /// The start of the range is marked by `[` and the end by `]`.
+    pub struct RangeCursor {
+        /// The range of this cursor.
+        range: Range<usize>,
+    }
 
-            use core::fmt::Write;
+    impl Cursor for RangeCursor {
+        fn at(&self, f: &mut fmt::Formatter, n: usize) -> fmt::Result {
+            if self.range.start == n {
+                write!(f, "[")?;
+            } else if self.range.end == n {
+                write!(f, "]")?;
+            }
 
-            // Print the pool state.
-            let mut stderr = write::Writer::stderr();
-            let _ = write!(stderr, "{:10?} : ", log::internal::BlockLogger {
-                cur: $n,
-                blocks: &$pool,
-            });
+            Ok(())
+        }
 
-            // Print the log message.
-            let _ = write!(stderr, $( $arg ),*);
-            let _ = writeln!(stderr, " (at {}:{})", file!(), line!());
-        }};
+        fn after(&self, _: &mut fmt::Formatter) -> fmt::Result { Ok(()) }
+    }
+
+    impl IntoCursor for Range<usize> {
+        type Cursor = RangeCursor;
+
+        fn into_cursor(self) -> RangeCursor {
+            RangeCursor {
+                range: self,
+            }
+        }
     }
 
     /// A "block logger".
@@ -52,29 +172,23 @@ pub mod internal {
     /// xxx__|xx_
     /// ```
     ///
-    /// where `x` denotes an non-empty block. `_` denotes an empty block, and `|` is placed on the
-    /// "current block".
-    pub struct BlockLogger<'a> {
+    /// where `x` denotes a non-empty block. `_` denotes an empty block, with `|` representing the
+    /// cursor.
+    pub struct BlockLogger<'a, T> {
         /// The cursor.
         ///
         /// This is where the `|` will be printed.
-        pub cur: usize,
+        pub cur: T,
         /// The blocks.
         pub blocks: &'a [Block],
     }
 
-    impl<'a> fmt::Debug for BlockLogger<'a> {
+    impl<'a, T: Cursor> fmt::Debug for BlockLogger<'a, T> {
         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
             // TODO handle alignment etc.
 
-            let mut cursor_set = false;
-
             for (n, i) in self.blocks.iter().enumerate() {
-                if n == self.cur {
-                    // Write the cursor.
-                    write!(f, "|")?;
-                    cursor_set = true;
-                }
+                self.cur.at(f, n)?;
 
                 if i.is_empty() {
                     // Empty block.
@@ -85,10 +199,7 @@ pub mod internal {
                 }
             }
 
-            if !cursor_set {
-                // The cursor isn't set yet, so we place it in the end.
-                write!(f, "|")?;
-            }
+            self.cur.after(f)?;
 
             Ok(())
         }

+ 13 - 17
src/sys.rs

@@ -15,19 +15,21 @@ static BRK_MUTEX: sync::Mutex<()> = sync::Mutex::new(());
 /// start.
 ///
 /// This uses the system call BRK as backend.
+///
+/// # Safety
+///
+/// This is safe unless you have negative or overflowing `n`.
 #[inline]
-pub fn sbrk(n: isize) -> Result<*mut u8, ()> {
+pub unsafe fn sbrk(n: isize) -> Result<*mut u8, ()> {
     // Lock the BRK mutex.
     #[cfg(not(feature = "unsafe_no_brk_lock"))]
     let _guard = BRK_MUTEX.lock();
 
-    unsafe {
-        let brk = ralloc_shim::sbrk(n);
-        if brk as usize == !0 {
-            Err(())
-        } else {
-            Ok(brk)
-        }
+    let brk = ralloc_shim::sbrk(n);
+    if brk as usize == !0 {
+        Err(())
+    } else {
+        Ok(brk)
     }
 }
 
@@ -42,14 +44,8 @@ mod test {
 
     #[test]
     fn test_oom() {
-        assert!(sbrk(9999999999999).is_err());
-    }
-
-    #[test]
-    #[ignore]
-    // TODO: fix this test
-    fn test_overflow() {
-        assert!(sbrk(!0).is_err());
-        assert!(sbrk(!0 - 2000).is_err());
+        unsafe {
+            assert!(sbrk(9999999999999).is_err());
+        }
     }
 }

+ 3 - 0
src/vec.rs

@@ -90,12 +90,15 @@ impl<T: Leak> Vec<T> {
     ///
     /// On success, return `Ok(())`. On failure (not enough capacity), return `Err(())`.
     #[inline]
+    #[allow(cast_possible_wrap)]
     pub fn push(&mut self, elem: T) -> Result<(), ()> {
         if self.len == self.cap {
             Err(())
         } else {
             // Place the element in the end of the vector.
             unsafe {
+                // By the invariants of this type (the size is bounded by the address space), this
+                // conversion isn't overflowing.
                 ptr::write((*self.ptr).offset(self.len as isize), elem);
             }
 

+ 7 - 4
src/write.rs

@@ -1,10 +1,12 @@
 //! Direct libc-based write for internal debugging.
 //!
-//! This will replace the assertion macros to avoid deadlocks in panics.
+//! This will replace the assertion macros to avoid deadlocks in panics, by utilizing a
+//! non-allocating writing primitive.
 
 use core::fmt;
 
 extern {
+    /// Write a buffer to a file descriptor.
     fn write(fd: i32, buff: *const u8, size: usize) -> isize;
 }
 
@@ -76,9 +78,10 @@ macro_rules! assert {
 /// allows for aborting, non-allocating panics when running the tests.
 #[macro_export]
 macro_rules! debug_assert {
-    ($($arg:tt)*) => {{
-        #[cfg(debug_assertions)]
-        assert!($($arg)*);
+    ($( $arg:tt )*) => {{
+        if cfg!(debug_assertions) {
+            assert!($( $arg )*);
+        }
     }}
 }
 

+ 11 - 8
tests/box.rs

@@ -1,12 +1,15 @@
 extern crate ralloc;
 
+mod util;
+
+#[inline(never)]
 fn alloc_box() -> Box<u32> {
     Box::new(0xDEADBEAF)
 }
 
 #[test]
-fn test() {
-    {
+fn simple_box() {
+    util::multiply(|| {
         let mut a = Box::new(1);
         let mut b = Box::new(2);
         let mut c = Box::new(3);
@@ -16,13 +19,13 @@ fn test() {
         assert_eq!(*c, 3);
         assert_eq!(*alloc_box(), 0xDEADBEAF);
 
-        *a = 0;
-        *b = 0;
-        *c = 0;
+        util::acid(|| {
+            *a = 0;
+            *b = 0;
+            *c = 0;
+        });
         assert_eq!(*a, 0);
         assert_eq!(*b, 0);
         assert_eq!(*c, 0);
-    }
-
-    ralloc::lock().debug_assert_no_leak();
+    });
 }

+ 11 - 9
tests/btreemap.rs

@@ -1,23 +1,25 @@
 extern crate ralloc;
 
+mod util;
+
 use std::collections::BTreeMap;
 
 #[test]
-fn test() {
-    {
+fn btreemap() {
+    util::multiply(|| {
         let mut map = BTreeMap::new();
 
-        map.insert("Nicolas", "Cage");
-        map.insert("is", "God");
-        map.insert("according", "to");
-        map.insert("ca1ek", ".");
+        util::acid(|| {
+            map.insert("Nicolas", "Cage");
+            map.insert("is", "God");
+            map.insert("according", "to");
+            map.insert("ca1ek", ".");
+        });
 
         assert_eq!(map.get("Nicolas"), Some(&"Cage"));
         assert_eq!(map.get("is"), Some(&"God"));
         assert_eq!(map.get("according"), Some(&"to"));
         assert_eq!(map.get("ca1ek"), Some(&"."));
         assert_eq!(map.get("This doesn't exist."), None);
-    }
-
-    ralloc::lock().debug_assert_no_leak();
+    });
 }

+ 41 - 0
tests/cross_thread_drop.rs

@@ -0,0 +1,41 @@
+extern crate ralloc;
+
+mod util;
+
+use std::thread;
+
+#[test]
+fn cross_thread_drop() {
+    util::multiply(|| {
+        let mut join = Vec::new();
+
+        for _ in 0..10 {
+            let bx = Box::new(0x11FE15C001u64);
+
+            join.push(thread::spawn(move || {
+                util::acid(|| {
+                    assert_eq!(*bx, 0x11FE15C001);
+                });
+            }));
+        }
+
+        for i in join {
+            i.join().unwrap();
+        }
+    });
+}
+
+#[test]
+fn cross_thread_drop_2() {
+    util::multiply(|| {
+        for _ in 0..10 {
+            let bx = thread::spawn(|| Box::new(0x11FE15C001u64)).join().unwrap();
+
+            thread::spawn(move || {
+                util::acid(|| {
+                    assert_eq!(*bx, 0x11FE15C001);
+                });
+            });
+        }
+    });
+}

+ 15 - 10
tests/join.rs

@@ -1,17 +1,22 @@
 extern crate ralloc;
 
+mod util;
+
 use std::thread;
 
 #[test]
-fn test() {
-    for i in 0..0xFFFF {
-        let bx = Box::new("frakkkko");
-        let join = thread::spawn(move || Box::new(!i));
-        drop(bx);
-        let bx = Box::new("frakkkko");
-        join.join().unwrap();
-        drop(bx);
-    }
+fn join_thread() {
+    util::multiply(|| {
+        for i in 0..0xFFF {
+            let bx = Box::new("frakkkko");
+            let join = thread::spawn(move || Box::new(!i));
+            drop(bx);
 
-    ralloc::lock().debug_assert_no_leak();
+            util::acid(move || {
+                let bx = Box::new("frakkkko");
+                join.join().unwrap();
+                drop(bx);
+            });
+        }
+    });
 }

+ 49 - 0
tests/manual.rs

@@ -0,0 +1,49 @@
+extern crate ralloc;
+
+mod util;
+
+use std::ptr;
+
+#[test]
+fn manual() {
+    util::multiply(|| {
+        let mut alloc = ralloc::Allocator::new();
+
+        let ptr1 = alloc.alloc(30, 3);
+        let ptr2 = alloc.alloc(500, 20);
+
+        assert_eq!(0, ptr1 as usize % 3);
+        assert_eq!(0, ptr2 as usize % 20);
+
+        unsafe {
+            util::acid(|| {
+                ptr::write_bytes(ptr1, 0x22, 30);
+            });
+            util::acid(|| {
+                for i in 0..500 {
+                    *ptr2.offset(i) = i as u8;
+                }
+            });
+
+            assert_eq!(*ptr1, 0x22);
+            assert_eq!(*ptr1.offset(5), 0x22);
+
+            assert_eq!(*ptr2, 0);
+            assert_eq!(*ptr2.offset(15), 15);
+
+            let ptr1 = alloc.realloc(ptr1, 30, 300, 3);
+            for i in 0..300 {
+                util::acid(|| {
+                    *ptr1.offset(i) = i as u8;
+                });
+            }
+            assert_eq!(*ptr1, 0);
+            assert_eq!(*ptr1.offset(200), 200);
+
+            util::acid(|| {
+                alloc.free(ptr1, 30);
+                alloc.free(ptr2, 500);
+            });
+        }
+    });
+}

+ 28 - 14
tests/mpsc.rs

@@ -1,36 +1,50 @@
 extern crate ralloc;
 
+mod util;
+
 use std::thread;
 use std::sync::mpsc;
 
 #[test]
-fn test() {
-    {
+fn mpsc_queue() {
+    util::multiply(|| {
         {
             let (tx, rx) = mpsc::channel::<Box<u64>>();
-            thread::spawn(move || {
-                tx.send(Box::new(0xBABAFBABAF)).unwrap();
-                tx.send(Box::new(0xDEADBEAF)).unwrap();
-                tx.send(Box::new(0xDECEA5E)).unwrap();
-                tx.send(Box::new(0xDEC1A551F1E5)).unwrap();
+
+            let handle = thread::spawn(move || {
+                util::acid(|| {
+                    tx.send(Box::new(0xBABAFBABAF)).unwrap();
+                    tx.send(Box::new(0xDEADBEAF)).unwrap();
+                    tx.send(Box::new(0xDECEA5E)).unwrap();
+                    tx.send(Box::new(0xDEC1A551F1E5)).unwrap();
+                });
             });
             assert_eq!(*rx.recv().unwrap(), 0xBABAFBABAF);
             assert_eq!(*rx.recv().unwrap(), 0xDEADBEAF);
             assert_eq!(*rx.recv().unwrap(), 0xDECEA5E);
             assert_eq!(*rx.recv().unwrap(), 0xDEC1A551F1E5);
+
+            handle.join().unwrap();
         }
 
         let (tx, rx) = mpsc::channel();
-        for _ in 0..0xFFFF {
-            let tx = tx.clone();
-            thread::spawn(move || {
-                tx.send(Box::new(0xFA11BAD)).unwrap();
+        let mut handles = Vec::new();
+
+        for _ in 0..10 {
+            util::acid(|| {
+                let tx = tx.clone();
+                handles.push(thread::spawn(move || {
+                    tx.send(Box::new(0xFA11BAD)).unwrap();
+                }));
             });
         }
-        for _ in 0..0xFFFF {
+
+        for _ in 0..10 {
             assert_eq!(*rx.recv().unwrap(), 0xFA11BAD);
         }
-    }
 
-    ralloc::lock().debug_assert_no_leak();
+        for i in handles {
+            i.join().unwrap()
+        }
+    });
 }

+ 0 - 32
tests/multithreading.rs

@@ -1,32 +0,0 @@
-extern crate ralloc;
-
-use std::thread;
-
-fn make_thread() -> thread::JoinHandle<()> {
-    thread::spawn(|| {
-        let mut vec = Vec::new();
-
-        for i in 0..0xFFFF {
-            vec.push(0);
-            vec[i] = i;
-        }
-
-        for i in 0..0xFFFF {
-            assert_eq!(vec[i], i);
-        }
-    })
-}
-
-#[test]
-fn test() {
-    let mut join = Vec::new();
-    for _ in 0..50 {
-        join.push(make_thread());
-    }
-
-    for i in join {
-        i.join().unwrap();
-    }
-
-    ralloc::lock().debug_assert_no_leak();
-}

+ 60 - 0
tests/partial_free.rs

@@ -0,0 +1,60 @@
+extern crate ralloc;
+
+mod util;
+
+use std::ptr;
+
+#[test]
+fn partial_free() {
+    util::multiply(|| {
+        let mut alloc = ralloc::Allocator::new();
+
+        let buf = alloc.alloc(63, 3);
+
+        unsafe {
+            util::acid(|| {
+                ptr::write_bytes(buf, 0, 63);
+                *buf = 4;
+            });
+
+            util::acid(|| {
+                alloc.free(buf.offset(8), 75);
+                *buf = 5;
+            });
+
+            util::acid(|| {
+                alloc.free(buf, 4);
+                *buf.offset(4) = 3;
+            });
+
+            assert_eq!(*buf.offset(4), 3);
+        }
+    });
+}
+
+#[test]
+fn partial_free_double() {
+    util::multiply(|| {
+        let mut alloc = ralloc::Allocator::new();
+
+        let buf = alloc.alloc(64, 4);
+
+        unsafe {
+            util::acid(|| {
+                ptr::write_bytes(buf, 0, 64);
+            });
+
+            util::acid(|| {
+                alloc.free(buf.offset(32), 32);
+                *buf = 5;
+            });
+
+            assert_eq!(*buf, 5);
+
+            util::acid(|| {
+                *buf = 0xAA;
+                alloc.free(buf, 32);
+            });
+        }
+    });
+}

+ 27 - 0
tests/partial_realloc.rs

@@ -0,0 +1,27 @@
+extern crate ralloc;
+
+mod util;
+
+use std::ptr;
+
+#[test]
+fn partial_realloc() {
+    util::multiply(|| {
+        let mut alloc = ralloc::Allocator::new();
+        let buf = alloc.alloc(63, 3);
+
+        unsafe {
+            util::acid(|| {
+                ptr::write_bytes(buf, 0, 63);
+                *buf = 4;
+            });
+
+            alloc.realloc(buf.offset(8), 75, 0, 23);
+            *buf = 5;
+
+            *alloc.realloc(buf, 4, 10, 2) = 10;
+
+            alloc.free(buf, 4);
+        }
+    });
+}

+ 26 - 7
tests/realloc.rs

@@ -1,21 +1,40 @@
 extern crate ralloc;
 
+mod util;
+
 #[test]
-fn test() {
-    {
+fn realloc_vec() {
+    util::multiply(|| {
         let mut vec = Vec::new();
 
         vec.reserve(1);
         vec.reserve(2);
-        vec.reserve(3);
-        vec.reserve(100);
-        vec.reserve(600);
+        util::acid(|| {
+            vec.reserve(3);
+            vec.reserve(100);
+            vec.reserve(600);
+        });
         vec.reserve(1000);
         vec.reserve(2000);
 
         vec.push(1);
         vec.push(2);
-    }
+    });
+}
+
+#[test]
+fn realloc_vec_2() {
+    util::multiply(|| {
+        let mut vec = Vec::with_capacity(4);
+
+        vec.push(1);
+        vec.push(2);
+        vec.push(101);
 
-    ralloc::lock().debug_assert_no_leak();
+        for x in 0..300 {
+            util::acid(|| {
+                vec.reserve_exact(x);
+            });
+        }
+    });
 }

+ 33 - 0
tests/scaling.rs

@@ -0,0 +1,33 @@
+extern crate ralloc;
+
+mod util;
+
+#[test]
+fn big_alloc() {
+    util::multiply(|| {
+        let mut vec = Vec::new();
+        let mut rand = 3u64;
+
+        for _ in 0..0xBFFF {
+            rand ^= 0xABFABFABFABF;
+            rand = rand.rotate_left(3);
+
+            util::acid(|| vec.push(rand));
+        }
+    });
+}
+
+#[test]
+fn many_small_allocs() {
+    util::multiply(|| {
+        let mut vec = Vec::new();
+        let mut rand = 3u64;
+
+        for _ in 0..3000 {
+            rand ^= 0xABFABFABFABF;
+            rand = rand.rotate_left(3);
+
+            util::acid(|| vec.push(Box::new(rand)));
+        }
+    });
+}

+ 0 - 22
tests/send.rs

@@ -1,22 +0,0 @@
-extern crate ralloc;
-
-use std::thread;
-
-#[test]
-fn test() {
-    let mut join = Vec::new();
-
-    for _ in 0..10000 {
-        let bx: Box<u64> = Box::new(0x11FE15C001);
-
-        join.push(thread::spawn(move || {
-            assert_eq!(*bx, 0x11FE15C001);
-        }));
-    }
-
-    for i in join {
-        i.join().unwrap();
-    }
-
-    ralloc::lock().debug_assert_no_leak();
-}

+ 8 - 6
tests/string.rs

@@ -1,10 +1,12 @@
 extern crate ralloc;
 
-#[test]
-fn test() {
-    assert_eq!(&String::from("you only live twice"), "you only live twice");
-    assert_eq!(&String::from("wtf have you smoked"), "wtf have you smoked");
-    assert_eq!(&String::from("get rekt m8"), "get rekt m8");
+mod util;
 
-    ralloc::lock().debug_assert_no_leak();
+#[test]
+fn simple_string() {
+    util::multiply(|| {
+        assert_eq!(&String::from("you only live twice"), "you only live twice");
+        assert_eq!(&String::from("wtf have you smoked"), "wtf have you smoked");
+        assert_eq!(&String::from("get rekt m8"), "get rekt m8");
+    });
 }

+ 39 - 0
tests/too_many_threads.rs

@@ -0,0 +1,39 @@
+extern crate ralloc;
+
+mod util;
+
+use std::thread;
+
+fn make_thread() -> thread::JoinHandle<()> {
+    thread::spawn(|| {
+        let mut vec = Vec::new();
+
+        for i in 0..0xFFF {
+            util::acid(|| {
+                vec.push(0);
+                vec[i] = i;
+            });
+        }
+
+        for i in 0..0xFFF {
+            assert_eq!(vec[i], i);
+        }
+    })
+}
+
+#[test]
+fn multithread_join_handle_vec() {
+    util::multiply(|| {
+        let mut join = Vec::new();
+
+        for _ in 0..20 {
+            util::acid(|| {
+                join.push(make_thread());
+            });
+        }
+
+        for i in join {
+            i.join().unwrap();
+        }
+    });
+}

+ 78 - 0
tests/util/mod.rs

@@ -0,0 +1,78 @@
+//! Test automation.
+
+use ralloc;
+
+use std::{thread, mem};
+
+/// Magic trait for boxed `FnOnce`s.
+///
+/// This is a temporary replacement until the equivalent trait from libstd is stabilized.
+trait FnBox {
+    /// Call the closure.
+    fn call_box(self: Box<Self>);
+}
+
+impl<F: FnOnce()> FnBox for F {
+    fn call_box(self: Box<Self>) { (*self)() }
+}
+
+/// Like `std::thread::spawn`, but without the closure bounds.
+unsafe fn spawn_unsafe<'a, F: FnOnce() + Send + 'a>(func: F) -> thread::JoinHandle<()> {
+    let closure: Box<FnBox + 'a> = Box::new(func);
+    let closure: Box<FnBox + Send> = mem::transmute(closure);
+    thread::spawn(move || closure.call_box())
+}
+
+/// Run the closure in the current thread and in one newly spawned thread, then `join` the latter.
+fn spawn_double<F: Fn() + Sync + Send>(func: F) {
+    let handle;
+
+    unsafe {
+        handle = spawn_unsafe(|| func());
+    }
+
+    func();
+
+    handle.join().unwrap();
+}
+
+/// "Multiply" a closure, by running it in multiple threads at the same time.
+///
+/// This will test for memory leaks, as well as acid wrapping.
+#[allow(dead_code)]
+pub fn multiply<F: Fn() + Sync + Send + 'static>(func: F) {
+    spawn_double(|| spawn_double(|| acid(|| func())));
+
+    ralloc::lock().debug_assert_no_leak();
+}
+
+/// Wrap a block in acid tests.
+///
+/// This performs a number of temporary allocations to try to detect inconsistency.
+///
+/// The basic idea is that if the allocator is broken, it might allocate the same memory twice, or
+/// corrupt when allocating. Thus, we allocate some temporary segment and overwrite it. This way we
+/// might be able to detect memory corruption through asserting memory consistency after the
+/// closure is completed.
+#[allow(dead_code)]
+pub fn acid<F: FnOnce()>(func: F) {
+    let mut vec = vec!["something", "yep", "yup"];
+    let _v = vec![Box::new(2), Box::new(5)];
+    let mut bx = Box::new(2389);
+    let abc = Box::new("abc");
+
+    vec.shrink_to_fit();
+    vec.extend(["lol", "lulz"].iter());
+    vec.shrink_to_fit();
+    vec.extend(["we", "are"].iter());
+
+    func();
+
+    *bx = 500;
+    vec.push("heyaya");
+    *bx = 55;
+
+    assert_eq!(vec, ["something", "yep", "yup", "lol", "lulz", "we", "are", "heyaya"]);
+    assert_eq!(*bx, 55);
+    assert_eq!(*abc, "abc");
+}

+ 25 - 22
tests/vec.rs

@@ -1,31 +1,34 @@
 extern crate ralloc;
 
-#[test]
-fn test() {
-    let mut vec = Vec::new();
+mod util;
 
-    for i in 0..0xFFFF {
-        // We're going to annoy the allocator by allocating a small chunk, after which we push.
-        let _bx = Box::new(4);
-        vec.push(i);
-    }
+#[test]
+fn simple_vec() {
+    util::multiply(|| {
+        let mut vec = Vec::new();
 
-    assert_eq!(vec[0xDEAD], 0xDEAD);
-    assert_eq!(vec[0xBEAF], 0xBEAF);
-    assert_eq!(vec[0xABCD], 0xABCD);
-    assert_eq!(vec[0xFFAB], 0xFFAB);
-    assert_eq!(vec[0xAAAA], 0xAAAA);
+        for i in 0..0xFFFF {
+            // We're going to annoy the allocator by allocating a small chunk, after which we push.
+            let _bx = Box::new(4);
+            vec.push(i);
+        }
 
-    for i in 0xFFFF..0 {
-        assert_eq!(vec.pop(), Some(i));
-    }
+        assert_eq!(vec[0xDEAD], 0xDEAD);
+        assert_eq!(vec[0xBEAF], 0xBEAF);
+        assert_eq!(vec[0xABCD], 0xABCD);
+        assert_eq!(vec[0xFFAB], 0xFFAB);
+        assert_eq!(vec[0xAAAA], 0xAAAA);
 
-    for i in 0..0xFFFF {
-        vec[i] = 0;
-        assert_eq!(vec[i], 0);
-    }
+        for i in (0xFFF..0xFFFF).rev() {
+            util::acid(|| {
+                assert_eq!(vec.pop(), Some(i));
+            });
+        }
 
-    drop(vec);
+        for i in 0..0xFFF {
+            vec[i] = 0;
+            assert_eq!(vec[i], 0);
+        }
+    });
 
-    ralloc::lock().debug_assert_no_leak();
 }

+ 23 - 21
tests/vec_box.rs

@@ -1,29 +1,31 @@
 extern crate ralloc;
 
-#[test]
-fn test() {
-    let mut vec = Vec::new();
-
-    for i in 0..0xFFFF {
-        vec.push(Box::new(i));
-    }
+mod util;
 
-    assert_eq!(*vec[0xDEAD], 0xDEAD);
-    assert_eq!(*vec[0xBEAF], 0xBEAF);
-    assert_eq!(*vec[0xABCD], 0xABCD);
-    assert_eq!(*vec[0xFFAB], 0xFFAB);
-    assert_eq!(*vec[0xAAAA], 0xAAAA);
+#[test]
+fn vec_box() {
+    util::multiply(|| {
+        let mut vec = Vec::new();
 
-    for i in 0xFFFF..0 {
-        assert_eq!(*vec.pop().unwrap(), i);
-    }
+        for i in 0..0xFFF {
+            util::acid(|| {
+                vec.push(Box::new(i));
+            });
+        }
 
-    for i in 0..0xFFFF {
-        *vec[i] = 0;
-        assert_eq!(*vec[i], 0);
-    }
+        assert_eq!(*vec[0xEAD], 0xEAD);
+        assert_eq!(*vec[0xEAF], 0xEAF);
+        assert_eq!(*vec[0xBCD], 0xBCD);
+        assert_eq!(*vec[0xFAB], 0xFAB);
+        assert_eq!(*vec[0xAAA], 0xAAA);
 
-    drop(vec);
+        for i in (0x800..0xFFF).rev() {
+            assert_eq!(*vec.pop().unwrap(), i);
+        }

-    ralloc::lock().debug_assert_no_leak();
+        for i in 0..0x800 {
+            *vec[i] = 0;
+            assert_eq!(*vec[i], 0);
+        }
+    });
 }