Sfoglia il codice sorgente

Ralloc is now ready for production use: Fix all currently known issues, rewrite large parts of the allocator, move unsafes to separate abstractions (e.g., remove pointer arithmetic), add direct method, add vector primitive, update README, add TODO, add pointer primitive, more debug_assertions, more tests

ticki 9 anni fa
parent
commit
4b65baa341
12 ha cambiato i file con 1127 aggiunte e 795 eliminazioni
  1. 107 2
      README.md
  2. 3 0
      TODO.md
  3. 39 88
      src/allocator.rs
  4. 185 79
      src/block.rs
  5. 327 550
      src/bookkeeper.rs
  6. 1 1
      src/fail.rs
  7. 51 5
      src/lib.rs
  8. 109 0
      src/ptr.rs
  9. 96 0
      src/sync.rs
  10. 13 39
      src/sys.rs
  11. 196 0
      src/vec.rs
  12. 0 31
      tests/brk.rs

+ 107 - 2
README.md

@@ -1,4 +1,109 @@
-ralloc
-======
+# ralloc
 
 Redox's fast & memory efficient userspace allocator.
+
+## Features
+
+### Custom out-of-memory handlers
+
+You can set custom OOM handlers, by:
+
+```rust
+extern crate ralloc;
+use ralloc::fail::set_oom_handler;
+
+fn my_handler() -> ! {
+    println!("Oh no. Blame somebody.");
+}
+
+fn main() {
+    set_oom_handler(my_handler);
+    // Do some stuff...
+}
+```
+
+### Debug check: double free
+
+Ooh, this one is a cool one. `ralloc` detects various memory bugs when compiled
+with `debug_assertions`. These checks include double free checks:
+
+```rust
+fn main() {
+    // We start by allocating some stuff.
+    let a = Box::new(500u32);
+    // Then we memcpy the pointer (this is UB).
+    let b = Box::from_raw(&a as *mut u32);
+    // Now both destructors are called. First a, then b, which is a double
+    // free. Luckily, ralloc provides a nice message for you, when in debug
+    // mode:
+    //    Assertion failed: Double free.
+
+    // Setting RUST_BACKTRACE allows you to get a stack backtrace, so that you
+    // can find where the double free occurs.
+}
+```
+
+### Partial deallocation
+
+Many allocators limit deallocations to the originally allocated block, that is,
+you cannot perform pointer arithmetic on it or split it. `ralloc` does not have such a limitation:
+
+```rust
+use std::mem;
+fn main() {
+    // We allocate 200 bytes.
+    let mut vec = vec![0u8; 200];
+    // Cast it to a pointer.
+    let ptr = vec.as_mut_ptr();
+
+    // To avoid UB, we leak the vector.
+    mem::forget(vec);
+
+    // Now, we create two vectors, each being 100 bytes long, effectively
+    // splitting the original vector in half.
+    let a = Vec::from_raw_parts(ptr, 100, 100);
+    let b = Vec::from_raw_parts(ptr.offset(100), 100, 100);
+
+    // Now, the destructor of a and b is called... Without a segfault!
+}
+```
+
+### Separate deallocation
+
+Another cool feature is that you can deallocate things that weren't even
+allocated buffers in the first place!
+
+Consider that you have an unused static variable that you want to put into the
+allocation pool:
+
+```rust
+extern crate ralloc;
+
+static mut BUFFER: [u8; 256] = [2; 256];
+
+fn main() {
+    // Throw `BUFFER` into the memory pool.
+    unsafe {
+        ralloc::free(&mut BUFFER[0] as *mut u8, 256);
+    }
+
+    // Do some allocation.
+    assert_eq!(*Box::new(0xDEED), 0xDEED);
+}
+```
+
+### Thread local allocator
+
+TODO
+
+### Safe SBRK
+
+TODO
+
+### Lock reuse
+
+TODO
+
+### Platform agnostic
+
+TODO

+ 3 - 0
TODO.md

@@ -0,0 +1,3 @@
+- [ ] Thread local allocator.
+- [ ] Lock reuse
+- [ ] Checkpoints

+ 39 - 88
src/allocator.rs

@@ -1,105 +1,56 @@
 //! The global allocator.
 //!
-//! This contains primitives for the cross-thread allocator. Furthermore, it provides symbols for
-//! allocation, deallocation, and reallocation for Rust.
-
-use core::intrinsics;
-use core::sync::atomic;
-
+//! This contains primitives for the cross-thread allocator.
+use block::Block;
 use bookkeeper::Bookkeeper;
-use sys;
+use ptr::Pointer;
+use sync;
 
-/// The bookkeeper lock.
-///
-/// This atomic boolean is false whenever the lock is free.
-static mut BOOKKEEPER_LOCK: atomic::AtomicBool = atomic::AtomicBool::new(false);
 /// The bookkeeper.
 ///
 /// This is the associated bookkeeper of this allocator.
-static mut BOOKKEEPER: Option<Bookkeeper> = None;
+static BOOKKEEPER: sync::Mutex<Bookkeeper> = sync::Mutex::new(Bookkeeper::new());
 
-/// Unlock the associated mutex.
-///
-/// This is unsafe, since it will make future use of the acquired bookkeeper reference invalid,
-/// until it is reacquired through [the `get_bookkeeper` method](./fn.get_bookkeeper.html).
-unsafe fn unlock_bookkeeper() {
-    BOOKKEEPER_LOCK.store(false, atomic::Ordering::SeqCst);
+/// Allocate a block of memory.
+pub fn alloc(size: usize, align: usize) -> *mut u8 {
+    *BOOKKEEPER.lock().alloc(size, align).into_ptr()
 }
 
-/// Lock and possibly initialize the bookkeeper.
+/// Free a buffer.
 ///
-/// Note that the mutex should be unlocked manually, through the [`unlock_bookkeeper`
-/// method](./fn.unlock_bookkeeper.html).
-// TODO use condvar.
-fn get_bookkeeper() -> &'static mut Bookkeeper {
-    unsafe {
-        // Lock the mutex.
-        while BOOKKEEPER_LOCK.compare_and_swap(false, true, atomic::Ordering::SeqCst) {
-            sys::yield_now();
-        }
-
-        if let Some(ref mut x) = BOOKKEEPER {
-            x
-        } else {
-            BOOKKEEPER = Some(Bookkeeper::new());
-
-            BOOKKEEPER.as_mut().unwrap_or_else(|| intrinsics::unreachable())
-        }
-    }
-}
-
-/// Allocate memory.
-#[no_mangle]
-#[cfg(feature = "allocator")]
-pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
-    let res = *get_bookkeeper().alloc(size, align);
-    unsafe { unlock_bookkeeper() }
-
-    res
-}
-
-/// Deallocate memory.
-#[no_mangle]
-#[cfg(feature = "allocator")]
-pub extern fn __rust_deallocate(ptr: *mut u8, size: usize, _align: usize) {
-    use block::Block;
-    use core::ptr::Unique;
-
-    let res = get_bookkeeper().free(Block {
-        size: size,
-        ptr: unsafe { Unique::new(ptr) },
-    });
-    unsafe { unlock_bookkeeper() }
-
-    res
+/// Note that this does not have to be a buffer allocated through ralloc. The only requirement is
+/// that it is not used after the free.
+pub unsafe fn free(ptr: *mut u8, size: usize) {
+    // Lock the bookkeeper, and do a `free`.
+    BOOKKEEPER.lock().free(Block::from_raw_parts(Pointer::new(ptr), size));
 }
 
 /// Reallocate memory.
-#[no_mangle]
-#[cfg(feature = "allocator")]
-pub extern fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
-    use block::Block;
-    use core::ptr::Unique;
-
-    let res = *get_bookkeeper().realloc(Block {
-        size: old_size,
-        ptr: unsafe { Unique::new(ptr) },
-    }, size, align);
-    unsafe { unlock_bookkeeper() }
-
-    res
-}
-
-/// Return the maximal amount of inplace reallocation that can be done.
-#[no_mangle]
-#[cfg(feature = "allocator")]
-pub extern fn __rust_reallocate_inplace(_ptr: *mut u8, old_size: usize, _size: usize, _align: usize) -> usize {
-    old_size // TODO
+///
+/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
+/// returned pointer with size `size`.
+pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
+    // Lock the bookkeeper, and do a `realloc`.
+    *BOOKKEEPER.lock().realloc(
+        Block::from_raw_parts(Pointer::new(ptr), old_size),
+        size,
+        align
+    ).into_ptr()
 }
 
-/// Get the usable size of the some number of bytes of allocated memory.
-#[no_mangle]
-#[cfg(feature = "allocator")]
-pub extern fn __rust_usable_size(size: usize, _align: usize) -> usize {
-    size
+/// Try to reallocate the buffer _inplace_.
+///
+/// In case of success, return the new buffer's size. On failure, return the old size.
+///
+/// This can be used to shrink (truncate) a buffer as well.
+pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
+    // Lock the bookkeeper, and do a `realloc_inplace`.
+    if BOOKKEEPER.lock().realloc_inplace(
+        Block::from_raw_parts(Pointer::new(ptr), old_size),
+        size
+    ).is_ok() {
+        Ok(())
+    } else {
+        Err(())
+    }
 }

+ 185 - 79
src/block.rs

@@ -1,44 +1,162 @@
-//! Memory primitives.
-
-use core::cmp;
-use core::ptr::Unique;
-
-/// A contigious memory block.
+//! Memory blocks.
+//!
+//! Blocks are the main unit for the memory bookkeeping. A block is a simple construct with a
+//! `Pointer` pointer and a size. Occupied (non-free) blocks are represented by a zero-sized block.
+
+use core::{ptr, cmp, mem, fmt};
+
+use ptr::Pointer;
+use sys;
+
+/// A contiguous memory block.
+///
+/// This provides a number of guarantees,
+///
+/// 1. The inner pointer is never aliased. No byte in the block is contained in another block
+///    (aliased in this case is defined purely based on liveness).
+/// 2. The buffer is valid, but not necessarily initialized.
+///
+/// All this is enforced through the type system.
 pub struct Block {
     /// The size of this block, in bytes.
-    pub size: usize,
+    size: usize,
     /// The pointer to the start of this block.
-    pub ptr: Unique<u8>,
+    ptr: Pointer<u8>,
 }
 
 impl Block {
-    /// Get a pointer to the end of this block, not inclusive.
-    pub fn end(&self) -> Unique<u8> {
-        // TODO, this might trigger an overflow, which could imply creating a null-pointer.
-        let ptr = (self.size + *self.ptr as usize) as *mut _;
-        debug_assert!(!ptr.is_null(), "Pointer is null.");
+    /// Create an empty block starting at `ptr`.
+    pub fn empty(ptr: &Pointer<u8>) -> Block {
+        Block {
+            size: 0,
+            // This won't alias `ptr`, since the block is empty.
+            ptr: unsafe { Pointer::new(**ptr) },
+        }
+    }
 
-        unsafe {
-            Unique::new(ptr)
+    /// Construct a block from its raw parts (pointer and size).
+    pub unsafe fn from_raw_parts(ptr: Pointer<u8>, size: usize) ->  Block {
+        Block {
+            size: size,
+            ptr: ptr,
+        }
+    }
+
+    /// Get the size of the block.
+    pub fn size(&self) -> usize {
+        self.size
+    }
+
+    /// BRK allocate a block.
+    ///
+    /// This is unsafe due to the allocator assuming that only it makes use of BRK.
+   pub unsafe fn brk(size: usize) -> Block {
+        Block {
+            size: size,
+            ptr: sys::inc_brk(size).unwrap_or_else(|x| x.handle()),
         }
     }
 
-    /// Is this block free?
-    pub fn is_free(&self) -> bool {
+    /// Merge this block with a block to the right.
+    ///
+    /// This will extend `self` by the size of `block`, and then set `block`'s size to zero (to
+    /// mark it free). The return value is `Ok(())` on success, and `Err(())` on failure (e.g., the
+    /// blocks are not adjacent).
+    pub fn merge_right(&mut self, block: &mut Block) -> Result<(), ()> {
+        if self.left_to(&block) {
+            // Since the end of `block` is bounded by the address space, adding them cannot
+            // overflow.
+            self.size += block.pop().size;
+            // We pop it to make sure it isn't aliased.
+            Ok(())
+        } else { Err(()) }
+    }
+
+    /// Is this block empty/free? NOTE(review): the body returns `self.size != 0`, i.e. *non*-empty — verify whether the name or the body is inverted.
+    pub fn is_empty(&self) -> bool {
         self.size != 0
     }
 
-    /// Set this block as free.
+    /// Is this block aligned to `align`?
+    pub fn aligned_to(&self, align: usize) -> bool {
+        *self.ptr as usize % align == 0
+    }
+
+    /// Get the inner pointer.
+    pub fn into_ptr(self) -> Pointer<u8> {
+        self.ptr
+    }
+
+    /// memcpy the block to another pointer.
+    ///
+    /// # Panics
+    ///
+    /// This will panic if the target block is smaller than the source.
+    pub fn copy_to(&self, block: &mut Block) {
+        // Bound check.
+        assert!(self.size <= block.size, "Block too small.");
+
+        unsafe {
+            ptr::copy_nonoverlapping(*self.ptr, *block.ptr, self.size);
+        }
+    }
+
+    /// "Pop" this block.
     ///
-    /// This will not deallocate, but it will simply set the size to zero, which is the
-    /// representation of a freeed block.
-    pub fn set_free(&mut self) {
-        self.size = 0;
+    /// This marks it as free, and returns the old value.
+    pub fn pop(&mut self) -> Block {
+        let empty = Block::empty(&self.ptr);
+        mem::replace(self, empty)
     }
 
     /// Is this block placed left to the given other block?
-    pub fn left_to(&self, to: *mut u8) -> bool {
-        self.size + *self.ptr as usize == to as usize
+    pub fn left_to(&self, to: &Block) -> bool {
+        // This won't overflow due to the end being bounded by the address space.
+        self.size + *self.ptr as usize == *to.ptr as usize
+    }
+
+    /// Split the block at some position.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `pos` is out of bound.
+    pub fn split(self, pos: usize) -> (Block, Block) {
+        assert!(pos <= self.size, "Split {} out of bound (size is {})!", pos, self.size);
+
+        (
+            Block {
+                size: pos,
+                ptr: self.ptr.duplicate(),
+            },
+            Block {
+                size: self.size - pos,
+                ptr: unsafe { self.ptr.offset(pos as isize) },
+            }
+        )
+    }
+
+    /// Split this block, such that the second block is aligned to `align`.
+    ///
+    /// Returns `None`, leaving the block intact, if `align` is out of bounds.
+    pub fn align(&mut self, align: usize) -> Option<(Block, Block)> {
+        let aligner = align - *self.ptr as usize % align;
+
+        // Bound check.
+        if aligner < self.size {
+            // Invalidate the old block.
+            let old = self.pop();
+
+            Some((
+                Block {
+                    size: aligner,
+                    ptr: old.ptr.duplicate(),
+                },
+                Block {
+                    size: old.size - aligner,
+                    ptr: unsafe { old.ptr.offset(aligner as isize) },
+                }
+            ))
+        } else { None }
     }
 }
 
@@ -48,6 +166,7 @@ impl PartialOrd for Block {
     }
 }
 
+/// Compare the blocks address.
 impl Ord for Block {
     fn cmp(&self, other: &Block) -> cmp::Ordering {
         self.ptr.cmp(&other.ptr)
@@ -62,77 +181,64 @@ impl cmp::PartialEq for Block {
 
 impl cmp::Eq for Block {}
 
+impl fmt::Debug for Block {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "0x{:x}[0x{:x}]", *self.ptr as usize, self.size)
+    }
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
-    use core::ptr::Unique;
+
+    use ptr::Pointer;
 
     #[test]
-    fn test_end() {
-        let a = Block {
-            size: 10,
-            ptr: unsafe { Unique::new(15 as *mut _) },
-        };
-        let b = Block {
-            size: 15,
-            ptr: unsafe { Unique::new(25 as *mut _) },
-        };
-        let c = Block {
-            size: 75,
-            ptr: unsafe { Unique::new(40 as *mut _) },
+    fn test_array() {
+        let arr = b"Lorem ipsum dolor sit amet";
+        let block = unsafe {
+            Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
         };
 
-        assert_eq!(*a.end(), *b.ptr);
-        assert_eq!(*b.end(), *c.ptr);
+        // Test split.
+        let (mut lorem, mut rest) = block.split(5);
+        assert_eq!(lorem.size(), 5);
+        assert_eq!(lorem.size() + rest.size(), arr.len());
+        assert!(lorem < rest);
+
+        /* TODO
+        assert_eq!(unsafe {
+            slice::from_raw_parts(*lorem.into_ptr() as *const _, lorem.size())
+        }, b"Lorem");
+        */
+
+        assert_eq!(lorem, lorem);
+        assert!(rest.is_empty());
+        assert!(lorem.align(2).unwrap().1.aligned_to(2));
+        assert!(rest.align(16).unwrap().1.aligned_to(16));
+        assert_eq!(*lorem.into_ptr() as usize + 5, *rest.into_ptr() as usize);
     }
 
     #[test]
-    fn test_left_to() {
-        let a = Block {
-            size: 10,
-            ptr: unsafe { Unique::new(15 as *mut _) },
-        };
-        let b = Block {
-            size: 15,
-            ptr: unsafe { Unique::new(25 as *mut _) },
-        };
-        let c = Block {
-            size: 75,
-            ptr: unsafe { Unique::new(40 as *mut _) },
+    fn test_merge() {
+        let arr = b"Lorem ipsum dolor sit amet";
+        let block = unsafe {
+            Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
         };
 
-        assert!(a.left_to(*b.ptr));
-        assert!(b.left_to(*c.ptr));
-        assert!(!c.left_to(*a.ptr));
-        assert!(!a.left_to(*c.ptr));
-        assert!(!b.left_to(*b.ptr));
-        assert!(!b.left_to(*a.ptr));
+        let (mut lorem, mut rest) = block.split(5);
+        lorem.merge_right(&mut rest).unwrap();
     }
 
     #[test]
-    fn test_cmp() {
-        let a = Block {
-            size: 10,
-            ptr: unsafe { Unique::new(10 as *mut _) },
-        };
-        let b = Block {
-            size: 15,
-            ptr: unsafe { Unique::new(25 as *mut _) },
-        };
-        let c = Block {
-            size: 75,
-            ptr: unsafe { Unique::new(40 as *mut _) },
+    #[should_panic]
+    fn test_oob() {
+        let arr = b"lorem";
+        let block = unsafe {
+            Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
         };
 
-        assert!(a < b);
-        assert!(b < c);
-        assert!(c > a);
-        assert!(a == a);
-        assert!(b == b);
-        assert!(c == c);
-        assert!(c >= c);
-        assert!(c <= c);
-        assert!(a <= c);
-        assert!(b >= a);
+        // Test OOB.
+        block.split(6);
     }
 }

File diff suppressed because it is too large
+ 327 - 550
src/bookkeeper.rs


+ 1 - 1
src/fail.rs

@@ -8,7 +8,7 @@ static OOM_HANDLER: AtomicPtr<()> = AtomicPtr::new(default_oom_handler as *mut (
 
 /// The default OOM handler.
 ///
-/// This will simply abort the process with exit code, 1.
+/// This will simply abort the process.
 fn default_oom_handler() -> ! {
     unsafe {
         intrinsics::abort();

+ 51 - 5
src/lib.rs

@@ -6,7 +6,8 @@
 #![cfg_attr(feature = "allocator", allocator)]
 #![no_std]
 
-#![feature(allocator, const_fn, core_intrinsics, stmt_expr_attributes, unique, iter_arith)]
+#![feature(allocator, const_fn, core_intrinsics, stmt_expr_attributes, drop_types_in_const,
+           nonzero)]
 
 #![warn(missing_docs)]
 
@@ -16,8 +17,53 @@ extern crate system;
 #[macro_use]
 extern crate syscall;
 
-pub mod allocator;
-pub mod block;
-pub mod bookkeeper;
+mod allocator;
+mod block;
+mod bookkeeper;
+mod ptr;
+mod sync;
+mod sys;
+mod vec;
 pub mod fail;
-pub mod sys;
+
+pub use allocator::{free, alloc, realloc, realloc_inplace};
+
+/// Rust allocation symbol.
+#[no_mangle]
+#[cfg(feature = "allocator")]
+pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
+    alloc(size, align)
+}
+
+/// Rust deallocation symbol.
+#[no_mangle]
+#[cfg(feature = "allocator")]
+pub unsafe extern fn __rust_deallocate(ptr: *mut u8, size: usize, _align: usize) {
+    free(ptr, size);
+}
+
+/// Rust reallocation symbol.
+#[no_mangle]
+#[cfg(feature = "allocator")]
+pub unsafe extern fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
+    realloc(ptr, old_size, size, align)
+}
+
+/// Rust reallocation inplace symbol.
+#[no_mangle]
+#[cfg(feature = "allocator")]
+pub unsafe extern fn __rust_reallocate_inplace(ptr: *mut u8, old_size: usize, size: usize, _align: usize) -> usize {
+    if realloc_inplace(ptr, old_size, size).is_ok() {
+        size
+    } else {
+        old_size
+    }
+}
+
+/// Get the usable size of the some number of bytes of allocated memory.
+#[no_mangle]
+#[cfg(feature = "allocator")]
+pub extern fn __rust_usable_size(size: usize, _align: usize) -> usize {
+    // Yay! It matches exactly.
+    size
+}

+ 109 - 0
src/ptr.rs

@@ -0,0 +1,109 @@
+//! Pointer wrappers.
+
+use core::nonzero::NonZero;
+use core::{ops, marker};
+
+/// A pointer wrapper type.
+///
+/// A wrapper around a raw non-null `*mut T` that indicates that the possessor of this wrapper owns
+/// the referent.
+#[derive(PartialEq, Debug)]
+pub struct Pointer<T> {
+    /// The internal pointer.
+    ptr: NonZero<*mut T>,
+    /// Associated phantom data.
+    ///
+    /// This indicates that we _own_ T.
+    _phantom: marker::PhantomData<T>,
+}
+
+impl<T> Pointer<T> {
+    /// Create a new `Pointer` from a raw pointer.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe since a null pointer can cause UB, due to `Pointer` being
+    /// non-nullable.
+    pub unsafe fn new(ptr: *mut T) -> Pointer<T> {
+        // For the sake of nice debugging, make some assertions.
+        debug_assert!(!ptr.is_null(), "Null pointer!");
+
+        Pointer {
+            ptr: NonZero::new(ptr),
+            _phantom: marker::PhantomData,
+        }
+    }
+
+    /// Duplicate this pointer.
+    ///
+    /// For technical reasons, this is not implemented through the `Clone` trait, although it acts
+    /// similarly.
+    pub fn duplicate(&self) -> Pointer<T> {
+        Pointer {
+            ptr: self.ptr,
+            _phantom: marker::PhantomData,
+        }
+    }
+
+    /// Cast this pointer into a pointer to another type.
+    ///
+    /// This will simply transmute the pointer, leaving the actual data unmodified.
+    pub fn cast<U>(self) -> Pointer<U> {
+        Pointer {
+            ptr: unsafe { NonZero::new(*self as *mut U) },
+            _phantom: marker::PhantomData,
+        }
+    }
+
+    /// Create an "empty" `Pointer`.
+    ///
+    /// This acts as a null pointer, although it is represented by 0x1 instead of 0x0.
+    pub const fn empty() -> Pointer<T> {
+        Pointer {
+            ptr: unsafe { NonZero::new(0x1 as *mut T) },
+            _phantom: marker::PhantomData,
+        }
+    }
+
+    /// Offset this pointer.
+    ///
+    /// This will add some value multiplied by the size of T to the pointer.
+    ///
+    /// # Safety
+    ///
+    /// This is unsafe, due to OOB offsets being undefined behavior.
+    pub unsafe fn offset(self, diff: isize) -> Pointer<T> {
+        Pointer::new(self.ptr.offset(diff))
+    }
+}
+
+impl<T> ops::Deref for Pointer<T> {
+    type Target = *mut T;
+
+    #[inline]
+    fn deref(&self) -> &*mut T {
+        &self.ptr
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn test_pointer() {
+        let mut x = [b'a', b'b'];
+
+        unsafe {
+            let ptr = Pointer::new(&mut x[0] as *mut u8);
+            assert_eq!(**ptr, b'a');
+            assert_eq!(**ptr.duplicate().cast::<[u8; 1]>(), [b'a']);
+            assert_eq!(**ptr.offset(1), b'b');
+        }
+    }
+
+    #[test]
+    fn test_empty() {
+        assert_eq!(*Pointer::<u8>::empty() as usize, 1);
+    }
+}

+ 96 - 0
src/sync.rs

@@ -0,0 +1,96 @@
+//! Synchronization primitives.
+
+use core::sync::atomic::{self, AtomicBool};
+use core::ops;
+
+use sys;
+
+/// A mutual exclusive container.
+///
+/// This assures that only one holds mutability of the inner value. To get the inner value, you
+/// need to acquire the "lock". If you try to lock it while a lock is already held elsewhere, it will
+/// block the thread until the lock is released.
+// TODO soundness issue when T: Drop?
+pub struct Mutex<T> {
+    /// The inner value.
+    inner: T,
+    /// The lock boolean.
+    ///
+    /// This is true, if and only if the lock is currently held.
+    locked: AtomicBool,
+}
+
+/// A mutex guard.
+///
+/// This acts as the lock.
+pub struct MutexGuard<'a, T: 'a> {
+    mutex: &'a Mutex<T>,
+}
+
+/// Release the mutex.
+impl<'a, T> Drop for MutexGuard<'a, T> {
+    fn drop(&mut self) {
+        self.mutex.locked.store(false, atomic::Ordering::SeqCst);
+    }
+}
+
+impl<'a, T> ops::Deref for MutexGuard<'a, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.mutex.inner
+    }
+}
+
+impl<'a, T> ops::DerefMut for MutexGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *(&self.mutex.inner as *const T as *mut T) }
+    }
+}
+
+impl<T> Mutex<T> {
+    /// Create a new mutex with some inner value.
+    pub const fn new(inner: T) -> Mutex<T> {
+        Mutex {
+            inner: inner,
+            locked: AtomicBool::new(false),
+        }
+    }
+
+    /// Lock this mutex.
+    ///
+    /// If another lock is held, this will block the thread until it is released.
+    pub fn lock(&self) -> MutexGuard<T> {
+        // Lock the mutex.
+        while self.locked.compare_and_swap(false, true, atomic::Ordering::SeqCst) {
+            // ,___,
+            // {O,o}
+            // |)``)
+            // SRSLY?
+            sys::yield_now();
+        }
+
+        MutexGuard {
+            mutex: self,
+        }
+    }
+}
+
+unsafe impl<T> Sync for Mutex<T> {}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn test_mutex() {
+        let mutex = Mutex::new(3);
+        assert_eq!(*mutex.lock(), 3);
+
+        *mutex.lock() = 4;
+        assert_eq!(*mutex.lock(), 4);
+
+        *mutex.lock() = 0xFF;
+        assert_eq!(*mutex.lock(), 0xFF);
+    }
+}

+ 13 - 39
src/sys.rs

@@ -1,34 +1,23 @@
 //! System primitives.
 
-use core::ptr::Unique;
-
+use ptr::Pointer;
 use fail;
 
-/// Out of memory.
-///
-/// In release mode, this will simply abort the process (standard behavior). In debug mode, it will
-/// panic, causing debugging to be easier.
-pub fn oom() -> ! {
-    fail::oom();
-}
-
 /// A system call error.
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub enum Error {
     /// Sir, we're running outta memory!
     OutOfMemory,
-    /// Arithmetic overflow.
-    ArithOverflow,
-    /// An unknown error occurred.
-    Unknown,
+    /// An OS error occurred.
+    Os,
 }
 
 impl Error {
     /// Handle this error with the appropriate method.
     pub fn handle(self) -> ! {
         match self {
-            Error::OutOfMemory | Error::ArithOverflow => oom(),
-            Error::Unknown => panic!("Unknown OS error.")
+            Error::OutOfMemory => fail::oom(),
+            Error::Os => panic!("Unknown OS error.")
         }
     }
 }
@@ -60,11 +49,11 @@ pub fn segment_end() -> Result<*const u8, Error> {
 ///
 /// This is unsafe for multiple reasons. Most importantly, it can create an inconsistent state,
 /// because it is not atomic. Thus, it can be used to create Undefined Behavior.
-pub unsafe fn inc_brk(n: usize) -> Result<Unique<u8>, Error> {
+pub unsafe fn inc_brk(n: usize) -> Result<Pointer<u8>, Error> {
     let orig_seg_end = try!(segment_end()) as usize;
-    if n == 0 { return Ok(Unique::new(orig_seg_end as *mut u8)) }
+    if n == 0 { return Ok(Pointer::new(orig_seg_end as *mut u8)) }
 
-    let expected_end = try!(orig_seg_end.checked_add(n).ok_or(Error::ArithOverflow));
+    let expected_end = try!(orig_seg_end.checked_add(n).ok_or(Error::OutOfMemory));
     let new_seg_end = try!(sys_brk(expected_end));
 
     if new_seg_end != expected_end {
@@ -73,7 +62,7 @@ pub unsafe fn inc_brk(n: usize) -> Result<Unique<u8>, Error> {
 
         Err(Error::OutOfMemory)
     } else {
-        Ok(Unique::new(orig_seg_end as *mut u8))
+        Ok(Pointer::new(orig_seg_end as *mut u8))
     }
 }
 
@@ -85,7 +74,7 @@ unsafe fn sys_brk(n: usize) -> Result<usize, Error> {
     if let Ok(ret) = syscall::sys_brk(n) {
         Ok(ret)
     } else {
-        Err(Error::Unknown)
+        Err(Error::Os)
     }
 }
 
@@ -95,7 +84,7 @@ unsafe fn sys_brk(n: usize) -> Result<usize, Error> {
     let ret = syscall!(BRK, n);
 
     if ret == !0 {
-        Err(Error::Unknown)
+        Err(Error::Os)
     } else {
         Ok(ret)
     }
@@ -112,26 +101,11 @@ mod test {
         }
     }
 
-    #[test]
-    fn test_read() {
-        unsafe {
-            let mem = *inc_brk(8).unwrap() as *mut u64;
-            assert_eq!(*mem, 0);
-        }
-    }
-
     #[test]
     fn test_overflow() {
         unsafe {
-            assert_eq!(inc_brk(!0).err(), Some(Error::ArithOverflow));
-            assert_eq!(inc_brk(!0 - 2000).err(), Some(Error::ArithOverflow));
+            assert_eq!(inc_brk(!0).err(), Some(Error::OutOfMemory));
+            assert_eq!(inc_brk(!0 - 2000).err(), Some(Error::OutOfMemory));
         }
     }
-
-    #[test]
-    fn test_segment_end() {
-        assert_eq!(segment_end().unwrap(), segment_end().unwrap());
-        assert_eq!(segment_end().unwrap(), segment_end().unwrap());
-        assert_eq!(segment_end().unwrap(), segment_end().unwrap());
-    }
 }

+ 196 - 0
src/vec.rs

@@ -0,0 +1,196 @@
+//! Vector primitive.
+
+use core::mem::size_of;
+use core::{slice, ops, ptr, mem};
+
+use block::Block;
+use ptr::Pointer;
+
+/// A low-level vector primitive.
+///
+/// This does not perform allocation nor reallocation; these have to be done manually.
+/// Moreover, no destructors are called, making it possible to leak memory.
+pub struct Vec<T: NoDrop> {
+    /// A pointer to the start of the buffer.
+    ptr: Pointer<T>,
+    /// The capacity of the buffer.
+    ///
+    /// This denotes the length that can be stored before reallocation is necessary.
+    cap: usize,
+    /// The length of the vector.
+    ///
+    /// This is the number of elements from the start, that is initialized, and can be read safely.
+    len: usize,
+}
+
+impl<T: NoDrop> Vec<T> {
+    /// Create a new empty vector.
+    ///
+    /// This won't allocate a buffer, thus it will have a capacity of zero.
+    pub const fn new() -> Vec<T> {
+        Vec {
+            ptr: Pointer::empty(),
+            len: 0,
+            cap: 0,
+        }
+    }
+
+    /// Create a vector from a block.
+    ///
+    /// # Safety
+    ///
+    /// This is unsafe, since it won't initialize the buffer in any way, possibly breaking type
+    /// safety, memory safety, and so on. Thus, care must be taken upon usage.
+    pub unsafe fn from_raw_parts(block: Block, len: usize) -> Vec<T> {
+        // Make some handy assertions.
+        debug_assert!(block.size() % size_of::<T>() == 0, "The size of T does not divide the \
+                      block's size.");
+
+        Vec {
+            cap: block.size() / size_of::<T>(),
+            ptr: Pointer::new(*block.into_ptr() as *mut T),
+            len: len,
+        }
+    }
+
+    /// Replace the inner buffer with a new one, and return the old.
+    ///
+    /// This will memcpy the vectors buffer to the new block, and update the pointer and capacity
+    /// to match the given block.
+    ///
+    /// # Panics
+    ///
+    /// This panics if the vector is bigger than the block. Additional checks might be done in
+    /// debug mode.
+    pub fn refill(&mut self, block: Block) -> Block {
+        // Calculate the new capacity.
+        let new_cap = block.size() / size_of::<T>();
+
+        // Make some assertions.
+        assert!(self.len <= new_cap, "Block not large enough to cover the vector.");
+        debug_assert!(new_cap * size_of::<T>() == block.size(), "The size of T does not divide the \
+                      block's size.");
+
+        let old = mem::replace(self, Vec::new());
+        unsafe {
+            ptr::copy_nonoverlapping(*self.ptr, *old.ptr, self.len);
+        }
+
+        // Update the fields of `self`.
+        self.cap = new_cap;
+        self.ptr = unsafe { Pointer::new(*block.into_ptr() as *mut T) };
+        self.len = old.len;
+
+        Block::from(old)
+    }
+
+    /// Get the inner pointer.
+    ///
+    /// Do not perform mutation or any form of manipulation through this pointer, since doing so
+    /// might break invariants.
+    pub fn ptr(&self) -> &Pointer<T> {
+        &self.ptr
+    }
+
+    /// Get the capacity of this vector.
+    pub fn capacity(&self) -> usize {
+        self.cap
+    }
+
+    /// Push an element to the end of this vector.
+    ///
+    /// On success, return `Ok(())`. On failure (not enough capacity), return `Err(())`.
+    pub fn push(&mut self, elem: T) -> Result<(), ()> {
+        if self.len == self.cap {
+            Err(())
+        } else {
+            // Place the element in the end of the vector.
+            unsafe {
+                ptr::write((*self.ptr).offset(self.len as isize), elem);
+            }
+
+            // Increment the length.
+            self.len += 1;
+            Ok(())
+        }
+    }
+}
+
+/// Cast this vector to the respective block.
+impl<T: NoDrop> From<Vec<T>> for Block {
+    fn from(from: Vec<T>) -> Block {
+        unsafe { Block::from_raw_parts(from.ptr.cast(), from.cap * size_of::<T>()) }
+    }
+}
+
+impl<T: NoDrop> ops::Deref for Vec<T> {
+    type Target = [T];
+
+    fn deref(&self) -> &[T] {
+        unsafe {
+            slice::from_raw_parts(*self.ptr as *const _, self.len)
+        }
+    }
+}
+
+impl<T: NoDrop> ops::DerefMut for Vec<T> {
+    fn deref_mut(&mut self) -> &mut [T] {
+        unsafe {
+            slice::from_raw_parts_mut(*self.ptr as *mut _, self.len)
+        }
+    }
+}
+
+/// Types that have no destructor.
+///
+/// This trait acts as a simple static assertion, catching dumb logic errors and memory leaks.
+///
+/// Since one cannot define mutually exclusive traits, we have this as a temporary hack.
+pub trait NoDrop {}
+
+impl NoDrop for Block {}
+impl NoDrop for u8 {}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use block::Block;
+    use ptr::Pointer;
+
+    #[test]
+    fn test_vec() {
+        let mut buffer = [b'a'; 32];
+        let mut vec = unsafe {
+            Vec::from_raw_parts(
+                Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 32),
+                16
+            )
+        };
+
+        assert_eq!(&*vec, b"aaaaaaaaaaaaaaaa");
+        vec.push(b'b').unwrap();
+        assert_eq!(&*vec, b"aaaaaaaaaaaaaaaab");
+        vec.push(b'c').unwrap();
+        assert_eq!(&*vec, b"aaaaaaaaaaaaaaaabc");
+        vec[0] = b'.';
+        assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
+
+        unsafe {
+            assert_eq!(vec.refill(
+                Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 32)).size(),
+                32
+            );
+        }
+
+        assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
+
+        for _ in 0..14 {
+            vec.push(b'_').unwrap();
+        }
+
+        vec.push(b'!').unwrap_err();
+
+        assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc______________");
+        assert_eq!(vec.capacity(), 32);
+    }
+}

+ 0 - 31
tests/brk.rs

@@ -1,31 +0,0 @@
-extern crate ralloc;
-
-use ralloc::sys::{inc_brk, segment_end};
-
-use std::ptr;
-
-#[test]
-fn test() {
-    let alloc_before = Box::new("hello from the outside.");
-    let ptr = unsafe { (segment_end().unwrap() as *const u8).offset(-1) };
-
-    let abc = "abc";
-    let mem = unsafe { *inc_brk(8).unwrap() as *mut u64 };
-    unsafe {
-        *mem = 90823;
-        *mem = 2897309273;
-        *mem = 293872;
-        *mem = 0xDEADBEAFDEADBEAF;
-        *mem = 99999;
-
-        assert_eq!(*mem, 99999);
-    }
-
-    // Do some heap allocations.
-    let bx = Box::new("yo mamma is so nice.");
-
-    assert_eq!(*bx, "yo mamma is so nice.");
-    assert_eq!(*alloc_before, "hello from the outside.");
-    // Check that the stack frame is unaltered.
-    assert_eq!(abc, "abc");
-}

Some files were not shown because too many files changed in this diff