소스 검색

Inline smaller functions, update README, add benchmarks

ticki 9 년 전
부모
커밋
fc19fb6de1
12개의 변경된 파일, 115개의 추가작업 그리고 1개의 파일을 삭제
  1. 10 0
      README.md
  2. 16 0
      benches/box.rs
  3. 19 0
      benches/vec.rs
  4. 21 0
      benches/vec_box.rs
  5. 4 0
      src/allocator.rs
  6. 15 0
      src/block.rs
  7. 5 1
      src/bookkeeper.rs
  8. 5 0
      src/lib.rs
  9. 5 0
      src/ptr.rs
  10. 4 0
      src/sync.rs
  11. 4 0
      src/sys.rs
  12. 7 0
      src/vec.rs

+ 10 - 0
README.md

@@ -2,6 +2,14 @@
 
 Redox's fast & memory efficient userspace allocator.
 
+## A note on its state.
+
+It fully works, although it is relatively slow, since it hasn't been optimized
+yet. There are currently no known bugs, but it hasn't been carefully reviewed
+yet, so avoid using it in security-critical programs.
+
+I consider the state of the code quality very good.
+
 ## Using ralloc
 
 Add ralloc to `Cargo.toml`:
@@ -19,6 +27,8 @@ extern crate ralloc;
 
 `ralloc` is now ready to roll!
 
+Note that ralloc cannot coexist with another allocator, unless they're deliberately compatible.
+
 ## Features
 
 ### Custom out-of-memory handlers

+ 16 - 0
benches/box.rs

@@ -0,0 +1,16 @@
+#![feature(test)]
+
+extern crate ralloc;
+extern crate test;
+
+use test::Bencher;
+
+#[bench]
+fn bench(b: &mut Bencher) {
+    b.iter(|| {
+        let _bx1 = Box::new(0xF000D);
+        let _bx2 = Box::new(0xF0002);
+
+        "abc".to_owned().into_boxed_str()
+    })
+}

+ 19 - 0
benches/vec.rs

@@ -0,0 +1,19 @@
+#![feature(test)]
+
+extern crate ralloc;
+extern crate test;
+
+use test::Bencher;
+
+#[bench]
+fn bench(b: &mut Bencher) {
+    b.iter(|| {
+        let mut stuff = Vec::with_capacity(10);
+
+        for i in 0..10000 { stuff.push(i) }
+
+        stuff.reserve(100000);
+
+        stuff
+    })
+}

+ 21 - 0
benches/vec_box.rs

@@ -0,0 +1,21 @@
+#![feature(test)]
+
+extern crate ralloc;
+extern crate test;
+
+use test::Bencher;
+
+#[bench]
+fn bench(b: &mut Bencher) {
+    b.iter(|| {
+        let mut stuff = Vec::with_capacity(10);
+
+        for i in 0..10000 {
+            stuff.push(Box::new(i))
+        }
+
+        stuff.reserve(100000);
+
+        stuff
+    })
+}

+ 4 - 0
src/allocator.rs

@@ -12,6 +12,7 @@ use sync;
 static BOOKKEEPER: sync::Mutex<Bookkeeper> = sync::Mutex::new(Bookkeeper::new());
 
 /// Allocate a block of memory.
+#[inline]
 pub fn alloc(size: usize, align: usize) -> *mut u8 {
     *BOOKKEEPER.lock().alloc(size, align).into_ptr()
 }
@@ -20,6 +21,7 @@ pub fn alloc(size: usize, align: usize) -> *mut u8 {
 ///
 /// Note that this does not have to be a buffer allocated through ralloc. The only requirement is
 /// that it is not used after the free.
+#[inline]
 pub unsafe fn free(ptr: *mut u8, size: usize) {
     // Lock the bookkeeper, and do a `free`.
     BOOKKEEPER.lock().free(Block::from_raw_parts(Pointer::new(ptr), size));
@@ -29,6 +31,7 @@ pub unsafe fn free(ptr: *mut u8, size: usize) {
 ///
 /// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
 /// returned pointer with size `size`.
+#[inline]
 pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
     // Lock the bookkeeper, and do a `realloc`.
     *BOOKKEEPER.lock().realloc(
@@ -43,6 +46,7 @@ pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize)
 /// In case of success, return the new buffer's size. On failure, return the old size.
 ///
 /// This can be used to shrink (truncate) a buffer as well.
+#[inline]
 pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
     // Lock the bookkeeper, and do a `realloc_inplace`.
     if BOOKKEEPER.lock().realloc_inplace(

+ 15 - 0
src/block.rs

@@ -26,6 +26,7 @@ pub struct Block {
 
 impl Block {
     /// Create an empty block starting at `ptr`.
+    #[inline]
     pub fn empty(ptr: &Pointer<u8>) -> Block {
         Block {
             size: 0,
@@ -35,6 +36,7 @@ impl Block {
     }
 
     /// Construct a block from its raw parts (pointer and size).
+    #[inline]
     pub unsafe fn from_raw_parts(ptr: Pointer<u8>, size: usize) ->  Block {
         Block {
             size: size,
@@ -50,6 +52,7 @@ impl Block {
     /// BRK allocate a block.
     ///
     /// This is unsafe due to the allocator assuming that only it makes use of BRK.
+    #[inline]
    pub unsafe fn brk(size: usize) -> Block {
         Block {
             size: size,
@@ -62,6 +65,7 @@ impl Block {
     /// This will simply extend the block, adding the size of the block, and then set the size to
     /// zero. The return value is `Ok(())` on success, and `Err(())` on failure (e.g., the blocks
     /// are not adjacent).
+    #[inline]
     pub fn merge_right(&mut self, block: &mut Block) -> Result<(), ()> {
         if self.left_to(&block) {
             // Since the end of `block` is bounded by the address space, adding them cannot
@@ -73,16 +77,19 @@ impl Block {
     }
 
     /// Is this block empty/free?
+    #[inline]
     pub fn is_empty(&self) -> bool {
         self.size != 0
     }
 
     /// Is this block aligned to `align`?
+    #[inline]
     pub fn aligned_to(&self, align: usize) -> bool {
         *self.ptr as usize % align == 0
     }
 
     /// Get the inner pointer.
+    #[inline]
     pub fn into_ptr(self) -> Pointer<u8> {
         self.ptr
     }
@@ -92,6 +99,7 @@ impl Block {
     /// # Panics
     ///
     /// This will panic if the target block is smaller than the source.
+    #[inline]
     pub fn copy_to(&self, block: &mut Block) {
         // Bound check.
         assert!(self.size <= block.size, "Block too small.");
@@ -104,12 +112,14 @@ impl Block {
     /// "Pop" this block.
     ///
     /// This marks it as free, and returns the old value.
+    #[inline]
     pub fn pop(&mut self) -> Block {
         let empty = Block::empty(&self.ptr);
         mem::replace(self, empty)
     }
 
     /// Is this block placed left to the given other block?
+    #[inline]
     pub fn left_to(&self, to: &Block) -> bool {
         // This won't overflow due to the end being bounded by the address space.
         self.size + *self.ptr as usize == *to.ptr as usize
@@ -120,6 +130,7 @@ impl Block {
     /// # Panics
     ///
     /// Panics if `pos` is out of bound.
+    #[inline]
     pub fn split(self, pos: usize) -> (Block, Block) {
         assert!(pos <= self.size, "Split {} out of bound (size is {})!", pos, self.size);
 
@@ -138,6 +149,7 @@ impl Block {
     /// Split this block, such that the second block is aligned to `align`.
     ///
+    /// Returns `None` holding the intact block if `align` is out of bounds.
+    #[inline]
     pub fn align(&mut self, align: usize) -> Option<(Block, Block)> {
         let aligner = align - *self.ptr as usize % align;
 
@@ -161,6 +173,7 @@ impl Block {
 }
 
 impl PartialOrd for Block {
+    #[inline]
     fn partial_cmp(&self, other: &Block) -> Option<cmp::Ordering> {
         self.ptr.partial_cmp(&other.ptr)
     }
@@ -168,12 +181,14 @@ impl PartialOrd for Block {
 
 /// Compare the blocks address.
 impl Ord for Block {
+    #[inline]
     fn cmp(&self, other: &Block) -> cmp::Ordering {
         self.ptr.cmp(&other.ptr)
     }
 }
 
 impl cmp::PartialEq for Block {
+    #[inline]
     fn eq(&self, other: &Block) -> bool {
         self.size == other.size && *self.ptr == *other.ptr
     }

+ 5 - 1
src/bookkeeper.rs

@@ -19,7 +19,7 @@ use core::mem::{align_of, size_of};
 /// The return value is always greater than or equals to the argument.
 #[inline]
 fn canonicalize_brk(min: usize) -> usize {
-    const BRK_MULTIPLIER: usize = 1;
+    const BRK_MULTIPLIER: usize = 2;
     const BRK_MIN: usize = 65536;
     /// The maximal amount of _extra_ elements.
     const BRK_MAX_EXTRA: usize = 4 * 65536;
@@ -185,6 +185,7 @@ impl Bookkeeper {
     ///
     /// And we're done. If it cannot be done, we insert the block, while keeping the list sorted.
     /// See [`insert`](#method.insert) for details.
+    #[inline]
     pub fn free(&mut self, block: Block) {
         let ind = self.find(&block);
         self.free_ind(ind, block);
@@ -262,6 +263,7 @@ impl Bookkeeper {
 /// This shouldn't be used when the index of insertion is known, since this performs a binary
     /// search to find the blocks index. When you know the index use
     /// [`realloc_inplace_ind`](#method.realloc_inplace_ind.html).
+    #[inline]
     pub fn realloc_inplace(&mut self, block: Block, new_size: usize) -> Result<Block, Block> {
         let ind = self.find(&block);
         let res = self.realloc_inplace_ind(ind, block, new_size);
@@ -465,6 +467,7 @@ impl Bookkeeper {
     /// located.
     ///
     /// It is guaranteed that no block left to the returned value, satisfy the above condition.
+    #[inline]
     fn find(&self, block: &Block) -> usize {
         // TODO optimize this function.
 
@@ -532,6 +535,7 @@ impl Bookkeeper {
     /// ```
     ///
     /// The insertion is now completed.
+    #[inline]
     fn insert(&mut self, ind: usize, block: Block) {
         // Some assertions...
         debug_assert!(block >= self.pool[ind + 1], "Inserting at {} will make the list unsorted.", ind);

+ 5 - 0
src/lib.rs

@@ -30,6 +30,7 @@ pub use allocator::{free, alloc, realloc, realloc_inplace};
 
 /// Rust allocation symbol.
 #[no_mangle]
+#[inline]
 #[cfg(feature = "allocator")]
 pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
     alloc(size, align)
@@ -37,6 +38,7 @@ pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
 
 /// Rust deallocation symbol.
 #[no_mangle]
+#[inline]
 #[cfg(feature = "allocator")]
 pub unsafe extern fn __rust_deallocate(ptr: *mut u8, size: usize, _align: usize) {
     free(ptr, size);
@@ -44,6 +46,7 @@ pub unsafe extern fn __rust_deallocate(ptr: *mut u8, size: usize, _align: usize)
 
 /// Rust reallocation symbol.
 #[no_mangle]
+#[inline]
 #[cfg(feature = "allocator")]
 pub unsafe extern fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
     realloc(ptr, old_size, size, align)
@@ -51,6 +54,7 @@ pub unsafe extern fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usiz
 
 /// Rust reallocation inplace symbol.
 #[no_mangle]
+#[inline]
 #[cfg(feature = "allocator")]
 pub unsafe extern fn __rust_reallocate_inplace(ptr: *mut u8, old_size: usize, size: usize, _align: usize) -> usize {
     if realloc_inplace(ptr, old_size, size).is_ok() {
@@ -62,6 +66,7 @@ pub unsafe extern fn __rust_reallocate_inplace(ptr: *mut u8, old_size: usize, si
 
 /// Get the usable size of the some number of bytes of allocated memory.
 #[no_mangle]
+#[inline]
 #[cfg(feature = "allocator")]
 pub extern fn __rust_usable_size(size: usize, _align: usize) -> usize {
     // Yay! It matches exactly.

+ 5 - 0
src/ptr.rs

@@ -24,6 +24,7 @@ impl<T> Pointer<T> {
     ///
     /// This function is unsafe since a null pointer can cause UB, due to `Pointer` being
     /// non-nullable.
+    #[inline]
     pub unsafe fn new(ptr: *mut T) -> Pointer<T> {
         // For the sake of nice debugging, make some assertions.
         debug_assert!(!ptr.is_null(), "Null pointer!");
@@ -38,6 +39,7 @@ impl<T> Pointer<T> {
     ///
     /// For technical reasons, this is not implemented through the `Clone` trait, although it acts
     /// similarly.
+    #[inline]
     pub fn duplicate(&self) -> Pointer<T> {
         Pointer {
             ptr: self.ptr,
@@ -48,6 +50,7 @@ impl<T> Pointer<T> {
     /// Cast this pointer into a pointer to another type.
     ///
     /// This will simply transmute the pointer, leaving the actual data unmodified.
+    #[inline]
     pub fn cast<U>(self) -> Pointer<U> {
         Pointer {
             ptr: unsafe { NonZero::new(*self as *mut U) },
@@ -58,6 +61,7 @@ impl<T> Pointer<T> {
     /// Create an "empty" `Pointer`.
     ///
     /// This acts as a null pointer, although it is represented by 0x1 instead of 0x0.
+    #[inline]
     pub const fn empty() -> Pointer<T> {
         Pointer {
             ptr: unsafe { NonZero::new(0x1 as *mut T) },
@@ -72,6 +76,7 @@ impl<T> Pointer<T> {
     /// # Safety
     ///
     /// This is unsafe, due to OOB offsets being undefined behavior.
+    #[inline]
     pub unsafe fn offset(self, diff: isize) -> Pointer<T> {
         Pointer::new(self.ptr.offset(diff))
     }

+ 4 - 0
src/sync.rs

@@ -29,6 +29,7 @@ pub struct MutexGuard<'a, T: 'a> {
 
 /// Release the mutex.
 impl<'a, T> Drop for MutexGuard<'a, T> {
+    #[inline]
     fn drop(&mut self) {
         self.mutex.locked.store(false, atomic::Ordering::SeqCst);
     }
@@ -37,6 +38,7 @@ impl<'a, T> Drop for MutexGuard<'a, T> {
 impl<'a, T> ops::Deref for MutexGuard<'a, T> {
     type Target = T;
 
+    #[inline]
     fn deref(&self) -> &T {
         &self.mutex.inner
     }
@@ -50,6 +52,7 @@ impl<'a, T> ops::DerefMut for MutexGuard<'a, T> {
 
 impl<T> Mutex<T> {
     /// Create a new mutex with some inner value.
+    #[inline]
     pub const fn new(inner: T) -> Mutex<T> {
         Mutex {
             inner: inner,
@@ -60,6 +63,7 @@ impl<T> Mutex<T> {
     /// Lock this mutex.
     ///
     /// If another lock is held, this will block the thread until it is released.
+    #[inline]
     pub fn lock(&self) -> MutexGuard<T> {
         // Lock the mutex.
         while self.locked.compare_and_swap(false, true, atomic::Ordering::SeqCst) {

+ 4 - 0
src/sys.rs

@@ -36,6 +36,7 @@ pub fn yield_now() {
 /// Retrieve the end of the current data segment.
 ///
 /// This will not change the state of the process in any way, and is thus safe.
+    #[inline]
 pub fn segment_end() -> Result<*const u8, Error> {
     unsafe {
         sys_brk(0)
@@ -49,6 +50,7 @@ pub fn segment_end() -> Result<*const u8, Error> {
 ///
 /// This is unsafe for multiple reasons. Most importantly, it can create an inconsistent state,
 /// because it is not atomic. Thus, it can be used to create Undefined Behavior.
+#[inline]
 pub unsafe fn inc_brk(n: usize) -> Result<Pointer<u8>, Error> {
     let orig_seg_end = try!(segment_end()) as usize;
     if n == 0 { return Ok(Pointer::new(orig_seg_end as *mut u8)) }
@@ -67,6 +69,7 @@ pub unsafe fn inc_brk(n: usize) -> Result<Pointer<u8>, Error> {
 }
 
 /// Redox syscall, BRK.
+#[inline]
 #[cfg(target_os = "redox")]
 unsafe fn sys_brk(n: usize) -> Result<usize, Error> {
     use system::syscall;
@@ -79,6 +82,7 @@ unsafe fn sys_brk(n: usize) -> Result<usize, Error> {
 }
 
 /// Unix syscall, BRK.
+#[inline]
 #[cfg(not(target_os = "redox"))]
 unsafe fn sys_brk(n: usize) -> Result<usize, Error> {
     let ret = syscall!(BRK, n);

+ 7 - 0
src/vec.rs

@@ -27,6 +27,7 @@ impl<T: NoDrop> Vec<T> {
     /// Create a new empty vector.
     ///
     /// This won't allocate a buffer, thus it will have a capacity of zero.
+    #[inline]
     pub const fn new() -> Vec<T> {
         Vec {
             ptr: Pointer::empty(),
@@ -41,6 +42,7 @@ impl<T: NoDrop> Vec<T> {
     ///
     /// This is unsafe, since it won't initialize the buffer in any way, possibly breaking type
     /// safety, memory safety, and so on. Thus, care must be taken upon usage.
+    #[inline]
     pub unsafe fn from_raw_parts(block: Block, len: usize) -> Vec<T> {
         // Make some handy assertions.
         debug_assert!(block.size() % size_of::<T>() == 0, "The size of T does not divide the \
@@ -88,11 +90,13 @@ impl<T: NoDrop> Vec<T> {
     ///
     /// Do not perform mutation or any form of manipulation through this pointer, since doing so
     /// might break invariants.
+    #[inline]
     pub fn ptr(&self) -> &Pointer<T> {
         &self.ptr
     }
 
     /// Get the capacity of this vector.
+    #[inline]
     pub fn capacity(&self) -> usize {
         self.cap
     }
@@ -100,6 +104,7 @@ impl<T: NoDrop> Vec<T> {
     /// Push an element to the end of this vector.
     ///
     /// On success, return `Ok(())`. On failure (not enough capacity), return `Err(())`.
+    #[inline]
     pub fn push(&mut self, elem: T) -> Result<(), ()> {
         if self.len == self.cap {
             Err(())
@@ -124,6 +129,7 @@ impl<T: NoDrop> From<Vec<T>> for Block {
 }
 
 impl<T: NoDrop> ops::Deref for Vec<T> {
+    #[inline]
     type Target = [T];
 
     fn deref(&self) -> &[T] {
@@ -134,6 +140,7 @@ impl<T: NoDrop> ops::Deref for Vec<T> {
 }
 
 impl<T: NoDrop> ops::DerefMut for Vec<T> {
+    #[inline]
     fn deref_mut(&mut self) -> &mut [T] {
         unsafe {
             slice::from_raw_parts_mut(*self.ptr as *mut _, self.len)