
Avoid certain recursive allocations

ticki committed 9 years ago
parent revision 819ed484c3
4 changed files with 61 additions and 34 deletions
  1. src/bookkeeper.rs (+39 −17)
  2. src/sys.rs (+18 −15)
  3. tests/brk.rs (+1 −1)
  4. tests/brk_multithreaded.rs (+3 −1)

+ 39 - 17
src/bookkeeper.rs

@@ -7,7 +7,7 @@ use block::Block;
 use sys;
 
 use core::mem::{align_of, size_of};
-use core::{ops, ptr, slice, cmp, intrinsics};
+use core::{ops, ptr, slice, cmp};
 use core::ptr::Unique;
 
 /// An address representing an "empty" or non-allocated value on the heap.
@@ -88,7 +88,7 @@ impl Bookkeeper {
 /// Calculate the aligner.
 ///
 /// The aligner is what we add to a pointer to align it to a given value.
-fn aligner(ptr: *mut u8, align: usize) -> usize {
+fn aligner(ptr: *const u8, align: usize) -> usize {
     align - ptr as usize % align
 }
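A minimal standalone sketch of this arithmetic, with plain `usize`s standing in for raw pointers, shows one edge case worth knowing: an already-aligned pointer yields `align` rather than 0, so a full alignment block of padding gets reserved.

    fn aligner(ptr: usize, align: usize) -> usize {
        align - ptr % align
    }

    fn main() {
        // 13 needs 3 more bytes to reach the next multiple of 16.
        assert_eq!(aligner(13, 16), 3);
        // Edge case: an aligned address yields `align`, not 0.
        assert_eq!(aligner(32, 16), 16);
    }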
 
@@ -183,7 +183,7 @@ impl BlockList {
 
         // We run right-to-left, since new blocks tend to get added to the right.
         for (n, i) in self.iter_mut().enumerate().rev() {
-            let aligner = aligner(*i.ptr, align);
+            let aligner = aligner(*i.ptr as *const _, align);
 
             if i.size >= size + aligner {
                 // To catch dumb logic errors.
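A small sketch of this right-to-left first-fit scan, over a bare list of sizes rather than ralloc's `Block` type: iterate from the end, where freshly added blocks tend to live, and take the first entry big enough for the payload plus its alignment padding.

    fn find_fit(sizes: &[usize], size: usize, aligner: usize) -> Option<usize> {
        sizes.iter()
            .enumerate()
            .rev() // scan right-to-left
            .find(|&(_, &s)| s >= size + aligner)
            .map(|(n, _)| n)
    }

    fn main() {
        // A request for 24 bytes plus 4 bytes of padding lands on the
        // rightmost block that fits.
        assert_eq!(find_fit(&[8, 64, 32], 24, 4), Some(2));
    }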
@@ -262,11 +262,14 @@ impl BlockList {
         // Calculate the canonical size (extra space is allocated to limit the number of system calls).
         let can_size = canonicalize_brk(size);
         // Get the previous segment end.
+        // TODO Is this thread-safe?
         let seg_end = sys::segment_end().unwrap_or_else(|x| x.handle());
         // Calculate the aligner.
         let aligner = aligner(seg_end, align);
         // Use SYSBRK to allocate extra data segment.
-        let ptr = sys::inc_brk(can_size.checked_add(aligner).unwrap_or_else(|| sys::oom())).unwrap_or_else(|x| x.handle());
+        let ptr = unsafe {
+            sys::inc_brk(can_size.checked_add(aligner).unwrap_or_else(|| sys::oom())).unwrap_or_else(|x| x.handle())
+        };
 
         let alignment_block = Block {
             size: aligner,
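The request size is guarded against overflow with `checked_add`; a sketch of the pattern, with a hypothetical `oom` standing in for `sys::oom()`:

    fn oom() -> ! {
        panic!("out of memory"); // stand-in; ralloc's sys::oom aborts
    }

    fn request_size(can_size: usize, aligner: usize) -> usize {
        // An unchecked add could wrap and silently under-allocate;
        // checked_add turns overflow into an explicit OOM path instead.
        can_size.checked_add(aligner).unwrap_or_else(|| oom())
    }

    fn main() {
        assert_eq!(request_size(4096, 16), 4112);
    }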
@@ -412,6 +415,7 @@ impl BlockList {
 /// This will extend the capacity to a number greater than or equal to `needed`, potentially
     /// reallocating the block list.
     fn reserve(&mut self, needed: usize) {
+        /*
         if needed > self.cap {
             // Set the new capacity.
             self.cap = cmp::max(30, self.cap.saturating_mul(2));
@@ -430,27 +434,39 @@ impl BlockList {
             // Check consistency.
             self.check();
         }
-        /*
+        */
         if needed > self.cap {
-            // Set the new capacity.
-            self.cap = cmp::min(self.cap + 100, self.cap.saturating_mul(2));
+            let block = Block {
+                ptr: unsafe { Unique::new(*self.ptr as *mut _) },
+                size: self.cap,
+            };
+            let ind = self.find(&block);
+            // TODO allow BRK-free non-inplace reservations.
 
             // Reallocate the block list.
-            self.ptr = unsafe {
-                let block = Block {
-                    ptr: Unique::new(*self.ptr as *mut _),
-                    size: self.cap,
-                };
 
-                let cap = self.cap;
-                let ind = self.find(&block);
-                Unique::new(*self.realloc_inplace(block, ind, cap) as *mut _)
-            };
+            // We first try inplace.
+            if self.realloc_inplace(ind, needed).is_ok() {
+                self.cap = needed;
+            } else {
+                // Inplace alloc failed, so we have to BRK some new space.
+
+                let old_ptr = *self.ptr;
+
+                // Make a fresh allocation.
+                self.cap = needed.saturating_add(cmp::min(self.cap, 200 + self.cap / 2));
+                unsafe {
+                    let cap = self.cap;
+                    self.ptr = Unique::new(*self.alloc_fresh(cap, align_of::<Block>()) as *mut _);
+
+                    // Copy the content.
+                    ptr::copy_nonoverlapping(old_ptr as *const _, *self.ptr, self.len);
+                }
+            }
 
             // Check consistency.
             self.check();
         }
-        */
     }
 
     /// Perform a binary search to find the appropriate place where the block can be inserted or is
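This rewritten `reserve` is the heart of the commit. A simplified sketch of the strategy, with a `Vec` standing in for ralloc's block-list storage: growing the allocator's own metadata must not recurse into the ordinary allocation path (which may itself need to grow the metadata), so it first tries to extend the existing block in place and only then falls back to one fresh allocation plus a copy.

    struct BlockListSketch {
        storage: Vec<u64>, // stand-in for the array of Block entries
        cap: usize,
    }

    impl BlockListSketch {
        // Hypothetical in-place growth: a real allocator checks whether
        // the space directly after the current block is free.
        fn realloc_inplace(&mut self, _needed: usize) -> Result<(), ()> {
            Err(()) // pretend the adjacent space is occupied
        }

        fn reserve(&mut self, needed: usize) {
            if needed > self.cap {
                if self.realloc_inplace(needed).is_ok() {
                    self.cap = needed;
                } else {
                    // Over-allocate on the copy path to amortize future
                    // reserves, mirroring `needed + min(cap, 200 + cap / 2)`.
                    let new_cap = needed.saturating_add(self.cap.min(200 + self.cap / 2));
                    let mut fresh = vec![0; new_cap];
                    fresh[..self.storage.len()].copy_from_slice(&self.storage);
                    self.storage = fresh;
                    self.cap = new_cap;
                }
            }
        }
    }

    fn main() {
        let mut list = BlockListSketch { storage: vec![1, 2, 3], cap: 3 };
        list.reserve(10);
        assert!(list.cap >= 10);
        assert_eq!(&list.storage[..3], &[1, 2, 3][..]);
    }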
@@ -501,7 +517,13 @@ impl BlockList {
     /// See [`insert`](#method.insert) for details.
     fn free(&mut self, block: Block) {
         let ind = self.find(&block);
+        self.free_ind(ind, block);
+    }
 
+    /// Free a block placed on some index.
+    ///
+    /// See [`free`](#method.free) for more information.
+    fn free_ind(&mut self, ind: usize, block: Block) {
         debug_assert!(*self[ind].ptr != *block.ptr || !self[ind].is_free(), "Double free.");
 
         // Try to merge right.
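The point of splitting out `free_ind` is to let callers that already know where the block sits skip the binary search that `free` performs. A sketch of the shape, with stand-in types rather than ralloc's:

    fn find(blocks: &[usize], target: usize) -> usize {
        blocks.binary_search(&target).unwrap_or_else(|i| i)
    }

    fn free_ind(blocks: &mut Vec<usize>, ind: usize) {
        // ... merge with neighbors, mark the entry free, etc.
        blocks.remove(ind);
    }

    fn free(blocks: &mut Vec<usize>, target: usize) {
        // `free` is now just a lookup followed by `free_ind`.
        let ind = find(blocks, target);
        free_ind(blocks, ind);
    }

    fn main() {
        let mut blocks = vec![10, 20, 30];
        free(&mut blocks, 20);
        assert_eq!(blocks, [10, 30]);
    }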

+ 18 - 15
src/sys.rs

@@ -47,34 +47,33 @@ pub fn yield_now() {
 /// Retrieve the end of the current data segment.
 ///
 /// This will not change the state of the process in any way, and is thus safe.
-pub fn segment_end() -> Result<*mut u8, Error> {
+pub fn segment_end() -> Result<*const u8, Error> {
     unsafe {
         sys_brk(0)
-    }.map(|x| x as *mut _)
+    }.map(|x| x as *const _)
 }
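The same idiom is available through the libc crate (ralloc issues the raw system call itself): `sbrk(0)` reports the current break without moving it, which is why a plain read of the segment end can stay a safe function. A Unix-only sketch, assuming a `libc` dependency:

    fn main() {
        // Incrementing by 0 only queries the break; nothing changes.
        let seg_end = unsafe { libc::sbrk(0) } as *const u8;
        println!("data segment currently ends at {:p}", seg_end);
    }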
 
 /// Increment data segment of this process by some, _n_, return a pointer to the new data segment
 /// start.
 ///
 /// This uses the system call BRK as backend.
-pub fn inc_brk(n: usize) -> Result<Unique<u8>, Error> {
+///
+/// This is unsafe for multiple reasons. Most importantly, it is not atomic, so concurrent calls
+/// can leave the segment in an inconsistent state, which can in turn cause undefined behavior.
+pub unsafe fn inc_brk(n: usize) -> Result<Unique<u8>, Error> {
     let orig_seg_end = try!(segment_end()) as usize;
-    if n == 0 {
-        unsafe {
-            return Ok(Unique::new(orig_seg_end as *mut u8))
-        }
-    }
+    if n == 0 { return Ok(Unique::new(orig_seg_end as *mut u8)) }
 
     let expected_end = try!(orig_seg_end.checked_add(n).ok_or(Error::ArithOverflow));
-    let new_seg_end = try!(unsafe { sys_brk(expected_end) });
+    let new_seg_end = try!(sys_brk(expected_end));
 
     if new_seg_end != expected_end {
         // Reset the break.
-        try!(unsafe { sys_brk(orig_seg_end) });
+        try!(sys_brk(orig_seg_end));
 
         Err(Error::OutOfMemory)
     } else {
-        Ok(unsafe { Unique::new(orig_seg_end as *mut u8) })
+        Ok(Unique::new(orig_seg_end as *mut u8))
     }
 }
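The race the new doc comment warns about is the classic read-then-extend gap: two threads can both observe the same segment end before either moves the break, then hand out overlapping regions. A toy simulation of one possible remedy, serializing the pair behind a lock; this is not what the commit does (it only marks the function unsafe and leaves the TODO above):

    use std::sync::Mutex;

    // Simulated break pointer; a stand-in for the kernel's program break.
    static BRK: Mutex<usize> = Mutex::new(0x1000);

    // Holding the lock across the read-then-extend pair makes the two
    // steps atomic with respect to other callers, which is exactly what
    // the raw BRK-based inc_brk cannot guarantee on its own.
    fn inc_brk_serialized(n: usize) -> usize {
        let mut brk = BRK.lock().unwrap();
        let old = *brk; // read the current segment end
        *brk = old + n; // extend the segment
        old             // the new allocation starts at the old end
    }

    fn main() {
        let a = inc_brk_serialized(8);
        let b = inc_brk_serialized(8);
        assert_ne!(a, b); // the two regions cannot overlap
    }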
 
@@ -108,21 +107,25 @@ mod test {
 
     #[test]
     fn test_oom() {
-        assert_eq!(inc_brk(9999999999999).err(), Some(Error::OutOfMemory));
+        unsafe {
+            assert_eq!(inc_brk(9999999999999).err(), Some(Error::OutOfMemory));
+        }
     }
 
     #[test]
     fn test_read() {
-        let mem = *inc_brk(8).unwrap() as *mut u64;
         unsafe {
+            let mem = *inc_brk(8).unwrap() as *mut u64;
             assert_eq!(*mem, 0);
         }
     }
 
     #[test]
     fn test_overflow() {
-        assert_eq!(inc_brk(!0).err(), Some(Error::ArithOverflow));
-        assert_eq!(inc_brk(!0 - 2000).err(), Some(Error::ArithOverflow));
+        unsafe {
+            assert_eq!(inc_brk(!0).err(), Some(Error::ArithOverflow));
+            assert_eq!(inc_brk(!0 - 2000).err(), Some(Error::ArithOverflow));
+        }
     }
 
     #[test]

+ 1 - 1
tests/brk.rs

@@ -10,7 +10,7 @@ fn main() {
     let byte_end = unsafe { ptr::read(ptr) };
 
     let abc = "abc";
-    let mem = *inc_brk(8).unwrap() as *mut u64;
+    let mem = unsafe { *inc_brk(8).unwrap() as *mut u64 };
     unsafe {
         *mem = 90823;
         *mem = 2897309273;

+ 3 - 1
tests/brk_multithreaded.rs

@@ -9,7 +9,9 @@ fn main() {
 
     for _ in 0..1000 {
         threads.push(thread::spawn(|| {
-            inc_brk(9999).unwrap();
+            unsafe {
+                inc_brk(9999).unwrap();
+            }
         }));
     }