
Fix reserve (called by insert) to not modify the order

Previously it broke indexes (or their association with blocks).
ticki 8 years ago
commit 302678dae7
4 changed files with 54 additions and 57 deletions
  1. Cargo.toml (+3 -3)
  2. src/allocator.rs (+3 -30)
  3. src/block.rs (+1 -3)
  4. src/bookkeeper.rs (+47 -21)

+ 3 - 3
Cargo.toml

@@ -12,6 +12,9 @@ readme = "README.md"
 keywords = ["alloc", "malloc", "allocator", "ralloc", "redox"]
 license = "MIT"
 
+[dependencies]
+unborrow = "0.3.0"
+
 [dependencies.ralloc_shim]
 path = "shim"
 
@@ -19,9 +22,6 @@ path = "shim"
 version = "0.0.80"
 optional = true
 
-[dependencies.unborrow]
-git = "https://github.com/durka/unborrow.git"
-
 [profile.release]
 panic = "abort"
 opt-level = 3

+ 3 - 30
src/allocator.rs

@@ -160,36 +160,9 @@ derive_deref!(LocalAllocator, Bookkeeper);
 impl Allocator for LocalAllocator {
     #[inline]
     fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
-        /// Canonicalize the requested space.
-        ///
-        /// We request excessive space to the upstream allocator to avoid repeated requests and
-        /// lock contentions.
-        #[inline]
-        fn canonicalize_space(min: usize) -> usize {
-            // TODO tweak this.
-
-            // To avoid having mega-allocations allocate way too much space, we
-            // have a maximal extra space limit.
-            if min > 8192 { min } else {
-                // To avoid paying for short-lived or little-allocating threads, we have no minimum.
-                // Instead we multiply.
-                min * 4
-                // This won't overflow due to the condition of this branch.
-            }
-        }
-
-        // Get the block from the global allocator.
-        let (res, excessive) = GLOBAL_ALLOCATOR.lock()
-            .get()
-            .alloc(canonicalize_space(size), align)
-            .split(size);
-
-        // Free the excessive space to the current allocator. Note that you cannot simply push
-        // (which is the case for SBRK), due to the block not necessarily being above all the other
-        // blocks in the pool. For this reason, we let `free` handle the search and so on.
-        self.free(excessive);
-
-        res
+        // Get the block from the global allocator. Note that we cannot canonicalize `size`,
+        // because freeing the excessive block would change the order.
+        GLOBAL_ALLOCATOR.lock().get().alloc(size, align)
     }
 }
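
Why the canonicalization had to go: freeing the excessive half of the split inserts a block at its sorted position in the pool, shifting every index at or above it while a caller such as `insert` may still hold one. A minimal sketch of the hazard, using a plain Vec<u32> as a stand-in for ralloc's block pool (not the actual types):

    fn main() {
        let mut pool = vec![10, 30, 40]; // sorted block pool stand-in
        let i = 1;                       // a caller associates index 1 with block "30"
        // Freeing a low block inserts it at its sorted position,
        // shifting the upper part of the pool one slot to the right:
        pool.insert(0, 5);               // pool is now [5, 10, 30, 40]
        assert_ne!(pool[i], 30);         // index 1 no longer names block "30"
    }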
 

+ 1 - 3
src/block.rs

@@ -159,9 +159,7 @@ impl Block {
     /// This marks it as free, and returns the old value.
     #[inline]
     pub fn pop(&mut self) -> Block {
-        // TODO durka/unborrow#2 is blocking.
-        let empty = Block::empty(self.ptr.clone());
-        mem::replace(self, empty)
+        unborrow!(mem::replace(self, Block::empty(self.ptr.clone())))
     }
 
     /// Is this block placed left to the given other block?
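
The two removed lines were the manual expansion of what `unborrow!` now generates: bind the argument to a temporary so the shared borrow of `self` taken by `Block::empty(self.ptr.clone())` ends before `mem::replace` borrows `self` mutably (written as one expression, the borrow checker of the time rejected it). A compiling stand-in using a hypothetical `Slot` type rather than ralloc's `Block`:

    use std::mem;

    struct Slot { ptr: u32 }

    impl Slot {
        fn pop(&mut self) -> Slot {
            // Roughly the expansion of
            // unborrow!(mem::replace(self, Slot { ptr: self.ptr })):
            let empty = Slot { ptr: self.ptr }; // shared borrow of self ends here
            mem::replace(self, empty)           // unique &mut borrow is now allowed
        }
    }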

+ 47 - 21
src/bookkeeper.rs

@@ -79,6 +79,7 @@ impl Bookkeeper {
             pool: vec,
         };
 
+        log!(res, "Bookkeeper created.");
         res.check();
 
         res
@@ -148,6 +149,9 @@ impl Bookkeeper {
     /// Technically, this could be done through an iterator, but this more unidiomatic way is
     /// slightly faster in some cases.
     pub fn for_each<F: FnMut(Block)>(mut self, mut f: F) {
+        // Logging.
+        log!(self, "Iterating over the blocks of the bookkeeper...");
+
         // Run over all the blocks in the pool.
         while let Some(i) = self.pool.pop() {
             f(i);
@@ -229,6 +233,11 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     ///
     /// The returned pointer is assumed to be aligned to `align`. If this is not held, all future
     /// guarantees are invalid.
+    ///
+    /// # Assumptions
+    ///
+    /// This is assumed to not modify the order. If some block `b` is associated with index `i`
+    /// prior to the call of this function, it should be so after it as well.
     fn alloc_fresh(&mut self, size: usize, align: usize) -> Block;
 
     /// Allocate a chunk of memory.
@@ -623,8 +632,10 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
                 }
             }
 
-            // Reserve space.
-            unborrow!(self.reserve(self.pool.len() + 1));
+            // Reserve space and free the old buffer.
+            if let Some(x) = unborrow!(self.reserve(self.pool.len() + 1)) {
+                self.free(x);
+            }
 
 
             // Merging failed. Note that trailing empty blocks are not allowed, hence the last block is
@@ -641,8 +652,13 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         self.check();
     }
 
-    /// Reserve some number of elements.
-    fn reserve(&mut self, min_cap: usize) {
+    /// Reserve some number of elements, and return the old buffer's block.
+    ///
+    /// # Assumptions
+    ///
+    /// This is assumed to not modify the order. If some block `b` is associated with index `i`
+    /// prior to the call of this function, it should be so after it as well.
+    fn reserve(&mut self, min_cap: usize) -> Option<Block> {
         // Logging.
         log!(self;min_cap, "Reserving {}.", min_cap);
 
@@ -655,10 +671,10 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
 
             // Break it to me!
             let new_buf = self.alloc_external(new_cap * mem::size_of::<Block>(), mem::align_of::<Block>());
-            let old_buf = self.pool.refill(new_buf);
 
-            // Free the old buffer.
-            self.free(old_buf);
+            Some(self.pool.refill(new_buf))
+        } else {
+            None
         }
     }
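
The new contract: `reserve` still grows the pool, but it hands the retired buffer back instead of calling `self.free(old_buf)` itself, leaving the caller to decide when the order-disturbing free happens. A sketch of the same contract over a plain Vec<u32> (hypothetical `grow` helper, not ralloc's API):

    use std::mem;

    fn grow(pool: &mut Vec<u32>, min_cap: usize) -> Option<Vec<u32>> {
        if pool.capacity() < min_cap {
            let mut new = Vec::with_capacity(min_cap * 2);
            new.extend_from_slice(pool);  // carry the entries over
            Some(mem::replace(pool, new)) // hand the old buffer back
        } else {
            None                          // nothing retired, nothing to free
        }
    }

Callers then mirror the diff above: `if let Some(x) = unborrow!(self.reserve(self.pool.len() + 1)) { self.free(x); }`, so the free only runs once no index into the pool is in flight.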
 
@@ -667,6 +683,10 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     /// If the space is non-empty, the elements will be pushed filling out the empty gaps to the
     /// right.
     ///
+    /// # Warning
+    ///
+    /// This might in fact break the order.
+    ///
     /// # Panics
     ///
     /// Panics when `ind` is greater than the block pool's length.
@@ -756,24 +776,25 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         // Log the operation.
         log!(self;ind, "Moving {} blocks to the right.", n);
 
-        unsafe {
-            // TODO clean this mess up.
+        // The old vector's buffer.
+        let mut old_buf = None;
 
-            // We will only extend the length if we were unable to fit it into the current length.
-            if ind + n == self.pool.len() {
-                // Reserve space.
-                // FIXME This breaks order!
-                unborrow!(self.reserve(self.pool.len() + 1));
+        // We will only extend the length if we were unable to fit it into the current length.
+        if ind + n == self.pool.len() {
+            // Reserve space. This does not break order, due to the assumption that
+            // `reserve` never breaks order.
+            old_buf = unborrow!(self.reserve(self.pool.len() + 1));
 
-                // We will move a block into reserved memory but outside of the vec's bounds. For
-                // that reason, we push an uninitialized element to extend the length, which will
-                // be assigned in the memcpy.
-                let res = self.pool.push(mem::uninitialized());
+            // We will move a block into reserved memory but outside of the vec's bounds. For
+            // that reason, we push an uninitialized element to extend the length, which will
+            // be assigned in the memcpy.
+            let res = self.pool.push(unsafe { mem::uninitialized() });
 
-                // Just some assertions...
-                debug_assert!(res.is_ok(), "Push failed (buffer full).");
-            }
+            // Just some assertions...
+            debug_assert!(res.is_ok(), "Push failed (buffer full).");
+        }
 
+        unsafe {
             // Memmove the elements to make a gap to the new block.
             ptr::copy(self.pool.get_unchecked(ind) as *const Block,
                       self.pool.get_unchecked_mut(ind + 1) as *mut Block, n);
@@ -782,6 +803,11 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
             ptr::write(self.pool.get_unchecked_mut(ind), block);
         }
 
+        // Free the old buffer, if it exists.
+        if let Some(block) = old_buf {
+            self.free(block);
+        }
+
         // Check consistency.
         self.check();
     }
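
The memmove path now states the same discipline end to end: reserve (setting the retired buffer aside), extend the length, shift, write, and only then free. A self-contained sketch over a plain Vec<u32> (the real code uses ralloc's fixed-capacity pool plus its log!/check machinery):

    use std::ptr;

    fn insert_block(pool: &mut Vec<u32>, ind: usize, block: u32, old_buf: Option<Vec<u32>>) {
        let n = pool.len() - ind; // number of blocks to shift right
        pool.reserve(1);          // stand-in for self.reserve(self.pool.len() + 1)
        unsafe {
            let base = pool.as_mut_ptr();
            ptr::copy(base.add(ind), base.add(ind + 1), n); // memmove a gap open
            ptr::write(base.add(ind), block);               // place the new block
            pool.set_len(pool.len() + 1);
        }
        // The deferred free: every index/block association is settled by now.
        drop(old_buf);
    }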