
Fix bugs in memtrimming related to the byte count.

Various issues (such as premature checks) were present in the byte count
handling beforehand. We fix these.

- Fix invalid byte counts.
- Add a `LOCAL_MEMTRIM_STOP` constant to `shim`, which bounds memtrimming
  from below (i.e. memtrimming stops once the byte count falls below this
  value); see the sketch after this list.
- Add more logs.
- Add assertions about libc interference with SBRK.
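
As a rough illustration of the intended behaviour (a minimal sketch with
hypothetical names, not the actual allocator code), the two constants act as
a high and a low watermark:

    // Hysteresis-style memtrim: trigger when the free byte count exceeds a
    // high watermark, then release blocks until it falls below a low one.
    const LOCAL_MEMTRIM_LIMIT: usize = 16384;
    const LOCAL_MEMTRIM_STOP: usize = 1024;

    struct Local {
        total_bytes: usize,
        blocks: Vec<usize>, // block sizes, standing in for real blocks
    }

    impl Local {
        fn on_new_memory(&mut self) {
            if self.total_bytes > LOCAL_MEMTRIM_LIMIT {
                while let Some(size) = self.blocks.pop() {
                    // Keep the byte count in sync with the pool.
                    self.total_bytes -= size;
                    // ...hand the block back to the global allocator here...
                    if self.total_bytes < LOCAL_MEMTRIM_STOP { break; }
                }
            }
        }
    }

    fn main() {
        let mut local = Local { total_bytes: 20480, blocks: vec![4096; 5] };
        local.on_new_memory();
        assert!(local.total_bytes < LOCAL_MEMTRIM_STOP);
    }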

There is still some problem when running
ticki, 8 years ago
commit 25c0dbe99d
9 changed files with 89 additions and 36 deletions:
  1. shim/src/config.rs (+5, -0)
  2. src/allocator.rs (+19, -12)
  3. src/block.rs (+13, -2)
  4. src/bookkeeper.rs (+17, -10)
  5. src/brk.rs (+24, -7)
  6. src/fail.rs (+1, -1)
  7. src/log.rs (+3, -3)
  8. src/ptr.rs (+1, -1)
  9. src/tls.rs (+6, -0)

+ 5 - 0
shim/src/config.rs

@@ -20,6 +20,11 @@ pub const FRAGMENTATION_SCALE: usize = 10;
 ///
 /// Whenever a local allocator has more free bytes than this value, it will be memtrimmed.
 pub const LOCAL_MEMTRIM_LIMIT: usize = 16384;
+/// The local memtrim stop threshold.
+///
+/// Local memtrimming will continue until the allocator holds fewer free bytes than this
+/// value.
+pub const LOCAL_MEMTRIM_STOP: usize = 1024;
 
 /// The minimum log level.
 pub const MIN_LOG_LEVEL: u8 = 0;

+ 19 - 12
src/allocator.rs

@@ -167,6 +167,9 @@ impl Allocator for GlobalAllocator {
 
             // Check if the memtrim is worth it.
             if block.size() >= config::OS_MEMTRIM_WORTHY {
+                // Logging...
+                log!(NOTE, "Memtrimming the global allocator.");
+
                 // Release the block to the OS.
                 if let Err(block) = brk::lock().release(block) {
                     // It failed, put the block back.
@@ -178,6 +181,9 @@ impl Allocator for GlobalAllocator {
                 // segments being as long as possible. For that reason, repeating to push and
                 // release would fail.
             } else {
+                // Logging...
+                log!(WARNING, "Memtrimming for the global allocator failed.");
+
                 // Push the block back.
                 // TODO: This can be done faster.
                 self.push(block);
@@ -204,7 +210,7 @@ impl LocalAllocator {
         /// This will simply free everything to the global allocator.
         extern fn dtor(alloc: &ThreadLocalAllocator) {
             /// Logging...
-            log!(NOTE, "Initializing the local allocator.");
+            log!(NOTE, "Deinitializing and freeing the local allocator.");
 
             // This is important! The thread destructors guarantee no ordering, and thus one could
             // use the allocator _after_ this destructor has finished. In fact, this is a real problem,
@@ -224,6 +230,9 @@ impl LocalAllocator {
             alloc.into_inner().inner.for_each(move |block| global_alloc.free(block));
         }
 
+        // Logging...
+        log!(NOTE, "Initializing the local allocator.");
+
         // The initial acquired segment.
         let initial_segment = GLOBAL_ALLOCATOR
             .lock()
@@ -239,14 +248,6 @@ impl LocalAllocator {
             }
         }
     }
-
-    /// Shuld we memtrim this allocator?
-    ///
-    /// The idea is to free memory to the global allocator to unify small stubs and avoid
-    /// fragmentation and thread accumulation.
-    fn should_memtrim(&self) -> bool {
-        self.total_bytes() < config::FRAGMENTATION_SCALE * self.len() || self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT
-    }
 }
 
 #[cfg(feature = "tls")]
@@ -263,7 +264,13 @@ impl Allocator for LocalAllocator {
 
     #[inline]
     fn on_new_memory(&mut self) {
-        if self.should_memtrim() {
+        // The idea is to free memory to the global allocator to unify small stubs and avoid
+        // fragmentation and thread accumulation.
+        if self.total_bytes() < config::FRAGMENTATION_SCALE * self.len()
+           || self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT {
+            // Log stuff.
+            log!(NOTE, "Memtrimming the local allocator.");
+
             // Lock the global allocator.
             let mut global_alloc = GLOBAL_ALLOCATOR.lock();
             let global_alloc = global_alloc.get();
@@ -272,8 +279,8 @@ impl Allocator for LocalAllocator {
                 // Pop'n'free.
                 global_alloc.free(block);
 
-                // Memtrim 'till we can't memtrim anymore.
-                if !self.should_memtrim() { break; }
+                // Memtrim 'till we won't memtrim anymore.
+                if self.total_bytes() < config::LOCAL_MEMTRIM_STOP { break; }
             }
         }
     }

+ 13 - 2
src/block.rs

@@ -131,7 +131,7 @@ impl Block {
         }
     }
 
-    /// Volatile zero this memory.
+    /// Volatile zero this memory if the `security` feature is set.
     pub fn sec_zero(&mut self) {
         use core::intrinsics;
 
@@ -191,6 +191,12 @@ impl Block {
     #[inline]
     #[allow(cast_possible_wrap)]
     pub fn align(&mut self, align: usize) -> Option<(Block, Block)> {
+        // Logging.
+        log!(INTERNAL, "Padding {:?} to align {}", self, align);
+
+        // TODO: This function suffers from external fragmentation. Leaving bigger segments might
+        // increase performance.
+
         // Calculate the aligner, which defines the smallest size required as precursor to align
         // the block to `align`.
         let aligner = (align - *self.ptr as usize % align) % align;
@@ -217,7 +223,12 @@ impl Block {
                     },
                 }
             ))
-        } else { None }
+        } else {
+            // Logging.
+            log!(INTERNAL, "Unable to align block.");
+
+            None
+        }
     }
 
     /// Mark this block free to the debugger.
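
As a side note, the aligner arithmetic used above computes the padding needed
before an address becomes `align`-aligned. In isolation (hypothetical helper
name, not part of the crate):

    /// Padding required before `addr` to reach the next `align` boundary.
    fn aligner(addr: usize, align: usize) -> usize {
        (align - addr % align) % align
    }

    fn main() {
        assert_eq!(aligner(13, 8), 3); // 13 + 3 = 16 is 8-aligned.
        assert_eq!(aligner(16, 8), 0); // Already aligned; no padding needed.
    }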

+ 17 - 10
src/bookkeeper.rs

@@ -175,7 +175,15 @@ impl Bookkeeper {
 
     /// Pop the top block from the pool.
     pub fn pop(&mut self) -> Option<Block> {
-        self.pool.pop()
+        self.pool.pop().map(|res| {
+            // Update the byte count.
+            self.total_bytes -= res.size();
+
+            // Check stuff, just in case.
+            self.check();
+
+            res
+        })
     }
 
     /// Get the length of the pool.
@@ -240,8 +248,8 @@ impl Bookkeeper {
             }
 
             // Make sure the sum is maintained properly.
-            assert!(total_bytes == self.total_bytes, "The sum is not equal to the `total_bytes` \
-                    field. ({} ≠ {}).", total_bytes, self.total_bytes);
+            assert!(total_bytes == self.total_bytes, "The sum is not equal to the 'total_bytes' \
+                    field: {} ≠ {}.", total_bytes, self.total_bytes);
         }
     }
 }
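
The underlying discipline here (sketched below with plain sizes instead of
real blocks; hypothetical names) is that every mutation of the pool must
update `total_bytes`, and the debug check recomputes the sum to catch drift:

    struct Pool {
        blocks: Vec<usize>, // block sizes
        total_bytes: usize, // must always equal the sum of `blocks`
    }

    impl Pool {
        fn push(&mut self, size: usize) {
            self.blocks.push(size);
            self.total_bytes += size;
            self.check();
        }

        fn pop(&mut self) -> Option<usize> {
            self.blocks.pop().map(|size| {
                self.total_bytes -= size;
                self.check();
                size
            })
        }

        fn check(&self) {
            debug_assert_eq!(self.total_bytes, self.blocks.iter().sum::<usize>());
        }
    }

    fn main() {
        let mut pool = Pool { blocks: Vec::new(), total_bytes: 0 };
        pool.push(512);
        pool.push(256);
        assert_eq!(pool.pop(), Some(256));
        assert_eq!(pool.total_bytes, 512);
    }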
@@ -343,14 +351,14 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
                 None
             }
         }).next() {
+            // Update the pool byte count.
+            self.total_bytes -= b.size();
+
             if self.pool[n].is_empty() {
                 // For empty alignment invariant.
                 let _ = self.remove_at(n);
             }
 
-            // Update the pool byte count.
-            self.total_bytes -= b.size();
-
             // Split and mark the block uninitialized to the debugger.
             let (res, excessive) = b.mark_uninitialized().split(size);
 
@@ -686,9 +694,8 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
 
             // Reserve space and free the old buffer.
             if let Some(x) = unborrow!(self.reserve(self.pool.len() + 1)) {
-                // `free` handles the count, so we set it back.
-                // TODO: Find a better way to do so.
-                self.total_bytes -= block.size();
+                // Note that we do not decrement the count, because this isn't putting back our
+                // pushed block.
 
                 self.free(x);
             }
@@ -924,7 +931,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
             // Replace the block at `ind` with the left empty block from `ind + 1`.
             let block = mem::replace(&mut self.pool[ind], empty);
 
-            // Iterate over the pool from `ind` and down.
+            // Iterate over the pool from `ind` and down, setting each trailing empty block to
+            // the empty edge of our block.
             let skip = self.pool.len() - ind;
             for place in self.pool.iter_mut().rev().skip(skip).take_while(|x| x.is_empty()) {
                 // Empty the blocks.

+ 24 - 7
src/brk.rs

@@ -39,7 +39,7 @@ impl BrkLock {
     ///
     /// Due to being able to shrink the program break, this method is unsafe.
     unsafe fn sbrk(&mut self, size: isize) -> Result<Pointer<u8>, ()> {
-        log!(NOTE, "BRKing new space.");
+        log!(NOTE, "Incrementing the program break by {} bytes.", size);
 
         // Calculate the new program break. To avoid making multiple syscalls, we make use of the
         // state cache.
@@ -48,16 +48,23 @@ impl BrkLock {
         // Break it to me, babe!
         let old_brk = Pointer::new(syscalls::brk(*expected_brk as *const u8) as *mut u8);
 
-        if expected_brk == old_brk && size != 0 {
-            // BRK failed. This syscall is rather weird, but whenever it fails (e.g. OOM) it
-            // returns the old (unchanged) break.
-            Err(())
-        } else {
+        // AAAARGH WAY TOO MUCH LOGGING
+        //
+        // No, sweetie. Never too much logging.
+        //
+        // REEEEEEEEEEEEEEEEEEEEEE
+        log!(INTERNAL, "Program break set.");
+
+        if expected_brk == old_brk {
             // Update the program break cache.
             self.state.current_brk = Some(expected_brk.clone());
 
             // Return the old break.
             Ok(old_brk)
+        } else {
+            // BRK failed. This syscall is rather weird, but whenever it fails (e.g. OOM) it
+            // returns the old (unchanged) break.
+            Err(())
         }
     }
 
@@ -68,6 +75,7 @@ impl BrkLock {
     pub fn release(&mut self, block: Block) -> Result<(), Block> {
         // Check if we are actually next to the program break.
         if self.current_brk() == Pointer::from(block.empty_right()) {
+            // Logging...
             log!(DEBUG, "Releasing {:?} to the OS.", block);
 
             // We are. Now, sbrk the memory back. Due to the condition above, this is safe.
@@ -78,6 +86,9 @@ impl BrkLock {
 
             Ok(())
         } else {
+            // Logging...
+            log!(DEBUG, "Unable to release {:?} to the OS.", block);
+
             // Return the block back.
             Err(block)
         }
@@ -88,7 +99,13 @@ impl BrkLock {
     /// If not available in the cache, request it from the OS.
     fn current_brk(&mut self) -> Pointer<u8> {
         if let Some(ref cur) = self.state.current_brk {
-            return cur.clone();
+            let res = cur.clone();
+            // Make sure that the break is set properly (i.e. there is no libc interference).
+            debug_assert!(res == current_brk(), "The cached program break is out of sync with the \
+                          actual program break. Are you interfering with BRK? If so, use the \
+                          provided 'sbrk' instead.");
+
+            return res;
         }
 
         // TODO: Damn it, borrowck.
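
The assertion above guards the cache against anything else moving the break.
The shape of that pattern, in a standalone sketch (hypothetical types; on
POSIX, `sbrk(0)` returns the current break without moving it):

    extern "C" {
        fn sbrk(increment: isize) -> *mut u8; // POSIX sbrk, not ralloc's own wrapper
    }

    struct BrkState {
        cached: Option<*mut u8>,
    }

    impl BrkState {
        fn current_brk(&mut self) -> *mut u8 {
            if let Some(cached) = self.cached {
                // Only pay for the extra syscall in debug builds.
                debug_assert_eq!(cached, unsafe { sbrk(0) },
                    "cached program break out of sync; did something call brk/sbrk directly?");
                return cached;
            }
            let live = unsafe { sbrk(0) };
            self.cached = Some(live);
            live
        }
    }

    fn main() {
        let mut state = BrkState { cached: None };
        let a = state.current_brk();
        let b = state.current_brk(); // second read hits the cache
        assert_eq!(a, b);
    }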

+ 1 - 1
src/fail.rs

@@ -76,7 +76,7 @@ pub fn set_thread_oom_handler(handler: fn() -> !) {
         let res = thread_oom.replace(Some(handler));
 
         // Throw a warning if it overrides another handler.
-        if cfg!(debug_assertions) && res.is_some() {
+        if res.is_some() {
             log!(WARNING, "An old thread OOM handler was overridden.");
         }
     });

+ 3 - 3
src/log.rs

@@ -67,10 +67,10 @@ macro_rules! bk_log {
         {
             use log::internal::{IntoCursor, BlockLogger};
 
-            log!(INTERNAL, "({:2})   {:10?} : ", $bk.id, BlockLogger {
+            log!(INTERNAL, "({:2}) {:10?} : {}", $bk.id, BlockLogger {
                 cur: $cur.clone().into_cursor(),
                 blocks: &$bk.pool,
-            });
+            }, format_args!($( $arg ),*));
         }
     };
 }
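
The fix above threads the caller's format arguments through `format_args!`
instead of dropping them. The pattern in isolation (hypothetical macro name):

    // A wrapper macro that prefixes a tag, then forwards the remaining
    // tokens to `format_args!` so the caller's `{}` placeholders still work.
    macro_rules! tagged_log {
        ($tag:expr, $( $arg:tt )*) => {
            println!("[{}] {}", $tag, format_args!($( $arg )*));
        };
    }

    fn main() {
        tagged_log!("NOTE", "freed {} bytes", 1024);
    }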
@@ -124,7 +124,7 @@ macro_rules! assert_eq {
         let left = &$left;
         let right = &$right;
 
-        assert!(left == right, "(left: `{:?}`, right: `{:?}`)", left, right)
+        assert!(left == right, "(left: '{:?}', right: '{:?}')", left, right)
     })
 }
 

+ 1 - 1
src/ptr.rs

@@ -7,7 +7,7 @@ use core::{ops, marker};
 ///
 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor of this wrapper owns
 /// the referent.
-#[derive(PartialEq, Debug, Clone)]
+#[derive(PartialEq, Eq, Debug, Clone)]
 pub struct Pointer<T> {
     /// The internal pointer.
     ptr: NonZero<*mut T>,

+ 6 - 0
src/tls.rs

@@ -31,6 +31,9 @@ impl<T: 'static> Key<T> {
     #[inline]
     pub fn with<F, R>(&'static self, f: F) -> R
         where F: FnOnce(&T) -> R {
+        // Logging.
+        log!(INTERNAL, "Accessing TLS variable.");
+
         f(&self.inner)
     }
 
@@ -40,6 +43,9 @@ impl<T: 'static> Key<T> {
     // TODO: Make this automatic on `Drop`.
     #[inline]
     pub fn register_thread_destructor(&'static self, dtor: extern fn(&T)) {
+        // Logging.
+        log!(INTERNAL, "Registering thread destructor.");
+
         // This is safe due to sharing memory layout.
         thread_destructor::register(&self.inner as *const T as *const u8 as *mut u8,
                                     unsafe { mem::transmute(dtor) });