
OS memtrimming for the global allocator.

This commit introduces OS/BRK memtrimming, in which memory held by the
global allocator is freed back to the OS when a certain limit is reached.
This prevents long-running processes from acquiring more and more memory
from the system without ever releasing it.

- Implement safe BRK releasing.
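
The policy described above can be sketched in isolation. The following is a
minimal, self-contained illustration, not ralloc's actual API: the function
name and parameters are hypothetical, and only the two thresholds mirror the
constants added in src/allocator.rs below.

    /// Decide whether a block bordering the program break is worth giving
    /// back to the OS. `held_bytes` and `block_size` stand in for the
    /// allocator's internal bookkeeping.
    fn should_memtrim(held_bytes: usize, block_size: usize) -> bool {
        // Trim only once the allocator holds more than ~200 MB in total.
        const OS_MEMTRIM_LIMIT: usize = 200_000_000;
        // Skip blocks too small to justify a syscall.
        const MEMTRIM_WORTHY: usize = 4_000;

        held_bytes > OS_MEMTRIM_LIMIT && block_size >= MEMTRIM_WORTHY
    }

    fn main() {
        assert!(should_memtrim(250_000_000, 8_192));  // over the limit, block worth it
        assert!(!should_memtrim(250_000_000, 512));   // over the limit, block too small
        assert!(!should_memtrim(10_000_000, 8_192));  // under the limit, nothing to do
    }
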
ticki 8 years ago
parent
commit
8b83c9a282
3 changed files with 101 additions and 35 deletions
  1. src/allocator.rs: +35 -0
  2. src/brk.rs: +66 -33
  3. src/sys.rs: +0 -2

+ 35 - 0
src/allocator.rs

@@ -151,6 +151,41 @@ impl Allocator for GlobalAllocator {
 
         res
     }
+
+    fn on_new_memory(&mut self) {
+        /// The memtrim limit.
+        ///
+        /// Whenever this is exceeded, the allocator will try to free as much memory to the system
+        /// as it can.
+        const OS_MEMTRIM_LIMIT: usize = 200000000;
+        /// Minimum size before a block is worthy to memtrim.
+        const MEMTRIM_WORTHY: usize = 4000;
+
+        if self.total_bytes() > OS_MEMTRIM_LIMIT {
+            // Memtrim as much as we can.
+
+            // Pop the last block.
+            let block = self.pop().expect("The byte count on the global allocator is invalid.");
+
+            // Check if the memtrim is worth it.
+            if block.size() >= MEMTRIM_WORTHY {
+                // Release the block to the OS.
+                if let Err(block) = brk::lock().release(block) {
+                    // It failed, put the block back.
+                    // TODO: This can be done faster.
+                    self.push(block);
+                }
+
+            // Note that this block is the only block next to the program break, due to the
+            // segments being as long as possible. For that reason, repeating the pop and
+            // release would fail.
+            } else {
+                // Push the block back.
+                // TODO: This can be done faster.
+                self.push(block);
+            }
+        }
+    }
 }
 
 /// A local allocator.
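
A pattern worth noting in the hunk above: release hands ownership of the
block back through Err(block) when the trim cannot happen, so the caller
simply pushes it back into the allocator. A minimal sketch of that idiom,
using a stand-in Block type and a placeholder condition instead of ralloc's
real adjacency check:

    /// A stand-in for ralloc's `Block`; only the size matters here.
    struct Block {
        size: usize,
    }

    /// Try to release a block. On failure, ownership is returned to the
    /// caller so the block can be reinserted instead of leaking.
    fn release(block: Block, can_release: bool) -> Result<(), Block> {
        if can_release {
            // The memory is handed back; the block is consumed.
            Ok(())
        } else {
            // Refuse and give the block back untouched.
            Err(block)
        }
    }

    fn main() {
        let mut free_list: Vec<Block> = Vec::new();

        // Mirrors the caller in `on_new_memory`: on failure, push the block back.
        if let Err(block) = release(Block { size: 8_192 }, false) {
            free_list.push(block);
        }
        assert_eq!(free_list.len(), 1);
        assert_eq!(free_list[0].size, 8_192);
    }
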

+ 66 - 33
src/brk.rs

@@ -13,7 +13,7 @@ use {sync, sys, fail};
 ///
 /// This is used for avoiding data races between multiple allocators.
 static BRK_MUTEX: Mutex<BrkState> = Mutex::new(BrkState {
-    brk_end: None,
+    current_brk: None,
 });
 
 /// A cache of the BRK state.
@@ -21,16 +21,78 @@ static BRK_MUTEX: Mutex<BrkState> = Mutex::new(BrkState {
 /// To avoid asking the OS for information whenever needed, we cache it.
 struct BrkState {
     /// The program break's end
-    brk_end: Option<Pointer<u8>>,
+    current_brk: Option<Pointer<u8>>,
 }
 
 /// A BRK lock.
 pub struct BrkLock {
     /// The inner lock.
-    guard: sync::MutexGuard<'static, BrkState>,
+    state: sync::MutexGuard<'static, BrkState>,
 }
 
 impl BrkLock {
+    /// Extend the program break.
+    ///
+    /// # Safety
+    ///
+    /// Due to being able to shrink the program break, this method is unsafe.
+    unsafe fn sbrk(&mut self, size: isize) -> Result<Pointer<u8>, ()> {
+        // Calculate the new program break. To avoid making multiple syscalls, we make use of the
+        // state cache.
+        let expected_brk = self.current_brk().offset(size);
+
+        // Break it to me, babe!
+        let old_brk = Pointer::new(sys::brk(*expected_brk as *const u8) as *mut u8);
+
+        if expected_brk == old_brk && size != 0 {
+            // BRK failed. This syscall is rather weird, but whenever it fails (e.g. OOM) it
+            // returns the old (unchanged) break.
+            Err(())
+        } else {
+            // Update the program break cache.
+            self.state.current_brk = Some(expected_brk.clone());
+
+            // Return the old break.
+            Ok(old_brk)
+        }
+    }
+
+    /// Safely release memory to the OS.
+    ///
+    /// If it fails, we return the block to the caller.
+    #[allow(cast_possible_wrap)]
+    pub fn release(&mut self, block: Block) -> Result<(), Block> {
+        // Check if we are actually next to the program break.
+        if self.current_brk() == Pointer::from(block.empty_right()) {
+            // We are. Now, sbrk the memory back. Due to the condition above, this is safe.
+            let res = unsafe { self.sbrk(-(block.size() as isize)) };
+
+            // In debug mode, we want to check for WTF-worthy scenarios.
+            debug_assert!(res.is_ok(), "Failed to set the program break back.");
+
+            Ok(())
+        } else {
+            // Return the block back.
+            Err(block)
+        }
+    }
+
+    /// Get the current program break.
+    ///
+    /// If not available in the cache, request it from the OS.
+    fn current_brk(&mut self) -> Pointer<u8> {
+        if let Some(ref cur) = self.state.current_brk {
+            return cur.clone();
+        }
+
+        // TODO: Damn it, borrowck.
+        // Get the current break.
+        let cur = current_brk();
+        self.state.current_brk = Some(cur.clone());
+
+        cur
+    }
+
     /// BRK new space.
     ///
     /// The first block represents the aligner segment (that is the precursor aligning the middle
@@ -64,41 +126,12 @@ impl BrkLock {
 
         (alignment_block, res, excessive)
     }
-
-    /// Extend the program break.
-    ///
-    /// # Safety
-    ///
-    /// Due to being able shrink the program break, this method is unsafe.
-    unsafe fn sbrk(&mut self, size: isize) -> Result<Pointer<u8>, ()> {
-        // Calculate the new program break. To avoid making multiple syscalls, we make use of the
-        // state cache.
-        let new_brk = self.guard.brk_end
-            .clone()
-            .unwrap_or_else(current_brk)
-            .offset(size);
-
-        // Break it to me, babe!
-        let old_brk = Pointer::new(sys::brk(*new_brk as *const u8) as *mut u8);
-
-        if new_brk == old_brk && size != 0 {
-            // BRK failed. This syscall is rather weird, but whenever it fails (e.g. OOM) it
-            // returns the old (unchanged) break.
-            Err(())
-        } else {
-            // Update the program break cache.
-            self.guard.brk_end = Some(old_brk.clone());
-
-            // Return the old break.
-            Ok(old_brk)
-        }
-    }
 }
 
 /// Lock the BRK lock to allow manipulating the program break.
 pub fn lock() -> BrkLock {
     BrkLock {
-        guard: BRK_MUTEX.lock(),
+        state: BRK_MUTEX.lock(),
     }
 }
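
The safety argument in release rests on two points: a block may only be
returned to the OS if it ends exactly at the current program break, and the
cached break must be updated whenever the break moves. The sketch below
simulates the break as a plain address so no real syscall is made; the type
and method names are hypothetical, not ralloc's.

    /// A simulated program break; in ralloc the cached value lives behind
    /// the BRK mutex and moves with the real `brk` syscall.
    struct FakeBrk {
        current: usize,
    }

    impl FakeBrk {
        /// Release a block (start address and size) only if it is the segment
        /// directly below the break; otherwise refuse and return it.
        fn release(&mut self, start: usize, size: usize) -> Result<(), (usize, usize)> {
            if start + size == self.current {
                // Safe to shrink: the block ends exactly at the break.
                self.current -= size;
                Ok(())
            } else {
                // Not adjacent to the break; shrinking would free unrelated memory.
                Err((start, size))
            }
        }
    }

    fn main() {
        let mut brk = FakeBrk { current: 0x5000 };

        // A block ending at the break is released and the cache moves down.
        assert!(brk.release(0x4000, 0x1000).is_ok());
        assert_eq!(brk.current, 0x4000);

        // A block in the middle of the heap is refused and handed back.
        assert_eq!(brk.release(0x1000, 0x800), Err((0x1000, 0x800)));
    }
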
 

+ 0 - 2
src/sys.rs

@@ -61,7 +61,6 @@ pub fn log(s: &str) -> Result<(), ()> {
 /// Tell the debugger that this segment is free.
 ///
 /// If the `debugger` feature is disabled, this is a NOOP.
-#[inline(always)]
 pub fn mark_free(_ptr: *const u8, _size: usize) {
     #[cfg(feature = "debugger")]
     shim::debug::mark_free(_ptr, _size);
@@ -70,7 +69,6 @@ pub fn mark_free(_ptr: *const u8, _size: usize) {
 /// Tell the debugger that this segment is inaccessible.
 ///
 /// If the `debugger` feature is disabled, this is a NOOP.
-#[inline(always)]
 pub fn mark_uninitialized(_ptr: *const u8, _size: usize) {
     #[cfg(feature = "debugger")]
     shim::debug::mark_free(_ptr, _size);