brk.rs 6.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216
  1. //! BRK abstractions.
  2. //!
  3. //! This module provides safe abstractions over BRK.
  4. use prelude::*;
  5. use core::ptr;
  6. use core::convert::TryInto;
  7. use shim::{syscalls, config};
  8. use {sync, fail};
/// The BRK mutex.
///
/// This is used for avoiding data races when multiple allocators manipulate the program break
/// concurrently: all break manipulation goes through the `BrkLock` guard handed out by `lock()`.
static BRK_MUTEX: Mutex<BrkState> = Mutex::new(BrkState {
    // Start with an empty cache; the first query populates it from the OS.
    current_brk: None,
});
/// A cache of the BRK state.
///
/// To avoid repeatedly asking the OS for information whenever it is needed, we cache it.
struct BrkState {
    /// The program break's end, if it has been queried or set already (`None` until then).
    current_brk: Option<Pointer<u8>>,
}
/// A BRK lock.
///
/// Holding this guard grants exclusive access to manipulate the program break; it is obtained
/// via the module-level `lock()` function.
pub struct BrkLock {
    /// The inner lock guard over the cached BRK state.
    state: sync::MutexGuard<'static, BrkState>,
}
  27. impl BrkLock {
  28. /// Extend the program break.
  29. ///
  30. /// # Safety
  31. ///
  32. /// Due to being able shrink the program break, this method is unsafe.
  33. unsafe fn sbrk(&mut self, size: isize) -> Result<Pointer<u8>, ()> {
  34. log!(NOTE, "Incrementing the program break by {} bytes.", size);
  35. // Calculate the new program break. To avoid making multiple syscalls, we make use of the
  36. // state cache.
  37. let expected_brk = self.current_brk().offset(size);
  38. // Break it to me, babe!
  39. let old_brk = Pointer::new(syscalls::brk(expected_brk.get() as *const u8) as *mut u8);
  40. /// AAAARGH WAY TOO MUCH LOGGING
  41. ///
  42. /// No, sweetie. Never too much logging.
  43. ///
  44. /// REEEEEEEEEEEEEEEEEEEEEE
  45. log!(INTERNAL, "Program break set.");
  46. if expected_brk == old_brk {
  47. // Update the program break cache.
  48. self.state.current_brk = Some(expected_brk.clone());
  49. // Return the old break.
  50. Ok(old_brk)
  51. } else {
  52. // BRK failed. This syscall is rather weird, but whenever it fails (e.g. OOM) it
  53. // returns the old (unchanged) break.
  54. Err(())
  55. }
  56. }
  57. /// Safely release memory to the OS.
  58. ///
  59. /// If failed, we return the memory.
  60. #[allow(cast_possible_wrap)]
  61. pub fn release(&mut self, block: Block) -> Result<(), Block> {
  62. // Check if we are actually next to the program break.
  63. if self.current_brk() == Pointer::from(block.empty_right()) {
  64. // Logging...
  65. log!(DEBUG, "Releasing {:?} to the OS.", block);
  66. // We are. Now, sbrk the memory back. Do to the condition above, this is safe.
  67. let res = unsafe {
  68. // LAST AUDIT: 2016-08-21 (Ticki).
  69. // Note that the end of the block is addressable, making the size as well. For this
  70. // reason the first bit is unset and the cast will never wrap.
  71. self.sbrk(-(block.size() as isize))
  72. };
  73. // In debug mode, we want to check for WTF-worthy scenarios.
  74. debug_assert!(res.is_ok(), "Failed to set the program break back.");
  75. Ok(())
  76. } else {
  77. // Logging...
  78. log!(DEBUG, "Unable to release {:?} to the OS.", block);
  79. // Return the block back.
  80. Err(block)
  81. }
  82. }
  83. /// Get the current program break.
  84. ///
  85. /// If not available in the cache, requested it from the OS.
  86. fn current_brk(&mut self) -> Pointer<u8> {
  87. if let Some(ref cur) = self.state.current_brk {
  88. let res = cur.clone();
  89. // Make sure that the break is set properly (i.e. there is no libc interference).
  90. debug_assert!(res == current_brk(), "The cached program break is out of sync with the \
  91. actual program break. Are you interfering with BRK? If so, prefer the \
  92. provided 'sbrk' instead, then.");
  93. return res;
  94. }
  95. // TODO: Damn it, borrowck.
  96. // Get the current break.
  97. let cur = current_brk();
  98. self.state.current_brk = Some(cur.clone());
  99. cur
  100. }
  101. /// BRK new space.
  102. ///
  103. /// The first block represents the aligner segment (that is the precursor aligning the middle
  104. /// block to `align`), the second one is the result and is of exactly size `size`. The last
  105. /// block is the excessive space.
  106. ///
  107. /// # Failure
  108. ///
  109. /// This method calls the OOM handler if it is unable to acquire the needed space.
  110. // TODO: This method is possibly unsafe.
  111. pub fn canonical_brk(&mut self, size: usize, align: usize) -> (Block, Block, Block) {
  112. // Calculate the canonical size (extra space is allocated to limit the number of system calls).
  113. let brk_size = size + config::extra_brk(size) + align;
  114. // Use SBRK to allocate extra data segment. The alignment is used as precursor for our
  115. // allocated block. This ensures that it is properly memory aligned to the requested value.
  116. // TODO: Audit the casts.
  117. let (alignment_block, rest) = unsafe {
  118. // LAST AUDIT: 2016-08-21 (Ticki).
  119. Block::from_raw_parts(
  120. // Important! The conversion is failable to avoid arithmetic overflow-based
  121. // attacks.
  122. self.sbrk(brk_size.try_into().unwrap()).unwrap_or_else(|()| fail::oom()),
  123. brk_size,
  124. )
  125. }.align(align).unwrap();
  126. // Split the block to leave the excessive space.
  127. let (res, excessive) = rest.split(size);
  128. // Make some assertions.
  129. debug_assert!(res.aligned_to(align), "Alignment failed.");
  130. debug_assert!(res.size() + alignment_block.size() + excessive.size() == brk_size, "BRK memory leak.");
  131. (alignment_block, res, excessive)
  132. }
  133. }
  134. /// Lock the BRK lock to allow manipulating the program break.
  135. pub fn lock() -> BrkLock {
  136. BrkLock {
  137. state: BRK_MUTEX.lock(),
  138. }
  139. }
  140. /// `SBRK` symbol which can coexist with the allocator.
  141. ///
  142. /// `SBRK`-ing directly (from the `BRK` syscall or libc) might make the state inconsistent. This
  143. /// function makes sure that's not happening.
  144. ///
  145. /// With the exception of being able to coexist, it follows the same rules. Refer to the relevant
  146. /// documentation.
  147. ///
  148. /// # Failure
  149. ///
  150. /// On failure the maximum pointer (`!0 as *mut u8`) is returned.
  151. pub unsafe extern fn sbrk(size: isize) -> *mut u8 {
  152. lock().sbrk(size).unwrap_or_else(|()| Pointer::new(!0 as *mut u8)).get()
  153. }
  154. /// Get the current program break.
  155. fn current_brk() -> Pointer<u8> {
  156. unsafe {
  157. // LAST AUDIT: 2016-08-21 (Ticki).
  158. Pointer::new(syscalls::brk(ptr::null()) as *mut u8)
  159. }
  160. }
  161. #[cfg(test)]
  162. mod test {
  163. use super::*;
  164. #[test]
  165. fn test_ordered() {
  166. let brk = lock().canonical_brk(20, 1);
  167. assert!(brk.0 <= brk.1);
  168. assert!(brk.1 <= brk.2);
  169. }
  170. #[test]
  171. fn test_brk_grow_up() {
  172. unsafe {
  173. let brk1 = lock().sbrk(5).unwrap();
  174. let brk2 = lock().sbrk(100).unwrap();
  175. assert!(brk1.get() < brk2.get());
  176. }
  177. }
  178. }