brk.rs 6.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226
  1. //! BRK abstractions.
  2. //!
  3. //! This module provides safe abstractions over BRK.
  4. use prelude::*;
  5. use core::convert::TryInto;
  6. use core::ptr;
  7. use shim::{config, syscalls};
  8. use {fail, sync};
/// The BRK mutex.
///
/// This is used for avoiding data races in multiple allocator.
///
/// The guarded state caches the current program break; all manipulation of the break must go
/// through the `BrkLock` handed out by `lock()` so the cache stays consistent.
static BRK_MUTEX: Mutex<BrkState> = Mutex::new(BrkState { current_brk: None });
/// A cache of the BRK state.
///
/// To avoid keeping asking the OS for information whenever needed, we cache it.
struct BrkState {
    /// The program break's end.
    ///
    /// `None` until the first query/extension of the break; after that it mirrors the value the
    /// kernel holds (debug builds verify this in `BrkLock::current_brk`).
    current_brk: Option<Pointer<u8>>,
}
/// A BRK lock.
///
/// Holding this handle proves exclusive access to the program break; it is obtained via `lock()`
/// and releases the underlying mutex when dropped.
pub struct BrkLock {
    /// The inner lock (guard over the cached BRK state).
    state: sync::MutexGuard<'static, BrkState>,
}
  25. impl BrkLock {
  26. /// Extend the program break, and return the old one.
  27. ///
  28. /// # Safety
  29. ///
  30. /// Due to being able shrink the program break, this method is unsafe.
  31. unsafe fn sbrk(&mut self, size: isize) -> Result<Pointer<u8>, ()> {
  32. log!(NOTE, "Incrementing the program break by {} bytes.", size);
  33. // Calculate the new program break. To avoid making multiple syscalls, we make use of the
  34. // state cache.
  35. let old_brk = self.current_brk();
  36. let expected_brk = old_brk.clone().offset(size);
  37. // Break it to me, babe!
  38. let new_brk = Pointer::new(syscalls::brk(expected_brk.get() as *const u8) as *mut u8);
  39. /// AAAARGH WAY TOO MUCH LOGGING
  40. ///
  41. /// No, sweetie. Never too much logging.
  42. ///
  43. /// REEEEEEEEEEEEEEEEEEEEEE
  44. log!(INTERNAL, "Program break set.");
  45. if expected_brk == new_brk {
  46. // Update the program break cache.
  47. self.state.current_brk = Some(expected_brk.clone());
  48. // Return the old break.
  49. Ok(old_brk)
  50. } else {
  51. // BRK failed. This syscall is rather weird, but whenever it fails (e.g. OOM) it
  52. // returns the old (unchanged) break.
  53. assert_eq!(old_brk, new_brk);
  54. Err(())
  55. }
  56. }
  57. /// Safely release memory to the OS.
  58. ///
  59. /// If failed, we return the memory.
  60. pub fn release(&mut self, block: Block) -> Result<(), Block> {
  61. // Check if we are actually next to the program break.
  62. if self.current_brk() == Pointer::from(block.empty_right()) {
  63. // Logging...
  64. log!(DEBUG, "Releasing {:?} to the OS.", block);
  65. // We are. Now, sbrk the memory back. Do to the condition above, this is safe.
  66. let res = unsafe {
  67. // LAST AUDIT: 2016-08-21 (Ticki).
  68. // Note that the end of the block is addressable, making the size as well. For this
  69. // reason the first bit is unset and the cast will never wrap.
  70. self.sbrk(-(block.size() as isize))
  71. };
  72. // In debug mode, we want to check for WTF-worthy scenarios.
  73. debug_assert!(res.is_ok(), "Failed to set the program break back.");
  74. Ok(())
  75. } else {
  76. // Logging...
  77. log!(DEBUG, "Unable to release {:?} to the OS.", block);
  78. // Return the block back.
  79. Err(block)
  80. }
  81. }
  82. /// Get the current program break.
  83. ///
  84. /// If not available in the cache, requested it from the OS.
  85. fn current_brk(&mut self) -> Pointer<u8> {
  86. if let Some(ref cur) = self.state.current_brk {
  87. let res = cur.clone();
  88. // Make sure that the break is set properly (i.e. there is no libc interference).
  89. debug_assert!(
  90. res == current_brk(),
  91. "The cached program break is out of sync with the \
  92. actual program break. Are you interfering with BRK? If so, prefer the \
  93. provided 'sbrk' instead, then."
  94. );
  95. return res;
  96. }
  97. // TODO: Damn it, borrowck.
  98. // Get the current break.
  99. let cur = current_brk();
  100. self.state.current_brk = Some(cur.clone());
  101. cur
  102. }
  103. /// BRK new space.
  104. ///
  105. /// The first block represents the aligner segment (that is the precursor aligning the middle
  106. /// block to `align`), the second one is the result and is of exactly size `size`. The last
  107. /// block is the excessive space.
  108. ///
  109. /// # Failure
  110. ///
  111. /// This method calls the OOM handler if it is unable to acquire the needed space.
  112. // TODO: This method is possibly unsafe.
  113. pub fn canonical_brk(&mut self, size: usize, align: usize) -> (Block, Block, Block) {
  114. // Calculate the canonical size (extra space is allocated to limit the number of system calls).
  115. let brk_size = size + config::extra_brk(size) + align;
  116. // Use SBRK to allocate extra data segment. The alignment is used as precursor for our
  117. // allocated block. This ensures that it is properly memory aligned to the requested value.
  118. // TODO: Audit the casts.
  119. let (alignment_block, rest) = unsafe {
  120. // LAST AUDIT: 2016-08-21 (Ticki).
  121. Block::from_raw_parts(
  122. // Important! The conversion is failable to avoid arithmetic overflow-based
  123. // attacks.
  124. self.sbrk(brk_size.try_into().unwrap())
  125. .unwrap_or_else(|()| fail::oom()),
  126. brk_size,
  127. )
  128. }.align(align)
  129. .unwrap();
  130. // Split the block to leave the excessive space.
  131. let (res, excessive) = rest.split(size);
  132. // Make some assertions.
  133. debug_assert!(res.aligned_to(align), "Alignment failed.");
  134. debug_assert!(
  135. res.size() + alignment_block.size() + excessive.size() == brk_size,
  136. "BRK memory leak."
  137. );
  138. (alignment_block, res, excessive)
  139. }
  140. }
  141. /// Lock the BRK lock to allow manipulating the program break.
  142. pub fn lock() -> BrkLock {
  143. BrkLock {
  144. state: BRK_MUTEX.lock(),
  145. }
  146. }
  147. /// `SBRK` symbol which can coexist with the allocator.
  148. ///
  149. /// `SBRK`-ing directly (from the `BRK` syscall or libc) might make the state inconsistent. This
  150. /// function makes sure that's not happening.
  151. ///
  152. /// With the exception of being able to coexist, it follows the same rules. Refer to the relevant
  153. /// documentation.
  154. ///
  155. /// # Failure
  156. ///
  157. /// On failure the maximum pointer (`!0 as *mut u8`) is returned.
  158. pub unsafe extern "C" fn sbrk(size: isize) -> *mut u8 {
  159. lock()
  160. .sbrk(size)
  161. .unwrap_or_else(|()| Pointer::new(!0 as *mut u8))
  162. .get()
  163. }
  164. /// Get the current program break.
  165. fn current_brk() -> Pointer<u8> {
  166. unsafe {
  167. // LAST AUDIT: 2016-08-21 (Ticki).
  168. Pointer::new(syscalls::brk(ptr::null()) as *mut u8)
  169. }
  170. }
  171. #[cfg(test)]
  172. mod test {
  173. use super::*;
  174. #[test]
  175. fn test_ordered() {
  176. let brk = lock().canonical_brk(20, 1);
  177. assert!(brk.0 <= brk.1);
  178. assert!(brk.1 <= brk.2);
  179. }
  180. #[test]
  181. fn test_brk_grow_up() {
  182. unsafe {
  183. let brk1 = lock().sbrk(5).unwrap();
  184. let brk2 = lock().sbrk(100).unwrap();
  185. assert!(brk1.get() < brk2.get());
  186. }
  187. }
  188. }