// brk.rs (page-header and line-number extraction artifacts removed)
  1. //! BRK abstractions.
  2. //!
  3. //! This module provides safe abstractions over BRK.
  4. use prelude::*;
  5. use core::ptr;
  6. use core::convert::TryInto;
  7. use shim::{syscalls, config};
  8. use {sync, fail, block, ptr};
  9. /// The BRK mutex.
  10. ///
  11. /// This is used for avoiding data races in multiple allocator.
  12. static BRK_MUTEX: Mutex<BrkState> = Mutex::new(BrkState {
  13. current_brk: None,
  14. });
  15. /// A cache of the BRK state.
  16. ///
  17. /// To avoid keeping asking the OS for information whenever needed, we cache it.
  18. struct BrkState {
  19. /// The program break's end
  20. current_brk: Option<Pointer<u8>>,
  21. }
  22. /// A BRK lock.
  23. pub struct BrkLock {
  24. /// The inner lock.
  25. state: sync::MutexGuard<'static, BrkState>,
  26. }
impl BrkLock {
    /// Extend the program break.
    ///
    /// Returns the _old_ break on success.
    ///
    /// # Safety
    ///
    /// Due to being able to shrink the program break, this method is unsafe: shrinking
    /// can deallocate memory that is still referenced elsewhere.
    unsafe fn sbrk(&mut self, size: isize) -> Result<Pointer<u8>, ()> {
        log!(NOTE, "Incrementing the program break by {} bytes.", size);

        // Calculate the new program break. To avoid making multiple syscalls, we make use of the
        // state cache.
        let expected_brk = self.current_brk().offset(size);

        // Break it to me, babe!
        let old_brk = Pointer::new(syscalls::brk(expected_brk) as *mut u8);

        // AAAARGH WAY TOO MUCH LOGGING
        //
        // No, sweetie. Never too much logging.
        //
        // REEEEEEEEEEEEEEEEEEEEEE
        // (NOTE(review): these were `///` doc comments, which are not valid on a
        // statement — demoted to plain comments.)
        log!(INTERNAL, "Program break set.");

        if expected_brk == old_brk {
            // The syscall handed back exactly what we asked for, so it succeeded.
            // Update the program break cache.
            self.state.current_brk = Some(expected_brk.clone());

            // Return the old break.
            Ok(old_brk)
        } else {
            // BRK failed. This syscall is rather weird, but whenever it fails (e.g. OOM) it
            // returns the old (unchanged) break.
            Err(())
        }
    }

    /// Safely release memory to the OS.
    ///
    /// Only possible when `block` sits immediately below the current program break;
    /// if it doesn't, we return the block back to the caller unchanged.
    pub fn release(&mut self, block: Block) -> Result<(), Block> {
        // Check if we are actually next to the program break.
        if self.current_brk() == Pointer::from(block.empty_right()) {
            log!(DEBUG, "Releasing {:?} to the OS.", block);

            // We are. Now, sbrk the memory back. Due to the condition above, this is safe.
            let res = unsafe {
                // LAST AUDIT: 2016-08-21 (Ticki).

                // Note that the end of the block is addressable, making the size as well. For this
                // reason the first bit is unset and the cast will never wrap.
                self.sbrk(-(block.size() as isize))
            };

            // In debug mode, we want to check for WTF-worthy scenarios.
            // NOTE(review): in release builds a failed shrink is silently ignored here — the
            // memory is simply not returned to the OS, which is lossy but not unsound.
            debug_assert!(res.is_ok(), "Failed to set the program break back.");

            Ok(())
        } else {
            log!(DEBUG, "Unable to release {:?} to the OS.", block);

            // Return the block back.
            Err(block)
        }
    }

    /// Get the current program break.
    ///
    /// If not available in the cache, request it from the OS and populate the cache.
    fn current_brk(&mut self) -> Pointer<u8> {
        if let Some(ref cur) = self.state.current_brk {
            let res = cur.clone();
            // Make sure that the break is set properly (i.e. there is no libc interference).
            debug_assert!(res == current_brk(), "The cached program break is out of sync with the \
                          actual program break. Are you interfering with BRK? If so, prefer the \
                          provided 'sbrk' instead, then.");

            return res;
        }

        // Cache miss: fall through here rather than using `else`, because the borrow of
        // `self.state` from the `if let` above must end before we can mutate it.
        // TODO: Damn it, borrowck.
        // Get the current break.
        let cur = current_brk();
        self.state.current_brk = Some(cur.clone());

        cur
    }

    /// BRK new space.
    ///
    /// The first block represents the aligner segment (that is the precursor aligning the middle
    /// block to `align`), the second one is the result and is of exactly size `size`. The last
    /// block is the excessive space.
    ///
    /// # Failure
    ///
    /// This method calls the OOM handler if it is unable to acquire the needed space.
    // TODO: This method is possibly unsafe.
    pub fn canonical_brk(&mut self, size: block::Size, align: ptr::Align) -> (Block, Block, Block) {
        // Calculate the canonical size (extra space is allocated to limit the number of system calls).
        // NOTE(review): assumes `Size + Align` addition yields enough slack to align within the
        // requested segment — confirm against the `block`/`ptr` type definitions.
        let brk_size = size + config::extra_brk(size) + align;

        // Use SBRK to allocate extra data segment. The alignment is used as precursor for our
        // allocated block. This ensures that it is properly memory aligned to the requested value.
        // TODO: Audit the casts.
        let (alignment_block, rest) = unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).
            Block::from_raw_parts(
                // Important! The conversion is failable to avoid arithmetic overflow-based
                // attacks.
                self.sbrk(brk_size.try_into().unwrap()).unwrap_or_else(|()| fail::oom()),
                brk_size,
            )
        }.align(align).unwrap();

        // Split the block to leave the excessive space.
        let (res, excessive) = rest.split(size);

        // Make some assertions.
        debug_assert!(res.aligned_to(align), "Alignment failed.");
        debug_assert!(res.size() + alignment_block.size() + excessive.size() == brk_size, "BRK memory leak.");

        (alignment_block, res, excessive)
    }
}
  131. /// Lock the BRK lock to allow manipulating the program break.
  132. // TODO: Consider making this a method of `BrkLock`.
  133. pub fn lock() -> BrkLock {
  134. BrkLock {
  135. state: BRK_MUTEX.lock(),
  136. }
  137. }
  138. /// `SBRK` symbol which can coexist with the allocator.
  139. ///
  140. /// `SBRK`-ing directly (from the `BRK` syscall or libc) might make the state inconsistent. This
  141. /// function makes sure that's not happening.
  142. ///
  143. /// With the exception of being able to coexist, it follows the same rules. Refer to the relevant
  144. /// documentation.
  145. ///
  146. /// # Failure
  147. ///
  148. /// On failure the maximum pointer (`!0 as *mut u8`) is returned.
  149. pub unsafe extern fn sbrk(size: isize) -> *mut u8 {
  150. *lock().sbrk(size).unwrap_or_else(|()| Pointer::new(!0 as *mut u8))
  151. }
  152. /// Get the current program break.
  153. fn current_brk() -> Pointer<u8> {
  154. unsafe {
  155. // LAST AUDIT: 2016-08-21 (Ticki).
  156. Pointer::new(syscalls::brk(ptr::null()) as *mut u8)
  157. }
  158. }
  159. #[cfg(test)]
  160. mod test {
  161. use super;
  162. #[test]
  163. fn ordered() {
  164. let brk = brk::lock().canonical_brk(20, 1);
  165. assert!(brk.0 <= brk.1);
  166. assert!(brk.1 <= brk.2);
  167. }
  168. #[test]
  169. fn brk_grow_up() {
  170. unsafe {
  171. let brk1 = brk::lock().sbrk(5).unwrap();
  172. let brk2 = brk::lock().sbrk(100).unwrap();
  173. assert!(*brk1 < *brk2);
  174. }
  175. }
  176. #[test]
  177. fn brk_right_segment_change() {
  178. unsafe {
  179. let brk1 = brk::lock().sbrk(5).unwrap();
  180. let brk2 = brk::lock().sbrk(100).unwrap();
  181. assert_eq!(brk1.offset(5), brk2);
  182. assert_eq!(brk2.offset(100), current_brk());
  183. assert_eq!(brk::lock().sbrk(0), current_brk());
  184. }
  185. }
  186. }