//! The global allocator.
//!
//! This contains primitives for the cross-thread allocator.

use prelude::*;

use core::mem;

use {brk, sync};
use bookkeeper::{self, Bookkeeper, Allocator};
use shim::config;

#[cfg(feature = "tls")]
use tls;

/// Alias for the wrapper type of the thread-local variable holding the local allocator.
#[cfg(feature = "tls")]
type ThreadLocalAllocator = MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;

/// The global default allocator.
// TODO: Remove these filthy function pointers.
static GLOBAL_ALLOCATOR: sync::Mutex<LazyInit<fn() -> GlobalAllocator, GlobalAllocator>> =
    sync::Mutex::new(LazyInit::new(GlobalAllocator::init));

#[cfg(feature = "tls")]
tls! {
    /// The thread-local allocator.
    static THREAD_ALLOCATOR: ThreadLocalAllocator = MoveCell::new(Some(LazyInit::new(LocalAllocator::init)));
}
/// Temporarily get the allocator.
///
/// This is simply to avoid repeating ourselves, so we let this take care of the hairy stuff:
///
/// 1. Initialize the allocator if needed.
/// 2. If the local allocator is unavailable (e.g. it has been deinitialized), fall back to the
///    global allocator.
/// 3. Unlock/move temporarily out of reference.
///
/// This is a macro due to the lack of generic closures, which makes it impossible to have one
/// closure for both cases (global and local).
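///
/// # Example
///
/// A usage sketch (not part of the original docs); `size` and `align` are assumed to be in
/// scope:
///
/// ```ignore
/// let block = get_allocator!(|alloc| alloc.alloc(size, align));
/// ```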
// TODO: Instead of falling back to the global allocator, the thread dtor should be set such that
// it runs after the TLS keys that might be declared.
macro_rules! get_allocator {
    (|$v:ident| $b:expr) => {{
        // Get the thread allocator, if TLS is enabled.
        #[cfg(feature = "tls")]
        {
            THREAD_ALLOCATOR.with(|thread_alloc| {
                if let Some(mut thread_alloc_original) = thread_alloc.replace(None) {
                    let res = {
                        // Call the closure involved.
                        let $v = thread_alloc_original.get();
                        $b
                    };

                    // Put back the original allocator.
                    thread_alloc.replace(Some(thread_alloc_original));

                    res
                } else {
                    // The local allocator seems to have been deinitialized, so we fall back to
                    // the global allocator.
                    log!(WARNING, "Accessing the allocator after deinitialization of the local allocator.");

                    // Lock the global allocator.
                    log!(DEBUG, "Locking global allocator.");
                    let mut guard = GLOBAL_ALLOCATOR.lock();

                    // Call the block in question.
                    let $v = guard.get();
                    $b
                }
            })
        }

        // TLS is disabled, so just use the global allocator.
        #[cfg(not(feature = "tls"))]
        {
            // Lock the global allocator.
            log!(DEBUG, "Locking global allocator.");
            let mut guard = GLOBAL_ALLOCATOR.lock();

            // Call the block in question.
            let $v = guard.get();
            $b
        }
    }}
}
/// Global SBRK-based allocator.
///
/// This will extend the data segment whenever new memory is needed. Since this includes leaving
/// userspace, this shouldn't be used when other allocators are available (i.e. the bookkeeper is
/// local).
struct GlobalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

impl GlobalAllocator {
    /// Initialize the global allocator.
    fn init() -> GlobalAllocator {
        log!(NOTE, "Initializing the global allocator.");

        // The initial acquired segment.
        let (aligner, initial_segment, excessive) = brk::lock().canonical_brk(
            4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(),
            mem::align_of::<Block>(),
        );

        // Initialize the new allocator.
        let mut res = GlobalAllocator {
            inner: Bookkeeper::new(unsafe {
                // LAST AUDIT: 2016-08-21 (Ticki).
                Vec::from_raw_parts(initial_segment, 0)
            }),
        };

        // Free the secondary space.
        res.push(aligner);
        res.push(excessive);

        res
    }
}
derive_deref!(GlobalAllocator, Bookkeeper);

impl Allocator for GlobalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Obtain what you need.
        let (alignment_block, res, excessive) = brk::lock().canonical_brk(size, align);

        // Add it to the list. This will not change the order, since the pointer is higher than
        // all the previous blocks (BRK extends the data segment). Although, it is worth noting
        // that the stack is higher than the program break.
        self.push(alignment_block);
        self.push(excessive);

        res
    }
    fn on_new_memory(&mut self) {
        if self.total_bytes() > config::OS_MEMTRIM_LIMIT {
            // Memtrim the fack outta 'em.

            // Pop the last block.
            let block = self.pop().expect("The byte count on the global allocator is invalid.");

            // Check if the memtrim is worth it.
            if block.size() >= config::OS_MEMTRIM_WORTHY {
                log!(NOTE, "Memtrimming the global allocator.");

                // Release the block to the OS.
                if let Err(block) = brk::lock().release(block) {
                    // It failed, so we put the block back.
                    // TODO: This can be done faster.
                    self.push(block);
                }

                // Note that this block is the only block next to the program break, due to the
                // segments being as long as possible. For that reason, repeatedly pushing and
                // releasing would fail.
            } else {
                log!(WARNING, "Memtrimming for the global allocator failed.");

                // Push the block back.
                // TODO: This can be done faster.
                self.push(block);
            }
        }
    }
}
/// A local allocator.
///
/// This acquires memory from the upstream (global) allocator, which is protected by a `Mutex`.
#[cfg(feature = "tls")]
pub struct LocalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}
#[cfg(feature = "tls")]
impl LocalAllocator {
    /// Initialize the local allocator.
    fn init() -> LocalAllocator {
        /// The destructor of the local allocator.
        ///
        /// This will simply free everything to the global allocator.
        extern fn dtor(alloc: &ThreadLocalAllocator) {
            log!(NOTE, "Deinitializing and freeing the local allocator to the global allocator.");

            // This is important! The thread destructors guarantee no particular order, so the
            // allocator could be used _after_ this destructor has finished. In fact, this is a
            // real problem, and it happens when using `Arc` and terminating the main thread. For
            // this reason we place `None` as a permanent marker indicating that the allocator is
            // deinitialized. After such a state is in place, all allocation calls will be
            // redirected to the global allocator, which is of course still usable at this moment.
            let alloc = alloc.replace(None).expect("Thread-local allocator is already freed.");

            // Lock the global allocator.
            let mut global_alloc = GLOBAL_ALLOCATOR.lock();
            let global_alloc = global_alloc.get();

            // TODO: We know this is sorted, so we could exploit that fact to speed up insertion
            // into the global allocator.
            alloc.into_inner().inner.for_each(move |block| global_alloc.free(block));
        }

        log!(NOTE, "Initializing the local allocator.");

        // The initial acquired segment.
        let initial_segment = GLOBAL_ALLOCATOR
            .lock()
            .get()
            .alloc(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());

        unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).

            // Register the thread destructor on the current thread.
            THREAD_ALLOCATOR.register_thread_destructor(dtor);

            LocalAllocator {
                inner: Bookkeeper::new(Vec::from_raw_parts(initial_segment, 0)),
            }
        }
    }
}
#[cfg(feature = "tls")]
derive_deref!(LocalAllocator, Bookkeeper);

#[cfg(feature = "tls")]
impl Allocator for LocalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Get the block from the global allocator. Note that we cannot canonicalize `size`,
        // since freeing the excessive blocks would change the order.
        GLOBAL_ALLOCATOR.lock().get().alloc(size, align)
    }

    #[inline]
    fn on_new_memory(&mut self) {
        // The idea is to free memory to the global allocator to unify small stubs and avoid
        // fragmentation and thread accumulation.
        if self.total_bytes() < config::FRAGMENTATION_SCALE * self.len()
            || self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT {
            // Log stuff.
            log!(NOTE, "Memtrimming the local allocator.");

            // Lock the global allocator.
            let mut global_alloc = GLOBAL_ALLOCATOR.lock();
            let global_alloc = global_alloc.get();

            while let Some(block) = self.pop() {
                // Pop'n'free.
                global_alloc.free(block);

                // Memtrim 'till we won't memtrim anymore.
                if self.total_bytes() < config::LOCAL_MEMTRIM_STOP { break; }
            }
        }
    }
}
/// Allocate a block of memory.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
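///
/// # Example
///
/// A minimal usage sketch (not part of the original docs):
///
/// ```ignore
/// // Allocate 64 bytes with 8-byte alignment.
/// let ptr = ralloc::alloc(64, 8);
/// ```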
#[inline]
pub fn alloc(size: usize, align: usize) -> *mut u8 {
    log!(CALL, "Allocating buffer of size {} (align {}).", size, align);

    get_allocator!(|alloc| *Pointer::from(alloc.alloc(size, align)))
}
/// Free a buffer.
///
/// Note that this does not have to be a buffer allocated through ralloc. The only requirement is
/// that it is not used after the free.
///
/// # Important!
///
/// You should only free buffers allocated through `ralloc`. Anything else is considered
/// invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Rust assumes that the allocation symbols return correct values. For this reason, freeing
/// invalid pointers might introduce memory unsafety.
///
/// Secondly, freeing a buffer that is still in use can introduce use-after-free.
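///
/// # Example
///
/// A sketch of the alloc/free pairing (not part of the original docs); note that the size
/// passed to `free` must match the allocation:
///
/// ```ignore
/// let ptr = ralloc::alloc(64, 8);
/// unsafe { ralloc::free(ptr, 64); }
/// ```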
#[inline]
pub unsafe fn free(ptr: *mut u8, size: usize) {
    log!(CALL, "Freeing buffer of size {}.", size);

    get_allocator!(|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size)))
}
/// Reallocate memory.
///
/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
/// returned pointer with size `size`.
///
/// # Important!
///
/// You should only reallocate buffers allocated through `ralloc`. Anything else is considered
/// invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Since this can potentially memcpy an arbitrary buffer, as well as shrink a buffer, it is
/// marked unsafe.
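///
/// # Example
///
/// A growth sketch (not part of the original docs):
///
/// ```ignore
/// let ptr = ralloc::alloc(64, 8);
/// // Grow the buffer from 64 to 128 bytes, keeping 8-byte alignment.
/// let ptr = unsafe { ralloc::realloc(ptr, 64, 128, 8) };
/// unsafe { ralloc::free(ptr, 128); }
/// ```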
#[inline]
pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
    log!(CALL, "Reallocating buffer of size {} to new size {}.", old_size, size);

    get_allocator!(|alloc| {
        *Pointer::from(alloc.realloc(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size,
            align
        ))
    })
}
/// Try to reallocate the buffer _inplace_.
///
/// In case of success, return `Ok(())`. On failure, return `Err(())`, leaving the buffer at its
/// old size.
///
/// This can be used to shrink (truncate) a buffer as well.
///
/// # Safety
///
/// Due to being able to shrink (and thus free) the buffer, this is marked unsafe.
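///
/// # Example
///
/// A truncation sketch (not part of the original docs):
///
/// ```ignore
/// let ptr = ralloc::alloc(128, 8);
/// // Try to shrink the buffer to 64 bytes without moving it.
/// if unsafe { ralloc::realloc_inplace(ptr, 128, 64) }.is_ok() {
///     unsafe { ralloc::free(ptr, 64); }
/// } else {
///     unsafe { ralloc::free(ptr, 128); }
/// }
/// ```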
#[inline]
pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
    log!(CALL, "Inplace reallocating buffer of size {} to new size {}.", old_size, size);

    get_allocator!(|alloc| {
        if alloc.realloc_inplace(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size
        ).is_ok() {
            Ok(())
        } else {
            Err(())
        }
    })
}
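
// A minimal sanity-check sketch (not from the original source): it exercises the public API
// above through an alloc/realloc/free round trip. It assumes the crate's test harness links
// against this allocator with the default feature set.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alloc_realloc_free_roundtrip() {
        // Allocate a small, 8-byte aligned buffer.
        let ptr = alloc(64, 8);
        assert!(!ptr.is_null());

        unsafe {
            // Write and read back a byte to check that the buffer is usable.
            *ptr = 0xAB;
            assert_eq!(*ptr, 0xAB);

            // Grow the buffer; the returned pointer may differ from the original.
            let ptr = realloc(ptr, 64, 128, 8);
            assert!(!ptr.is_null());

            // Hand the buffer back, passing the current size.
            free(ptr, 128);
        }
    }
}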