//! The global allocator.
//!
//! This contains primitives for the cross-thread allocator.

use prelude::*;

use core::{mem, ops};

use {brk, sync};
use bookkeeper::{self, Bookkeeper, Allocator};
use shim::config;

#[cfg(feature = "tls")]
use tls;

/// Alias for the wrapper type of the thread-local variable holding the local allocator.
#[cfg(feature = "tls")]
type ThreadLocalAllocator = MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;

/// The global default allocator.
// TODO: Remove these filthy function pointers.
static GLOBAL_ALLOCATOR: sync::Mutex<LazyInit<fn() -> GlobalAllocator, GlobalAllocator>> =
    sync::Mutex::new(LazyInit::new(GlobalAllocator::init));

#[cfg(feature = "tls")]
tls! {
    /// The thread-local allocator.
    static THREAD_ALLOCATOR: ThreadLocalAllocator =
        MoveCell::new(Some(LazyInit::new(LocalAllocator::init)));
}
/// Temporarily get the allocator.
///
/// This is simply to avoid repeating ourselves, so we let this take care of the hairy stuff:
///
/// 1. Initialize the allocator if needed.
/// 2. If the local allocator has been deinitialized, fall back to the global allocator.
/// 3. Unlock/move temporarily out of the reference.
///
/// This is a macro due to the lack of generic closures, which makes it impossible to have one
/// closure for both cases (global and local).
// TODO: Instead of falling back to the global allocator, the thread dtor should be set such that
// it runs after the TLS keys that might be declared.
macro_rules! get_allocator {
    (|$v:ident| $b:expr) => {{
        // Get the thread allocator, if TLS is enabled.
        #[cfg(feature = "tls")]
        {
            THREAD_ALLOCATOR.with(|thread_alloc| {
                if let Some(mut thread_alloc_original) = thread_alloc.replace(None) {
                    let res = {
                        // Call the closure involved.
                        let $v = thread_alloc_original.get();
                        $b
                    };

                    // Put back the original allocator.
                    thread_alloc.replace(Some(thread_alloc_original));

                    res
                } else {
                    // The local allocator seems to have been deinitialized, so we fall back to
                    // the global allocator.
                    log!(WARNING, "Accessing the allocator after deinitialization of the local allocator.");

                    // Lock the global allocator.
                    let mut guard = GLOBAL_ALLOCATOR.lock();

                    // Call the block in question.
                    let $v = guard.get();
                    $b
                }
            })
        }

        // TLS is disabled, so we just use the global allocator.
        #[cfg(not(feature = "tls"))]
        {
            // Lock the global allocator.
            let mut guard = GLOBAL_ALLOCATOR.lock();

            // Call the block in question.
            let $v = guard.get();
            $b
        }
    }}
}
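// As a sketch, a call site below such as
//
//     get_allocator!(|alloc| alloc.free(block))
//
// roughly expands to: take the thread-local allocator out of its `MoveCell` (or, if TLS is
// disabled or the local allocator has been deinitialized, lock `GLOBAL_ALLOCATOR` instead),
// bind it to `alloc`, evaluate the block, and put the allocator back.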
/// Derives `Deref` and `DerefMut` to the `inner` field.
///
/// This requires importing `core::ops`.
macro_rules! derive_deref {
    ($imp:ty, $target:ty) => {
        impl ops::Deref for $imp {
            type Target = $target;

            fn deref(&self) -> &$target {
                &self.inner
            }
        }

        impl ops::DerefMut for $imp {
            fn deref_mut(&mut self) -> &mut $target {
                &mut self.inner
            }
        }
    };
}
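// For instance, `derive_deref!(GlobalAllocator, Bookkeeper);` below lets `Bookkeeper` methods
// be called directly on a `GlobalAllocator`: `res.push(aligner)` in `GlobalAllocator::init`
// resolves through `DerefMut` to `Bookkeeper::push`.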
/// Global SBRK-based allocator.
///
/// This will extend the data segment whenever new memory is needed. Since this includes leaving
/// userspace, this shouldn't be used when other allocators are available (i.e. the bookkeeper is
/// local).
struct GlobalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

impl GlobalAllocator {
    /// Initialize the global allocator.
    fn init() -> GlobalAllocator {
        // Logging.
        log!(NOTE, "Initializing the global allocator.");

        // The initial acquired segment.
        let (aligner, initial_segment, excessive) = brk::lock().canonical_brk(
            4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(),
            mem::align_of::<Block>(),
        );

        // Initialize the new allocator.
        let mut res = GlobalAllocator {
            inner: Bookkeeper::new(unsafe {
                // LAST AUDIT: 2016-08-21 (Ticki).
                Vec::from_raw_parts(initial_segment, 0)
            }),
        };

        // Free the secondary space.
        res.push(aligner);
        res.push(excessive);

        res
    }
}
derive_deref!(GlobalAllocator, Bookkeeper);

impl Allocator for GlobalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Obtain what we need.
        let (alignment_block, res, excessive) = brk::lock().canonical_brk(size, align);

        // Add it to the list. This will not change the order, since the pointer is higher than
        // all the previous blocks (BRK extends the data segment). Although, it is worth noting
        // that the stack is higher than the program break.
        self.push(alignment_block);
        self.push(excessive);

        res
    }

    fn on_new_memory(&mut self) {
        if self.total_bytes() > config::OS_MEMTRIM_LIMIT {
            // Memtrim the fack outta 'em.

            // Pop the last block.
            let block = self.pop().expect("The byte count on the global allocator is invalid.");

            // Check if the memtrim is worth it.
            if block.size() >= config::OS_MEMTRIM_WORTHY {
                // Logging.
                log!(NOTE, "Memtrimming the global allocator.");

                // Release the block to the OS.
                if let Err(block) = brk::lock().release(block) {
                    // It failed; put the block back.
                    // TODO: This can be done faster.
                    self.push(block);
                }

                // Note that this block is the only block next to the program break, due to the
                // segments being as long as possible. For that reason, repeating the pop and
                // release would fail.
            } else {
                // Logging.
                log!(WARNING, "Memtrimming for the global allocator failed.");

                // Push the block back.
                // TODO: This can be done faster.
                self.push(block);
            }
        }
    }
}
/// A local allocator.
///
/// This acquires memory from the upstream (global) allocator, which is protected by a `Mutex`.
#[cfg(feature = "tls")]
pub struct LocalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

#[cfg(feature = "tls")]
impl LocalAllocator {
    /// Initialize the local allocator.
    fn init() -> LocalAllocator {
        /// The destructor of the local allocator.
        ///
        /// This will simply free everything to the global allocator.
        extern fn dtor(alloc: &ThreadLocalAllocator) {
            // Logging.
            log!(NOTE, "Deinitializing and freeing the local allocator.");

            // This is important! Thread destructors are not guaranteed to run in any particular
            // order, so the allocator could be used _after_ this destructor has finished. In
            // fact, this is a real problem, and it happens when using `Arc` and terminating the
            // main thread. For this reason, we place `None` as a permanent marker indicating
            // that the allocator is deinitialized. Once this state is in place, all allocation
            // calls will be redirected to the global allocator, which is of course still usable
            // at this moment.
            let alloc = alloc.replace(None).expect("Thread-local allocator is already freed.");

            // Lock the global allocator.
            let mut global_alloc = GLOBAL_ALLOCATOR.lock();
            let global_alloc = global_alloc.get();

            // TODO: We know this is sorted, so we could exploit that fact for faster insertion
            // into the global allocator.
            alloc.into_inner().inner.for_each(move |block| global_alloc.free(block));
        }

        // Logging.
        log!(NOTE, "Initializing the local allocator.");

        // The initial acquired segment.
        let initial_segment = GLOBAL_ALLOCATOR
            .lock()
            .get()
            .alloc(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());

        unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).

            // Register the thread destructor on the current thread.
            THREAD_ALLOCATOR.register_thread_destructor(dtor);

            LocalAllocator {
                inner: Bookkeeper::new(Vec::from_raw_parts(initial_segment, 0)),
            }
        }
    }
}
#[cfg(feature = "tls")]
derive_deref!(LocalAllocator, Bookkeeper);

#[cfg(feature = "tls")]
impl Allocator for LocalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Get the block from the global allocator. Note that we cannot canonicalize `size`,
        // since freeing the excessive blocks would change the order.
        GLOBAL_ALLOCATOR.lock().get().alloc(size, align)
    }

    #[inline]
    fn on_new_memory(&mut self) {
        // The idea is to free memory to the global allocator to unify small stubs and avoid
        // fragmentation and thread accumulation.
        if self.total_bytes() < config::FRAGMENTATION_SCALE * self.len()
            || self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT {
            // Logging.
            log!(NOTE, "Memtrimming the local allocator.");

            // Lock the global allocator.
            let mut global_alloc = GLOBAL_ALLOCATOR.lock();
            let global_alloc = global_alloc.get();

            while let Some(block) = self.pop() {
                // Pop'n'free.
                global_alloc.free(block);

                // Memtrim 'till we won't memtrim anymore.
                if self.total_bytes() < config::LOCAL_MEMTRIM_STOP { break; }
            }
        }
    }
}
/// Allocate a block of memory.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
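///
/// # Example
///
/// A minimal usage sketch. It is marked `ignore` because it assumes these functions are
/// re-exported at the crate root (as `ralloc::alloc` etc.), which this file does not show:
///
/// ```ignore
/// // Allocate a 64-byte buffer with 8-byte alignment.
/// let ptr = ralloc::alloc(64, 8);
/// // ... use the buffer ...
/// // Free it again, passing the same size.
/// unsafe { ralloc::free(ptr, 64); }
/// ```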
#[inline]
pub fn alloc(size: usize, align: usize) -> *mut u8 {
    log!(CALL, "Allocating buffer of size {} (align {}).", size, align);

    get_allocator!(|alloc| *Pointer::from(alloc.alloc(size, align)))
}
/// Free a buffer.
///
/// Note that this does not have to be a buffer allocated through ralloc. The only requirement is
/// that it is not used after the free.
///
/// # Important!
///
/// You should only free buffers allocated through `ralloc`, or memory regions you otherwise own
/// completely. Anything else is considered invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Rust assumes that the allocation symbols return correct values. For this reason, freeing
/// invalid pointers might introduce memory unsafety.
///
/// Secondly, freeing a buffer that is still in use can introduce use-after-free.
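///
/// # Example
///
/// A sketch of the size contract (`ignore`d; assumes a crate-root re-export, as above):
///
/// ```ignore
/// let ptr = ralloc::alloc(128, 1);
/// // The `size` passed to `free` must describe the block being freed: here, the
/// // whole 128-byte buffer.
/// unsafe { ralloc::free(ptr, 128); }
/// ```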
#[inline]
pub unsafe fn free(ptr: *mut u8, size: usize) {
    log!(CALL, "Freeing buffer of size {}.", size);

    get_allocator!(|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size)))
}
/// Reallocate memory.
///
/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
/// returned pointer with size `size`.
///
/// # Important!
///
/// You should only reallocate buffers allocated through `ralloc`. Anything else is considered
/// invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Since this can potentially memcpy an arbitrary buffer, as well as shrink a buffer, it is
/// marked unsafe.
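///
/// # Example
///
/// A sketch of growing a buffer (`ignore`d; assumes a crate-root re-export, as above):
///
/// ```ignore
/// let ptr = ralloc::alloc(32, 8);
/// // Grow the buffer from 32 to 64 bytes; `ptr` must not be used after this call.
/// let new_ptr = unsafe { ralloc::realloc(ptr, 32, 64, 8) };
/// unsafe { ralloc::free(new_ptr, 64); }
/// ```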
#[inline]
pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
    log!(CALL, "Reallocating buffer of size {} to new size {}.", old_size, size);

    get_allocator!(|alloc| {
        *Pointer::from(alloc.realloc(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size,
            align
        ))
    })
}
/// Try to reallocate the buffer _inplace_.
///
/// On success, `Ok(())` is returned; on failure, `Err(())` is returned and the buffer keeps its
/// old size.
///
/// This can be used to shrink (truncate) a buffer as well.
///
/// # Safety
///
/// Due to being able to shrink (and thus free) the buffer, this is marked unsafe.
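///
/// # Example
///
/// A sketch of attempting an in-place shrink (`ignore`d; assumes a crate-root re-export, as
/// above):
///
/// ```ignore
/// let ptr = ralloc::alloc(256, 8);
/// // Try to shrink the buffer to 128 bytes without moving it.
/// match unsafe { ralloc::realloc_inplace(ptr, 256, 128) } {
///     // Success: the buffer is now 128 bytes long.
///     Ok(()) => unsafe { ralloc::free(ptr, 128) },
///     // Failure: the buffer keeps its old 256-byte size.
///     Err(()) => unsafe { ralloc::free(ptr, 256) },
/// }
/// ```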
#[inline]
pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
    log!(CALL, "Inplace reallocating buffer of size {} to new size {}.", old_size, size);

    get_allocator!(|alloc| {
        if alloc.realloc_inplace(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size
        ).is_ok() {
            Ok(())
        } else {
            Err(())
        }
    })
}