
//! The global allocator.
//!
//! This contains primitives for the cross-thread allocator.

use prelude::*;

use core::{mem, ops};

use {brk, sync};
use bookkeeper::{self, Bookkeeper, Allocator};

#[cfg(feature = "tls")]
use tls;

/// Alias for the wrapper type of the thread-local variable holding the local allocator.
#[cfg(feature = "tls")]
type ThreadLocalAllocator = MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;

/// The global default allocator.
// TODO: Remove these filthy function pointers.
static GLOBAL_ALLOCATOR: sync::Mutex<LazyInit<fn() -> GlobalAllocator, GlobalAllocator>> =
    sync::Mutex::new(LazyInit::new(GlobalAllocator::init));
#[cfg(feature = "tls")]
tls! {
    /// The thread-local allocator.
    static THREAD_ALLOCATOR: ThreadLocalAllocator = MoveCell::new(Some(LazyInit::new(LocalAllocator::init)));
}

/// Temporarily get the allocator.
///
/// This is simply to avoid repeating ourselves, so we let this take care of the hairy stuff:
///
/// 1. Initialize the allocator if needed.
/// 2. If the local allocator has been deinitialized, fall back to the global allocator.
/// 3. Unlock/move temporarily out of the reference.
///
/// This is a macro due to the lack of generic closures, which makes it impossible to have one
/// closure for both cases (global and local).
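///
/// # Example
///
/// A sketch of how the public functions below use this macro (this is how `alloc` is
/// implemented):
///
/// ```ignore
/// get_allocator!(|alloc| *Pointer::from(alloc.alloc(size, align)))
/// ```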
// TODO: Instead of falling back to the global allocator, the thread dtor should be set such that
// it runs after the TLS keys that might be declared.
macro_rules! get_allocator {
    (|$v:ident| $b:expr) => {{
        // Get the thread allocator, if TLS is enabled.
        #[cfg(feature = "tls")]
        {
            THREAD_ALLOCATOR.with(|thread_alloc| {
                if let Some(mut thread_alloc_original) = thread_alloc.replace(None) {
                    let res = {
                        // Call the closure involved.
                        let $v = thread_alloc_original.get();
                        $b
                    };

                    // Put back the original allocator.
                    thread_alloc.replace(Some(thread_alloc_original));

                    res
                } else {
                    // The local allocator seems to have been deinitialized, so we fall back to
                    // the global allocator.

                    // Lock the global allocator.
                    let mut guard = GLOBAL_ALLOCATOR.lock();

                    // Call the block in question.
                    let $v = guard.get();
                    $b
                }
            })
        }

        // TLS is disabled, just use the global allocator.
        #[cfg(not(feature = "tls"))]
        {
            // Lock the global allocator.
            let mut guard = GLOBAL_ALLOCATOR.lock();

            // Call the block in question.
            let $v = guard.get();
            $b
        }
    }}
}

/// Derives `Deref` and `DerefMut` to the `inner` field.
///
/// This requires importing `core::ops`.
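///
/// # Example
///
/// A sketch of the intended usage, as done for the allocators below:
///
/// ```ignore
/// derive_deref!(GlobalAllocator, Bookkeeper);
/// ```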
macro_rules! derive_deref {
    ($imp:ty, $target:ty) => {
        impl ops::Deref for $imp {
            type Target = $target;

            fn deref(&self) -> &$target {
                &self.inner
            }
        }

        impl ops::DerefMut for $imp {
            fn deref_mut(&mut self) -> &mut $target {
                &mut self.inner
            }
        }
    };
}

/// Global SBRK-based allocator.
///
/// This will extend the data segment whenever new memory is needed. Since this includes leaving
/// userspace, this shouldn't be used when other allocators are available (i.e. the bookkeeper is
/// local).
struct GlobalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

impl GlobalAllocator {
    /// Initialize the global allocator.
    fn init() -> GlobalAllocator {
        // The initial acquired segment.
        let (aligner, initial_segment, excessive) =
            brk::get(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());

        // Initialize the new allocator.
        let mut res = GlobalAllocator {
            inner: Bookkeeper::new(unsafe {
                Vec::from_raw_parts(initial_segment, 0)
            }),
        };

        // Free the secondary space.
        res.push(aligner);
        res.push(excessive);

        res
    }
}

derive_deref!(GlobalAllocator, Bookkeeper);

impl Allocator for GlobalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Obtain what we need.
        let (alignment_block, res, excessive) = brk::get(size, align);

        // Add it to the list. This will not change the order, since the pointer is higher than
        // all the previous blocks (BRK extends the data segment). Although, it is worth noting
        // that the stack is higher than the program break.
        self.push(alignment_block);
        self.push(excessive);

        res
    }
}

/// A local allocator.
///
/// This acquires memory from the upstream (global) allocator, which is protected by a `Mutex`.
#[cfg(feature = "tls")]
pub struct LocalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

#[cfg(feature = "tls")]
impl LocalAllocator {
    /// Initialize the local allocator.
    fn init() -> LocalAllocator {
        /// The destructor of the local allocator.
        ///
        /// This will simply free everything to the global allocator.
        extern fn dtor(alloc: &ThreadLocalAllocator) {
            // This is important! The thread destructors provide no ordering guarantees, and thus
            // one could use the allocator _after_ this destructor has finished. In fact, this is
            // a real problem, and happens when using `Arc` and terminating the main thread. For
            // this reason we place `None` as a permanent marker indicating that the allocator is
            // deinitialized. After such a state is in place, all allocation calls will be
            // redirected to the global allocator, which is of course still usable at this
            // moment.
            let alloc = alloc.replace(None).expect("Thread-local allocator is already freed.");

            // Lock the global allocator.
            let mut global_alloc = GLOBAL_ALLOCATOR.lock();
            let global_alloc = global_alloc.get();

            // TODO: We know this is sorted, so we could abuse that fact for faster insertion
            // into the global allocator.
            alloc.into_inner().inner.for_each(move |block| global_alloc.free(block));
        }

        // The initial acquired segment.
        let initial_segment = GLOBAL_ALLOCATOR
            .lock()
            .get()
            .alloc(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());

        unsafe {
            // Register the thread destructor on the current thread.
            THREAD_ALLOCATOR.register_thread_destructor(dtor)
                .expect("Unable to register a thread destructor.");

            LocalAllocator {
                inner: Bookkeeper::new(Vec::from_raw_parts(initial_segment, 0)),
            }
        }
    }

    /// Should we memtrim this allocator?
    ///
    /// The idea is to free memory to the global allocator to unify small stubs and avoid
    /// fragmentation and thread accumulation.
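    ///
    /// For example, with the constants below, an allocator tracking 100 blocks spanning only
    /// 500 free bytes (an average block size of 5, below `FRAGMENTATION_SCALE`) should be
    /// memtrimmed, as should any allocator holding more than `LOCAL_MEMTRIM_LIMIT` (16384) free
    /// bytes.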
    fn should_memtrim(&self) -> bool {
        // TODO: Tweak this.

        /// The fragmentation scale constant.
        ///
        /// This is used for determining the minimum average block size before memtrimming.
        const FRAGMENTATION_SCALE: usize = 10;
        /// The local memtrim limit.
        ///
        /// Whenever an allocator has more free bytes than this value, it will be memtrimmed.
        const LOCAL_MEMTRIM_LIMIT: usize = 16384;

        self.total_bytes() < FRAGMENTATION_SCALE * self.len() || self.total_bytes() > LOCAL_MEMTRIM_LIMIT
    }
}

#[cfg(feature = "tls")]
derive_deref!(LocalAllocator, Bookkeeper);

#[cfg(feature = "tls")]
impl Allocator for LocalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Get the block from the global allocator. Please note that we cannot canonicalize
        // `size`, since freeing the excessive blocks would change the order.
        GLOBAL_ALLOCATOR.lock().get().alloc(size, align)
    }

    #[inline]
    fn on_new_memory(&mut self) {
        if self.should_memtrim() {
            // Lock the global allocator.
            let mut global_alloc = GLOBAL_ALLOCATOR.lock();
            let global_alloc = global_alloc.get();

            while let Some(block) = self.pop() {
                // Pop'n'free.
                global_alloc.free(block);

                // Memtrim 'till we can't memtrim anymore.
                if !self.should_memtrim() {
                    break;
                }
            }
        }
    }
}

/// Allocate a block of memory.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
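///
/// # Examples
///
/// A minimal usage sketch. It assumes this function is re-exported at the crate root as
/// `ralloc::alloc`, alongside `ralloc::free`:
///
/// ```no_run
/// // Allocate 1024 bytes with an alignment of 8 (hypothetical sizes for illustration).
/// let ptr = ralloc::alloc(1024, 8);
///
/// // ... use the buffer ...
///
/// // Free it again, passing the size it was allocated with.
/// unsafe { ralloc::free(ptr, 1024); }
/// ```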
#[inline]
pub fn alloc(size: usize, align: usize) -> *mut u8 {
    get_allocator!(|alloc| *Pointer::from(alloc.alloc(size, align)))
}

/// Free a buffer.
///
/// Note that this does not have to be a buffer allocated through ralloc. The only requirement is
/// that it is not used after the free.
///
/// # Important!
///
/// You should only free buffers allocated through `ralloc`. Anything else is considered
/// invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Rust assumes that the allocation symbols return correct values. For this reason, freeing
/// invalid pointers might introduce memory unsafety.
///
/// Secondly, freeing a buffer that is still in use can introduce use-after-free.
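///
/// # Examples
///
/// A minimal sketch, again assuming the crate-root re-exports `ralloc::alloc` and
/// `ralloc::free`:
///
/// ```no_run
/// let ptr = ralloc::alloc(64, 1);
/// unsafe {
///     // The size passed here should match the size of the allocation.
///     ralloc::free(ptr, 64);
/// }
/// ```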
#[inline]
pub unsafe fn free(ptr: *mut u8, size: usize) {
    get_allocator!(|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size)))
}

/// Reallocate memory.
///
/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
/// returned pointer with size `size`.
///
/// # Important!
///
/// You should only reallocate buffers allocated through `ralloc`. Anything else is considered
/// invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Due to being able to potentially memcpy an arbitrary buffer, as well as shrinking a buffer,
/// this is marked unsafe.
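///
/// # Examples
///
/// A minimal sketch (assuming the crate-root re-exports used above):
///
/// ```no_run
/// let ptr = ralloc::alloc(64, 1);
///
/// // Grow the buffer from 64 to 128 bytes. The old pointer must not be used afterwards.
/// let new_ptr = unsafe { ralloc::realloc(ptr, 64, 128, 1) };
///
/// unsafe { ralloc::free(new_ptr, 128); }
/// ```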
#[inline]
pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
    get_allocator!(|alloc| {
        *Pointer::from(alloc.realloc(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size,
            align
        ))
    })
}

/// Try to reallocate the buffer _inplace_.
///
/// In case of success, this returns `Ok(())` and the buffer now has size `size`. On failure, it
/// returns `Err(())` and the buffer keeps its old size.
///
/// This can be used to shrink (truncate) a buffer as well.
///
/// # Safety
///
/// Due to being able to shrink (and thus free) the buffer, this is marked unsafe.
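///
/// # Examples
///
/// A minimal sketch (assuming the crate-root re-exports used above):
///
/// ```no_run
/// let ptr = ralloc::alloc(128, 1);
///
/// unsafe {
///     // Try to shrink the buffer in place; on `Err(())` the buffer keeps its old size.
///     match ralloc::realloc_inplace(ptr, 128, 64) {
///         Ok(()) => ralloc::free(ptr, 64),
///         Err(()) => ralloc::free(ptr, 128),
///     }
/// }
/// ```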
#[inline]
pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
    get_allocator!(|alloc| {
        if alloc.realloc_inplace(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size
        ).is_ok() {
            Ok(())
        } else {
            Err(())
        }
    })
}