//! A SCAllocator that can allocate fixed size objects.

use core::mem;

use crate::*;

/// A genius(?) const min()
///
/// # What this does
/// * create an array of the two elements you want to choose between
/// * create an arbitrary boolean expression
/// * cast said expression to a usize
/// * use that value to index into the array created above
///
/// # Source
/// <https://stackoverflow.com/questions/53619695/calculating-maximum-value-of-a-set-of-constant-expressions-at-compile-time>
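///
/// # Example
/// With hypothetical inputs `a = 4` and `b = 7`: `(a > b) as usize` is `0`,
/// so `[4, 7][0]` yields `4`, i.e. the minimum.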
#[cfg(feature = "unstable")]
const fn cmin(a: usize, b: usize) -> usize {
    [a, b][(a > b) as usize]
}

/// The boring variant of min (not const).
#[cfg(not(feature = "unstable"))]
fn cmin(a: usize, b: usize) -> usize {
    core::cmp::min(a, b)
}

/// A slab allocator that allocates elements of a fixed size.
///
/// It maintains three internal lists of objects that implement `AllocablePage`
/// from which it can allocate memory.
///
/// * `empty_slabs`: A list of pages that the SCAllocator maintains, but which
///   have 0 allocations in them; these can be given back to a requestor in
///   case of reclamation.
/// * `slabs`: A list of pages that are partially allocated and still have
///   room for more.
/// * `full_slabs`: A list of pages that are completely allocated.
///
/// On allocation we allocate memory from `slabs`; however, if that list is
/// empty we try to reclaim a page from `empty_slabs` before we return an
/// out-of-memory error. If a page becomes full after an allocation we move it
/// from `slabs` to `full_slabs`.
///
/// Similarly, on deallocation we might move a page from `full_slabs` to `slabs`
/// or from `slabs` to `empty_slabs` after we deallocated an object.
///
/// If an allocation returns `OutOfMemory`, a client using SCAllocator can
/// refill it using the `refill` function.
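///
/// # Example
/// A minimal usage sketch (assuming the crate's `ObjectPage` page type and a
/// page `page` obtained from some page allocator; hypothetical values, not a
/// runnable test):
///
/// ```rust,ignore
/// let mut sc: SCAllocator<ObjectPage> = SCAllocator::new(16);
/// // Hand the allocator one empty page to carve objects out of.
/// unsafe { sc.refill(page) };
/// let layout = Layout::from_size_align(16, 8).unwrap();
/// let obj = sc.allocate(layout).expect("page has free slots");
/// sc.deallocate(obj, layout).expect("valid pointer and layout");
/// ```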
pub struct SCAllocator<'a, P: AllocablePage> {
    /// Maximum possible allocation size for this `SCAllocator`.
    pub(crate) size: usize,
    /// Keeps track of succeeded allocations.
    pub(crate) allocation_count: usize,
    /// Max objects per page.
    pub(crate) obj_per_page: usize,
    /// List of empty ObjectPages (nothing allocated in these).
    pub(crate) empty_slabs: PageList<'a, P>,
    /// List of partially used ObjectPages (some objects allocated but the pages are not full).
    pub(crate) slabs: PageList<'a, P>,
    /// List of full ObjectPages (everything in these is allocated, so we don't need to search them).
    pub(crate) full_slabs: PageList<'a, P>,
}

/// Creates an instance of an `SCAllocator`; we do this in a macro because we
/// reuse the code in both const and non-const functions.
macro_rules! new_sc_allocator {
    ($size:expr) => {
        SCAllocator {
            size: $size,
            allocation_count: 0,
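            // Cap objects per page at 8 * 64 = 512; we assume this matches the
            // number of bits available in a page's allocation bitfield.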
            obj_per_page: cmin((P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD) / $size, 8 * 64),
            empty_slabs: PageList::new(),
            slabs: PageList::new(),
            full_slabs: PageList::new(),
        }
    };
}

impl<'a, P: AllocablePage> SCAllocator<'a, P> {
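    /// After this many allocations, the page lists are rebalanced
    /// (see `check_page_assignments`).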
    const REBALANCE_COUNT: usize = 64;

    /// Create a new SCAllocator.
    #[cfg(feature = "unstable")]
    pub const fn new(size: usize) -> SCAllocator<'a, P> {
        new_sc_allocator!(size)
    }

    #[cfg(not(feature = "unstable"))]
    pub fn new(size: usize) -> SCAllocator<'a, P> {
        new_sc_allocator!(size)
    }

    /// Returns the maximum supported object size of this allocator.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Add a new ObjectPage.
    fn insert_partial_slab(&mut self, new_head: &'a mut P) {
        self.slabs.insert_front(new_head);
    }

    /// Add a page to the empty list.
    fn insert_empty(&mut self, new_head: &'a mut P) {
        assert_eq!(
            new_head as *const P as usize % P::SIZE,
            0,
            "Inserted page is not aligned to page-size."
        );
        self.empty_slabs.insert_front(new_head);
    }

    /// Since `dealloc` cannot reassign pages without requiring a lock,
    /// we check slabs and full slabs periodically as part of `alloc`
    /// and move them to the empty or partially allocated slab lists.
    pub(crate) fn check_page_assignments(&mut self) {
        for slab_page in self.full_slabs.iter_mut() {
            if !slab_page.is_full() {
                // We need to move it from self.full_slabs -> self.slabs
                trace!("move {:p} full -> partial", slab_page);
                self.move_full_to_partial(slab_page);
            }
        }

        for slab_page in self.slabs.iter_mut() {
            if slab_page.is_empty(self.obj_per_page) {
                // We need to move it from self.slabs -> self.empty_slabs
                trace!("move {:p} partial -> empty", slab_page);
                self.move_to_empty(slab_page);
            }
        }
    }

    /// Move a page from `slabs` to `empty_slabs`.
    fn move_to_empty(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(
            !self.empty_slabs.contains(page_ptr),
            "Page {:p} already in empty_slabs",
            page_ptr
        );

        self.slabs.remove_from_list(page);
        self.empty_slabs.insert_front(page);

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.empty_slabs.contains(page_ptr));
    }

    /// Move a page from `slabs` to `full_slabs`.
    fn move_partial_to_full(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(!self.full_slabs.contains(page_ptr));

        self.slabs.remove_from_list(page);
        self.full_slabs.insert_front(page);

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.full_slabs.contains(page_ptr));
    }

    /// Move a page from `full_slabs` to `slabs`.
    fn move_full_to_partial(&mut self, page: &'a mut P) {
        let page_ptr = page as *const P;

        debug_assert!(!self.slabs.contains(page_ptr));
        debug_assert!(self.full_slabs.contains(page_ptr));

        self.full_slabs.remove_from_list(page);
        self.slabs.insert_front(page);

        debug_assert!(self.slabs.contains(page_ptr));
        debug_assert!(!self.full_slabs.contains(page_ptr));
    }

    /// Tries to allocate a block of memory with respect to the `layout`.
    /// Searches within the already allocated slab pages; if no suitable spot
    /// is found there, the caller (`allocate`) falls back to a page from the
    /// empty page list.
    ///
    /// # Arguments
    /// * `sc_layout`: This is not the original layout but one adjusted for the
    ///   SCAllocator size (>= original).
    fn try_allocate_from_pagelist(&mut self, sc_layout: Layout) -> *mut u8 {
        // TODO: Do we really need to check multiple slab pages (due to alignment)?
        // If not we can get away with a singly-linked list and have 8 more bytes
        // for the bitfield in an ObjectPage.
        for slab_page in self.slabs.iter_mut() {
            let ptr = slab_page.allocate(sc_layout);
            if !ptr.is_null() {
                if slab_page.is_full() {
                    trace!("move {:p} partial -> full", slab_page);
                    self.move_partial_to_full(slab_page);
                }
                self.allocation_count += 1;
                return ptr;
            }
        }

        // Periodically rebalance page-lists (since dealloc can't do it for us)
        if self.allocation_count > SCAllocator::<P>::REBALANCE_COUNT {
            self.check_page_assignments();
            self.allocation_count = 0;
        }

        ptr::null_mut()
    }
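
    /// Tries to reclaim up to `to_reclaim` empty pages from this allocator,
    /// calling `dealloc` on each page handed back, and returns how many pages
    /// were actually reclaimed.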
    pub fn try_reclaim_pages<F>(&mut self, to_reclaim: usize, dealloc: &mut F) -> usize
    where
        F: FnMut(*mut P),
    {
        self.check_page_assignments();
        let mut reclaimed = 0;
        while reclaimed < to_reclaim {
            if let Some(page) = self.empty_slabs.pop() {
                dealloc(page as *mut P);
                reclaimed += 1;
            } else {
                break;
            }
        }
        reclaimed
    }

    /// Refill the SCAllocator.
    ///
    /// # Safety
    /// The caller must ensure the page is empty (no live allocations point
    /// into it) and that it is not already tracked by any allocator list,
    /// since its bitfield and list links are reinitialized here.
    pub unsafe fn refill(&mut self, page: &'a mut P) {
        page.bitfield_mut()
            .initialize(self.size, P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD);
        *page.prev() = Rawlink::none();
        *page.next() = Rawlink::none();
        trace!("adding page to SCAllocator {:p}", page);
        self.insert_empty(page);
    }

    /// Allocates a block of memory described by `layout`.
    ///
    /// Returns a pointer to a valid region of memory or an
    /// `AllocationError`.
    ///
    /// The function may also move around pages between lists
    /// (empty -> partial or partial -> full).
    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError> {
        trace!(
            "SCAllocator({}) is trying to allocate {:?}",
            self.size,
            layout
        );
        assert!(layout.size() <= self.size);
        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
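        // Widen the requested layout to this allocator's object size (keeping
        // the requested alignment) so every allocation occupies a full slot.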
        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
        assert!(new_layout.size() >= layout.size());

        let ptr = {
            // Try to allocate from partial slabs,
            // if we fail check if we have empty pages and allocate from there
            let ptr = self.try_allocate_from_pagelist(new_layout);
            if ptr.is_null() && self.empty_slabs.head.is_some() {
                // Re-try allocation in empty page
                let empty_page = self.empty_slabs.pop().expect("We checked head.is_some()");
                debug_assert!(!self.empty_slabs.contains(empty_page));

                let ptr = empty_page.allocate(layout);
                debug_assert!(!ptr.is_null(), "Allocation must have succeeded here.");

                trace!(
                    "move {:p} empty -> partial empty count {}",
                    empty_page,
                    self.empty_slabs.elements
                );

                // Move empty page to partial pages
                self.insert_partial_slab(empty_page);
                ptr
            } else {
                ptr
            }
        };

        let res = NonNull::new(ptr).ok_or(AllocationError::OutOfMemory);

        if !ptr.is_null() {
            trace!(
                "SCAllocator({}) allocated ptr=0x{:x}",
                self.size,
                ptr as usize
            );
        }

        res
    }

    /// Deallocates a previously allocated `ptr` described by `layout`.
    ///
    /// May return an error in case an invalid `layout` is provided.
    /// The function may also move internal slab pages between lists
    /// (partial -> empty or full -> partial).
    pub fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
        assert!(layout.size() <= self.size);
        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
        trace!(
            "SCAllocator({}) is trying to deallocate ptr = {:p} layout={:?} P.size= {}",
            self.size,
            ptr,
            layout,
            P::SIZE
        );
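        // Mask off the low bits to get the base address of the page that
        // contains `ptr`; this assumes P::SIZE is a power of two and pages
        // are aligned to their size (as asserted in `insert_empty`).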
        let page = (ptr.as_ptr() as usize) & !(P::SIZE - 1);

        // Figure out which page we are on and construct a reference to it
        // TODO: The linked list will have another &mut reference
        let slab_page = unsafe { mem::transmute::<VAddr, &'a mut P>(page) };
        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };

        let ret = slab_page.deallocate(ptr, new_layout);
        debug_assert!(ret.is_ok(), "Slab page deallocate won't fail at the moment");
        ret
    }
}