sc.rs 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358
  1. //! A SCAllocator that can allocate fixed size objects.
  2. use core::mem;
  3. use crate::*;
  4. /// A genius(?) const min()
  5. ///
  6. /// # What this does
  7. /// * create an array of the two elements you want to choose between
  8. /// * create an arbitrary boolean expression
  9. /// * cast said expresison to a usize
  10. /// * use that value to index into the array created above
  11. ///
  12. /// # Source
  13. /// https://stackoverflow.com/questions/53619695/calculating-maximum-value-of-a-set-of-constant-expressions-at-compile-time
  14. #[cfg(feature = "unstable")]
  15. const fn cmin(a: usize, b: usize) -> usize {
  16. [a, b][(a > b) as usize]
  17. }
  18. /// The boring variant of min (not const).
  19. #[cfg(not(feature = "unstable"))]
  20. fn cmin(a: usize, b: usize) -> usize {
  21. core::cmp::min(a, b)
  22. }
/// A slab allocator allocates elements of a fixed size.
///
/// It maintains three internal lists of objects that implement `AllocablePage`
/// from which it can allocate memory.
///
/// * `empty_slabs`: Is a list of pages that the SCAllocator maintains, but
///   has 0 allocations in them, these can be given back to a requestor in case
///   of reclamation.
/// * `slabs`: A list of pages partially allocated and still have room for more.
/// * `full_slabs`: A list of pages that are completely allocated.
///
/// On allocation we allocate memory from `slabs`, however if the list is empty
/// we try to reclaim a page from `empty_slabs` before we return with an out-of-memory
/// error. If a page becomes full after the allocation we move it from `slabs` to
/// `full_slabs`.
///
/// Similarly, on deallocation we might move a page from `full_slabs` to `slabs`
/// or from `slabs` to `empty_slabs` after we deallocated an object.
///
/// If an allocation returns `OutOfMemory` a client using SCAllocator can refill
/// it using the `refill` function.
pub struct SCAllocator<'a, P: AllocablePage> {
    /// Maximum possible allocation size for this `SCAllocator`.
    pub(crate) size: usize,
    /// Keeps track of succeeded allocations; used to trigger the periodic
    /// page-list rebalance in `try_allocate_from_pagelist` (reset after each
    /// rebalance, see `REBALANCE_COUNT`).
    pub(crate) allocation_count: usize,
    /// Maximum number of objects a single page can hold for this size class.
    pub(crate) obj_per_page: usize,
    /// List of empty ObjectPages (nothing allocated in these).
    pub(crate) empty_slabs: PageList<'a, P>,
    /// List of partially used ObjectPage (some objects allocated but pages are not full).
    pub(crate) slabs: PageList<'a, P>,
    /// List of full ObjectPages (everything allocated in these don't need to search them).
    pub(crate) full_slabs: PageList<'a, P>,
    /// Number of currently free objects across all pages owned by this allocator
    /// (incremented by `refill`/`deallocate`, decremented by `allocate`).
    pub(crate) free_obj_count: usize,
    /// Threshold of free objects above which `deallocate` hands empty pages
    /// back to the page supplier.
    pub(crate) free_limit: usize,
}
/// Creates an instance of a scallocator, we do this in a macro because we
/// re-use the code in const and non-const functions
macro_rules! new_sc_allocator {
    ($size:expr) => {{
        // Objects per page is bounded by the usable page space divided by the
        // object size, capped at 8 * 64 — presumably the capacity of the
        // page's allocation bitfield (8 words x 64 bits); confirm against
        // the `AllocablePage` bitfield definition.
        let obj_per_page = cmin((P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD) / $size, 8 * 64);
        SCAllocator {
            size: $size,
            allocation_count: 0,
            obj_per_page,
            // TODO: optimize the computation of free_limit:
            // https://bbs.dragonos.org.cn/t/topic/358
            empty_slabs: PageList::new(),
            slabs: PageList::new(),
            full_slabs: PageList::new(),
            free_limit: 2 * obj_per_page,
            free_obj_count: 0,
        }
    }};
}
  80. impl<'a, P: AllocablePage> SCAllocator<'a, P> {
  81. const REBALANCE_COUNT: usize = 64;
  82. /// Create a new SCAllocator.
  83. #[cfg(feature = "unstable")]
  84. pub const fn new(size: usize) -> SCAllocator<'a, P> {
  85. new_sc_allocator!(size)
  86. }
  87. #[cfg(not(feature = "unstable"))]
  88. pub fn new(size: usize) -> SCAllocator<'a, P> {
  89. new_sc_allocator!(size)
  90. }
  91. /// Returns the maximum supported object size of this allocator.
  92. pub fn size(&self) -> usize {
  93. self.size
  94. }
  95. /// Add a new ObjectPage.
  96. fn insert_partial_slab(&mut self, new_head: &'a mut P) {
  97. self.slabs.insert_front(new_head);
  98. }
  99. /// Add page to empty list.
  100. fn insert_empty(&mut self, new_head: &'a mut P) {
  101. assert_eq!(
  102. new_head as *const P as usize % P::SIZE,
  103. 0,
  104. "Inserted page is not aligned to page-size."
  105. );
  106. self.empty_slabs.insert_front(new_head);
  107. }
  108. /// Since `dealloc` can not reassign pages without requiring a lock
  109. /// we check slabs and full slabs periodically as part of `alloc`
  110. /// and move them to the empty or partially allocated slab lists.
  111. pub(crate) fn check_page_assignments(&mut self) {
  112. for slab_page in self.full_slabs.iter_mut() {
  113. if !slab_page.is_full() {
  114. // We need to move it from self.full_slabs -> self.slabs
  115. trace!("move {:p} full -> partial", slab_page);
  116. self.move_full_to_partial(slab_page);
  117. }
  118. }
  119. for slab_page in self.slabs.iter_mut() {
  120. if slab_page.is_empty(self.obj_per_page) {
  121. // We need to move it from self.slabs -> self.empty_slabs
  122. trace!("move {:p} partial -> empty", slab_page);
  123. self.move_to_empty(slab_page);
  124. }
  125. }
  126. }
  127. /// Move a page from `slabs` to `empty_slabs`.
  128. fn move_to_empty(&mut self, page: &'a mut P) {
  129. let page_ptr = page as *const P;
  130. debug_assert!(self.slabs.contains(page_ptr));
  131. debug_assert!(
  132. !self.empty_slabs.contains(page_ptr),
  133. "Page {:p} already in emtpy_slabs",
  134. page_ptr
  135. );
  136. self.slabs.remove_from_list(page);
  137. self.empty_slabs.insert_front(page);
  138. debug_assert!(!self.slabs.contains(page_ptr));
  139. debug_assert!(self.empty_slabs.contains(page_ptr));
  140. }
  141. /// Move a page from `full_slabs` to `slab`.
  142. fn move_partial_to_full(&mut self, page: &'a mut P) {
  143. let page_ptr = page as *const P;
  144. debug_assert!(self.slabs.contains(page_ptr));
  145. debug_assert!(!self.full_slabs.contains(page_ptr));
  146. self.slabs.remove_from_list(page);
  147. self.full_slabs.insert_front(page);
  148. debug_assert!(!self.slabs.contains(page_ptr));
  149. debug_assert!(self.full_slabs.contains(page_ptr));
  150. }
  151. /// Move a page from `full_slabs` to `slab`.
  152. fn move_full_to_partial(&mut self, page: &'a mut P) {
  153. let page_ptr = page as *const P;
  154. debug_assert!(!self.slabs.contains(page_ptr));
  155. debug_assert!(self.full_slabs.contains(page_ptr));
  156. self.full_slabs.remove_from_list(page);
  157. self.slabs.insert_front(page);
  158. debug_assert!(self.slabs.contains(page_ptr));
  159. debug_assert!(!self.full_slabs.contains(page_ptr));
  160. }
  161. /// Tries to allocate a block of memory with respect to the `layout`.
  162. /// Searches within already allocated slab pages, if no suitable spot is found
  163. /// will try to use a page from the empty page list.
  164. ///
  165. /// # Arguments
  166. /// * `sc_layout`: This is not the original layout but adjusted for the
  167. /// SCAllocator size (>= original).
  168. fn try_allocate_from_pagelist(&mut self, sc_layout: Layout) -> *mut u8 {
  169. // TODO: Do we really need to check multiple slab pages (due to alignment)
  170. // If not we can get away with a singly-linked list and have 8 more bytes
  171. // for the bitfield in an ObjectPage.
  172. for slab_page in self.slabs.iter_mut() {
  173. let ptr = slab_page.allocate(sc_layout);
  174. if !ptr.is_null() {
  175. if slab_page.is_full() {
  176. trace!("move {:p} partial -> full", slab_page);
  177. self.move_partial_to_full(slab_page);
  178. }
  179. self.allocation_count += 1;
  180. return ptr;
  181. } else {
  182. continue;
  183. }
  184. }
  185. // Periodically rebalance page-lists (since dealloc can't do it for us)
  186. if self.allocation_count > SCAllocator::<P>::REBALANCE_COUNT {
  187. self.check_page_assignments();
  188. self.allocation_count = 0;
  189. }
  190. ptr::null_mut()
  191. }
  192. pub fn try_reclaim_pages<F>(&mut self, to_reclaim: usize, dealloc: &mut F) -> usize
  193. where
  194. F: FnMut(*mut P),
  195. {
  196. self.check_page_assignments();
  197. let mut reclaimed = 0;
  198. while reclaimed < to_reclaim {
  199. if let Some(page) = self.empty_slabs.pop() {
  200. dealloc(page as *mut P);
  201. reclaimed += 1;
  202. } else {
  203. break;
  204. }
  205. }
  206. reclaimed
  207. }
  208. /// Refill the SCAllocator
  209. ///
  210. /// # Safety
  211. /// ObjectPage needs to be empty etc.
  212. pub unsafe fn refill(&mut self, page: &'a mut P) {
  213. page.bitfield_mut()
  214. .initialize(self.size, P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD);
  215. *page.prev() = Rawlink::none();
  216. *page.next() = Rawlink::none();
  217. trace!("adding page to SCAllocator {:p}", page);
  218. self.insert_empty(page);
  219. self.free_obj_count += self.obj_per_page;
  220. }
  221. /// Allocates a block of memory descriped by `layout`.
  222. ///
  223. /// Returns a pointer to a valid region of memory or an
  224. /// AllocationError.
  225. ///
  226. /// The function may also move around pages between lists
  227. /// (empty -> partial or partial -> full).
  228. pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError> {
  229. trace!(
  230. "SCAllocator({}) is trying to allocate {:?}",
  231. self.size,
  232. layout
  233. );
  234. assert!(layout.size() <= self.size);
  235. assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
  236. let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
  237. assert!(new_layout.size() >= layout.size());
  238. let ptr = {
  239. // Try to allocate from partial slabs,
  240. // if we fail check if we have empty pages and allocate from there
  241. let ptr = self.try_allocate_from_pagelist(new_layout);
  242. if ptr.is_null() && self.empty_slabs.head.is_some() {
  243. // Re-try allocation in empty page
  244. let empty_page = self.empty_slabs.pop().expect("We checked head.is_some()");
  245. debug_assert!(!self.empty_slabs.contains(empty_page));
  246. let ptr = empty_page.allocate(layout);
  247. debug_assert!(!ptr.is_null(), "Allocation must have succeeded here.");
  248. trace!(
  249. "move {:p} empty -> partial empty count {}",
  250. empty_page,
  251. self.empty_slabs.elements
  252. );
  253. // Move empty page to partial pages
  254. self.insert_partial_slab(empty_page);
  255. ptr
  256. } else {
  257. ptr
  258. }
  259. };
  260. let res = NonNull::new(ptr).ok_or(AllocationError::OutOfMemory);
  261. if !ptr.is_null() {
  262. trace!(
  263. "SCAllocator({}) allocated ptr=0x{:x}",
  264. self.size,
  265. ptr as usize
  266. );
  267. self.free_obj_count -= 1;
  268. }
  269. res
  270. }
  271. /// Deallocates a previously allocated `ptr` described by `Layout`.
  272. ///
  273. /// May return an error in case an invalid `layout` is provided.
  274. /// The function may also move internal slab pages between lists partial -> empty
  275. /// or full -> partial lists.
  276. ///
  277. /// # Safety
  278. /// The caller must ensure that the `layout` is valid.
  279. pub unsafe fn deallocate(
  280. &mut self,
  281. ptr: NonNull<u8>,
  282. layout: Layout,
  283. slab_callback: &'static dyn CallBack,
  284. ) -> Result<(), AllocationError> {
  285. assert!(layout.size() <= self.size);
  286. assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
  287. trace!(
  288. "SCAllocator({}) is trying to deallocate ptr = {:p} layout={:?} P.size= {}",
  289. self.size,
  290. ptr,
  291. layout,
  292. P::SIZE
  293. );
  294. let page = (ptr.as_ptr() as usize) & !(P::SIZE - 1);
  295. // Figure out which page we are on and construct a reference to it
  296. // TODO: The linked list will have another &mut reference
  297. let slab_page = unsafe { mem::transmute::<VAddr, &'a mut P>(page) };
  298. let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
  299. let ret = slab_page.deallocate(ptr, new_layout);
  300. debug_assert!(ret.is_ok(), "Slab page deallocate won't fail at the moment");
  301. self.free_obj_count += 1;
  302. let is_empty_after_dealloc = slab_page.is_empty(self.obj_per_page);
  303. // 如果slab_page是空白的,且空闲块数大于free_limit,将slab_page归还buddy
  304. if self.free_obj_count >= self.free_limit && is_empty_after_dealloc {
  305. self.slabs.remove_from_list(slab_page);
  306. // 将slab_page归还buddy
  307. slab_callback.free_slab_page(slab_page as *const P as *mut u8, P::SIZE);
  308. }
  309. self.check_page_assignments();
  310. ret
  311. }
  312. }