pages.rs

use alloc::boxed::Box;
use crate::*;
use core::{
    mem,
    sync::atomic::{AtomicU64, Ordering},
};

/// A trait defining bitfield operations we need for tracking allocated objects within a page.
pub(crate) trait Bitfield {
    fn initialize(&mut self, for_size: usize, capacity: usize);
    fn first_fit(
        &self,
        base_addr: usize,
        layout: Layout,
        page_size: usize,
    ) -> Option<(usize, usize)>;
    fn is_allocated(&self, idx: usize) -> bool;
    fn set_bit(&self, idx: usize);
    fn clear_bit(&self, idx: usize);
    fn is_full(&self) -> bool;
    fn all_free(&self, relevant_bits: usize) -> bool;
}
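
// Each object slot `i` is tracked by bit (i % 64) of word (i / 64), so the
// 8-word `[AtomicU64; 8]` bitfield used by `ObjectPage` can track at most
// 8 * 64 = 512 slots per page.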

/// Implementation of bit operations on u64 slices.
///
/// We allow deallocations (i.e., clearing a bit in the field)
/// from any thread. That's why the bitfield is a bunch of AtomicU64.
impl Bitfield for [AtomicU64] {
    /// Initialize the bitfield
    ///
    /// # Arguments
    /// * `for_size`: Object size we want to allocate
    /// * `capacity`: Maximum size of the buffer the bitmap maintains.
    ///
    /// Ensures that we only have free slots for what we can allocate
    /// within the page (by marking everything else allocated).
    fn initialize(&mut self, for_size: usize, capacity: usize) {
        // Set everything to allocated
        for bitmap in self.iter_mut() {
            *bitmap = AtomicU64::new(u64::MAX);
        }

        // Mark actual slots as free
        let relevant_bits = core::cmp::min(capacity / for_size, self.len() * 64);
        for idx in 0..relevant_bits {
            self.clear_bit(idx);
        }
    }
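
    // Worked example of the arithmetic above, assuming a 4 KiB `ObjectPage`
    // with the 80-byte metadata header implied by its fields: the data area
    // holds 4096 - 80 = 4016 bytes, so with `for_size = 64` we get
    // relevant_bits = min(4016 / 64, 8 * 64) = min(62, 512) = 62; bits 0..62
    // are cleared (free) and bits 62..512 stay set and are never handed out.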

    /// Tries to find a free block of memory that satisfies `alignment` requirement.
    ///
    /// # Notes
    /// * We pass size here to be able to calculate the resulting address within `data`.
    #[inline(always)]
    fn first_fit(
        &self,
        base_addr: usize,
        layout: Layout,
        page_size: usize,
    ) -> Option<(usize, usize)> {
        let start_offset = get_offset_for_align(layout);
        let data_start = base_addr + start_offset;

        for (base_idx, b) in self.iter().enumerate() {
            let bitval = b.load(Ordering::Relaxed);
            if bitval == u64::MAX {
                continue;
            } else {
                let negated = !bitval;
                let first_free = negated.trailing_zeros() as usize;
                let idx: usize = base_idx * 64 + first_free;
                let offset = idx * layout.size();

                // TODO(bad): psize needs to be passed as arg
                let offset_inside_data_area =
                    offset <= (page_size - OBJECT_PAGE_METADATA_OVERHEAD - layout.size());
                if !offset_inside_data_area {
                    return None;
                }

                let addr: usize = data_start + offset;
                let alignment_ok = addr % layout.align() == 0;
                let block_is_free = bitval & (1 << first_free) == 0;
                if alignment_ok && block_is_free {
                    return Some((idx, addr));
                }
            }
        }
        None
    }
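
    // Example of the scan above: if word 0 currently holds 0b0111 (slots 0-2
    // allocated, everything else free), then `!bitval` has trailing_zeros() == 3,
    // so idx = 0 * 64 + 3 = 3 and the candidate address is
    // data_start + 3 * layout.size(). The candidate is only returned if it stays
    // inside the data area and satisfies layout.align().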

    /// Check if the bit `idx` is set.
    #[inline(always)]
    fn is_allocated(&self, idx: usize) -> bool {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        (self[base_idx].load(Ordering::Relaxed) & (1 << bit_idx)) > 0
    }

    /// Sets the bit number `idx` in the bit-field.
    #[inline(always)]
    fn set_bit(&self, idx: usize) {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        self[base_idx].fetch_or(1 << bit_idx, Ordering::Relaxed);
    }

    /// Clears bit number `idx` in the bit-field.
    #[inline(always)]
    fn clear_bit(&self, idx: usize) {
        let base_idx = idx / 64;
        let bit_idx = idx % 64;
        self[base_idx].fetch_and(!(1 << bit_idx), Ordering::Relaxed);
    }

    /// Checks if we could allocate more objects of a given `alloc_size` within the
    /// `capacity` of the memory allocator.
    ///
    /// # Note
    /// The ObjectPage will make sure to mark the top-most bits as allocated
    /// for large sizes (i.e., a size 512 SCAllocator will only really need 3 bits
    /// to track allocated objects). That's why this function can be simpler
    /// than it would need to be in practice.
    #[inline(always)]
    fn is_full(&self) -> bool {
        self.iter()
            .filter(|&x| x.load(Ordering::Relaxed) != u64::MAX)
            .count()
            == 0
    }

    /// Checks if the page has currently no allocations.
    ///
    /// This is called `all_free` rather than `is_empty` because
    /// we already have an `is_empty` fn as part of the slice.
    #[inline(always)]
    fn all_free(&self, relevant_bits: usize) -> bool {
        for (idx, bitmap) in self.iter().enumerate() {
            let checking_bit_range = (idx * 64, (idx + 1) * 64);
            if relevant_bits >= checking_bit_range.0 && relevant_bits < checking_bit_range.1 {
                // Last relevant bitmap, here we only have to check that a subset of bitmap is marked free,
                // the rest will be marked full
                let bits_that_should_be_free = relevant_bits - checking_bit_range.0;
                let free_mask = (1 << bits_that_should_be_free) - 1;
                return (free_mask & bitmap.load(Ordering::Relaxed)) == 0;
            }

            if bitmap.load(Ordering::Relaxed) == 0 {
                continue;
            } else {
                return false;
            }
        }
        true
    }
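
    // Worked example for `all_free`: with relevant_bits = 62 the first word is
    // the last relevant one (62 falls in the range 0..64), so
    // free_mask = (1 << 62) - 1 and the page counts as empty exactly when the
    // low 62 bits of word 0 are all zero; the top two bits were marked
    // allocated by `initialize` and are ignored here.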
}

/// # get_offset_for_align - Get the alignment offset within a page for a given layout size
///
/// This function determines a suitable alignment offset based on the size of the given `Layout`.
///
/// ## Arguments
///
/// - `layout`: the `Layout` for which the alignment offset is computed.
///
/// ## Returns
///
/// - `usize`: the alignment offset (in bytes from the page base) at which objects of this layout start.
fn get_offset_for_align(layout: Layout) -> usize {
    match layout.size() {
        0..=8 => 80,
        9..=16 => 80,
        17..=32 => 96,
        33..=64 => 128,
        65..=128 => 128,
        129..=256 => 256,
        257..=512 => 512,
        513..=1024 => 1024,
        1025..=2048 => 2048,
        _ => panic!(),
    }
}
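
// The offsets above keep the data area clear of the page metadata and make the
// first slot size-class aligned: assuming the 80-byte header implied by
// `ObjectPage` (a 64-byte bitfield plus two 8-byte links), each entry is the
// smallest multiple of the size class that is >= 80. For example, 17..=32-byte
// objects start at offset 96 because 96 is the first multiple of 32 past 80.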

/// This trait is used to define a page from which objects are allocated
/// in an `SCAllocator`.
///
/// The implementor of this trait needs to provide access to the page meta-data,
/// which consists of:
/// - A bitfield (to track allocations),
/// - `prev` and `next` pointers to insert the page in free lists
pub trait AllocablePage {
    /// The total size (in bytes) of the page.
    ///
    /// # Note
    /// We also assume that the address of the page will be aligned to `SIZE`.
    const SIZE: usize;

    fn bitfield(&self) -> &[AtomicU64; 8];
    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8];
    fn prev(&mut self) -> &mut Rawlink<Self>
    where
        Self: core::marker::Sized;
    fn next(&mut self) -> &mut Rawlink<Self>
    where
        Self: core::marker::Sized;

    /// Tries to find a free block within `data` that satisfies `alignment` requirement.
    fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
        let base_addr = (self as *const Self as *const u8) as usize;
        self.bitfield().first_fit(base_addr, layout, Self::SIZE)
    }

    /// Tries to allocate an object within this page.
    ///
    /// In case the slab is full, returns a null ptr.
    fn allocate(&mut self, layout: Layout) -> *mut u8 {
        match self.first_fit(layout) {
            Some((idx, addr)) => {
                self.bitfield().set_bit(idx);
                addr as *mut u8
            }
            None => ptr::null_mut(),
        }
    }

    /// Checks if we can still allocate more objects of a given layout within the page.
    fn is_full(&self) -> bool {
        self.bitfield().is_full()
    }

    /// Checks if the page has currently no allocations.
    fn is_empty(&self, relevant_bits: usize) -> bool {
        self.bitfield().all_free(relevant_bits)
    }

    /// Deallocates a memory object within this page.
    fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
        trace!(
            "AllocablePage deallocating ptr = {:p} with {:?}",
            ptr,
            layout
        );
        let align_offset = get_offset_for_align(layout);
        let page_offset = ((ptr.as_ptr() as usize) - align_offset) & (Self::SIZE - 1);
        assert!(page_offset % layout.size() == 0);
        let idx = page_offset / layout.size();
        assert!(
            self.bitfield().is_allocated(idx),
            "{:p} not marked allocated?",
            ptr
        );

        self.bitfield().clear_bit(idx);
        Ok(())
    }
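
    // How `deallocate` recovers the slot index: `allocate` hands out
    // addr = page_base + get_offset_for_align(layout) + idx * layout.size(), and
    // the page base is assumed to be aligned to `Self::SIZE`. Subtracting the
    // align offset and masking with (Self::SIZE - 1) therefore yields
    // idx * layout.size(), from which idx follows by dividing by the object size.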

    /// Counts how many more objects can still be allocated from this page.
    fn free_obj_count(&self) -> usize {
        // Count how many objects can still be allocated from this page.
        let mut free_obj_count = 0;

        // Walk the page's bitfield (the array of u64s that tracks allocations).
        for b in self.bitfield().iter() {
            let bitval = b.load(Ordering::Relaxed);
            free_obj_count += bitval.count_zeros() as usize;
        }

        free_obj_count
    }
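
    // `count_zeros` is accurate here because `initialize` marks every bit beyond
    // the page's real capacity as allocated, so only genuinely usable slots show
    // up as zero bits.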
}

/// Holds allocated data within a 4 KiB page.
///
/// Has a data-section where objects are allocated from
/// and a small amount of meta-data in the form of a bitmap
/// to track allocations at the beginning of the page.
///
/// # Notes
/// An object of this type will be exactly 4 KiB.
/// It is marked `repr(C)` because we rely on a well-defined order of struct
/// members (e.g., dealloc does a cast to find the bitfield).
#[repr(C)]
pub struct ObjectPage<'a> {
    #[allow(dead_code)]
    /// A bit-field to track free/allocated memory within `data`.
    pub(crate) bitfield: [AtomicU64; 8],

    /// Next element in list (used by `PageList`).
    next: Rawlink<ObjectPage<'a>>,
    /// Previous element in list (used by `PageList`)
    prev: Rawlink<ObjectPage<'a>>,

    /// Holds memory objects.
    data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD],
}
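
// Size accounting for `ObjectPage`: the metadata header is 8 * 8 = 64 bytes of
// bitfield plus two 8-byte `Rawlink`s, i.e. 80 bytes, which is what
// `OBJECT_PAGE_METADATA_OVERHEAD` is expected to be; together with `data` the
// struct then spans exactly OBJECT_PAGE_SIZE (4 KiB).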

impl<'a> ObjectPage<'a> {
    pub fn new() -> Box<ObjectPage<'a>> {
        unsafe { Box::new_uninit().assume_init() }
    }
}

// These need some more work to be really safe...
unsafe impl Send for ObjectPage<'_> {}
unsafe impl Sync for ObjectPage<'_> {}

impl AllocablePage for ObjectPage<'_> {
    const SIZE: usize = OBJECT_PAGE_SIZE;

    fn bitfield(&self) -> &[AtomicU64; 8] {
        &self.bitfield
    }

    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8] {
        &mut self.bitfield
    }

    fn prev(&mut self) -> &mut Rawlink<Self> {
        &mut self.prev
    }

    fn next(&mut self) -> &mut Rawlink<Self> {
        &mut self.next
    }
}

impl<'a> Default for ObjectPage<'a> {
    fn default() -> ObjectPage<'a> {
        unsafe { mem::MaybeUninit::zeroed().assume_init() }
    }
}
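
// `Default` yields an all-zero page: a zeroed bitfield (every bit reads as
// free), null `prev`/`next` links, and zeroed data. The real free-slot range is
// then typically narrowed down via `Bitfield::initialize`.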

impl fmt::Debug for ObjectPage<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ObjectPage")
    }
}

/// A list of pages.
pub(crate) struct PageList<'a, T: AllocablePage> {
    /// Points to the head of the list.
    pub(crate) head: Option<&'a mut T>,
    /// Number of elements in the list.
    pub(crate) elements: usize,
}

impl<'a, T: AllocablePage> PageList<'a, T> {
    #[cfg(feature = "unstable")]
    pub(crate) const fn new() -> PageList<'a, T> {
        PageList {
            head: None,
            elements: 0,
        }
    }

    #[cfg(not(feature = "unstable"))]
    pub(crate) fn new() -> PageList<'a, T> {
        PageList {
            head: None,
            elements: 0,
        }
    }

    pub(crate) fn iter_mut<'b: 'a>(&mut self) -> ObjectPageIterMut<'b, T> {
        let m = match self.head {
            None => Rawlink::none(),
            Some(ref mut m) => Rawlink::some(*m),
        };

        ObjectPageIterMut {
            head: m,
            phantom: core::marker::PhantomData,
        }
    }

    /// Inserts `new_head` at the front of the list.
    pub(crate) fn insert_front<'b>(&'b mut self, mut new_head: &'a mut T) {
        match self.head {
            None => {
                *new_head.prev() = Rawlink::none();
                self.head = Some(new_head);
            }
            Some(ref mut head) => {
                *new_head.prev() = Rawlink::none();
                *head.prev() = Rawlink::some(new_head);
                mem::swap(head, &mut new_head);
                *head.next() = Rawlink::some(new_head);
            }
        }

        self.elements += 1;
    }

    /// Removes `slab_page` from the list.
    pub(crate) fn remove_from_list(&mut self, slab_page: &mut T) {
        unsafe {
            match slab_page.prev().resolve_mut() {
                None => {
                    self.head = slab_page.next().resolve_mut();
                }
                Some(prev) => {
                    *prev.next() = match slab_page.next().resolve_mut() {
                        None => Rawlink::none(),
                        Some(next) => Rawlink::some(next),
                    };
                }
            }

            match slab_page.next().resolve_mut() {
                None => (),
                Some(next) => {
                    *next.prev() = match slab_page.prev().resolve_mut() {
                        None => Rawlink::none(),
                        Some(prev) => Rawlink::some(prev),
                    };
                }
            }
        }

        *slab_page.prev() = Rawlink::none();
        *slab_page.next() = Rawlink::none();
        self.elements -= 1;
    }

    /// Removes the current head of the list and returns it, if any.
    #[allow(clippy::manual_inspect)]
    pub(crate) fn pop<'b>(&'b mut self) -> Option<&'a mut T> {
        match self.head {
            None => None,
            Some(ref mut head) => {
                let head_next = head.next();
                let mut new_head = unsafe { head_next.resolve_mut() };
                mem::swap(&mut self.head, &mut new_head);
                let _ = self.head.as_mut().map(|n| {
                    *n.prev() = Rawlink::none();
                });
                self.elements -= 1;

                new_head.map(|node| {
                    *node.prev() = Rawlink::none();
                    *node.next() = Rawlink::none();
                    node
                })
            }
        }
    }

    /// Does the list contain `s`?
    pub(crate) fn contains(&mut self, s: *const T) -> bool {
        for slab_page in self.iter_mut() {
            if core::ptr::eq(slab_page, s) {
                return true;
            }
        }

        false
    }
}
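
// A minimal usage sketch for `PageList`, assuming tests run on a hosted target
// where the standard test harness is available: pages pushed with
// `insert_front` become the new head, and `pop` hands the head back out, so
// the list behaves like a LIFO stack of pages.
#[cfg(test)]
mod page_list_usage_sketch {
    use super::*;
    use alloc::boxed::Box;

    #[test]
    fn insert_front_then_pop_is_lifo() {
        let mut list: PageList<'static, ObjectPage<'static>> = PageList::new();

        // `Default` zeroes the pages, so their `prev`/`next` links start out null.
        let first: &'static mut ObjectPage<'static> = Box::leak(Box::new(ObjectPage::default()));
        let second: &'static mut ObjectPage<'static> = Box::leak(Box::new(ObjectPage::default()));
        let first_ptr: *const ObjectPage<'static> = &*first;
        let second_ptr: *const ObjectPage<'static> = &*second;

        list.insert_front(first);
        list.insert_front(second);
        assert_eq!(list.elements, 2);
        assert!(list.contains(first_ptr));

        // `second` was inserted last, so it is the current head and pops first.
        let popped = list.pop().expect("list is non-empty");
        assert!(core::ptr::eq(&*popped, second_ptr));
        assert_eq!(list.elements, 1);
    }
}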

/// Iterate over all the pages inside a slab allocator
pub(crate) struct ObjectPageIterMut<'a, P: AllocablePage> {
    head: Rawlink<P>,
    phantom: core::marker::PhantomData<&'a P>,
}

impl<'a, P: AllocablePage + 'a> Iterator for ObjectPageIterMut<'a, P> {
    type Item = &'a mut P;

    #[inline]
    #[allow(clippy::manual_inspect)]
    fn next(&mut self) -> Option<&'a mut P> {
        unsafe {
            self.head.resolve_mut().map(|next| {
                self.head = match next.next().resolve_mut() {
                    None => Rawlink::none(),
                    Some(ref mut sp) => Rawlink::some(*sp),
                };
                next
            })
        }
    }
}

/// Rawlink is a type like `Option<T>` but for holding a raw pointer.
///
/// We use it to link AllocablePages together. You probably won't need
/// to use this type if you're not implementing AllocablePage
/// for a custom page-size.
pub struct Rawlink<T> {
    p: *mut T,
}

impl<T> Default for Rawlink<T> {
    fn default() -> Self {
        Rawlink { p: ptr::null_mut() }
    }
}

impl<T> Rawlink<T> {
    /// Like `Option::None` for Rawlink
    pub(crate) fn none() -> Rawlink<T> {
        Rawlink { p: ptr::null_mut() }
    }

    /// Like `Option::Some` for Rawlink
    pub(crate) fn some(n: &mut T) -> Rawlink<T> {
        Rawlink { p: n }
    }

    /// Convert the `Rawlink` into an Option value
    ///
    /// **unsafe** because:
    ///
    /// - Dereference of raw pointer.
    /// - Returns reference of arbitrary lifetime.
    #[allow(dead_code)]
    pub(crate) unsafe fn resolve<'a>(&self) -> Option<&'a T> {
        self.p.as_ref()
    }

    /// Convert the `Rawlink` into an Option value
    ///
    /// **unsafe** because:
    ///
    /// - Dereference of raw pointer.
    /// - Returns reference of arbitrary lifetime.
    pub(crate) unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> {
        self.p.as_mut()
    }

    /// Return the `Rawlink` and replace with `Rawlink::none()`
    #[allow(dead_code)]
    pub(crate) fn take(&mut self) -> Rawlink<T> {
        mem::replace(self, Rawlink::none())
    }
}