
feat(mm): Add slab memory allocator (#683)

feat(mm): Add slab memory allocator
---------

Co-authored-by: longjin <[email protected]>
laokengwt 11 months ago
parent
commit
ceeb2e943c

+ 1 - 0
kernel/Cargo.toml

@@ -52,6 +52,7 @@ fdt = "=0.1.5"
 uefi = { version = "=0.26.0", features = ["alloc"] }
 uefi-raw = "=0.5.0"
 paste = "=1.0.14"
+slabmalloc = { path = "crates/rust-slabmalloc" }
 
 
# When the target is x86_64, use the dependencies below

+ 16 - 0
kernel/crates/rust-slabmalloc/Cargo.toml

@@ -0,0 +1,16 @@
+[package]
+name = "slabmalloc"
+version = "0.11.0"
+edition = "2018"
+
+[features]
+unstable = []
+default = [ "unstable" ]
+
+[dependencies]
+log = "0.4"
+
+[target.'cfg(unix)'.dev-dependencies]
+rand = "0.8"
+env_logger = "0.9"
+spin = "0.9.8"

+ 79 - 0
kernel/crates/rust-slabmalloc/src/lib.rs

@@ -0,0 +1,79 @@
+//! A slab allocator implementation for objects less than a page-size (4 KiB or 2 MiB).
+//!
+//! # Overview
+//!
+//! The organization is as follows:
+//!
+//!  * A `ZoneAllocator` manages many `SCAllocator` and can
+//!    satisfy requests for different allocation sizes.
+//!  * A `SCAllocator` allocates objects of exactly one size.
+//!    It stores the objects and meta-data in one or multiple `AllocablePage` objects.
+//!  * A trait `AllocablePage` that defines the page-type from which we allocate objects.
+//!
+//! Lastly, it provides two default `AllocablePage` implementations `ObjectPage` and `LargeObjectPage`:
+//!  * An `ObjectPage` that is 4 KiB in size and contains allocated objects and associated meta-data.
+//!  * A `LargeObjectPage` that is 2 MiB in size and contains allocated objects and associated meta-data.
+//!
+//!
+//! # Implementing GlobalAlloc
+//! See the [global alloc](https://github.com/gz/rust-slabmalloc/tree/master/examples/global_alloc.rs) example.
+#![allow(unused_features)]
+#![cfg_attr(feature = "unstable", feature(const_mut_refs))]
+#![no_std]
+#![crate_name = "slabmalloc"]
+#![crate_type = "lib"]
+#![feature(new_uninit)]
+#![feature(maybe_uninit_as_bytes)]
+
+extern crate alloc;
+
+mod pages;
+mod sc;
+mod zone;
+
+pub use pages::*;
+pub use sc::*;
+pub use zone::*;
+
+use core::alloc::Layout;
+use core::fmt;
+use core::ptr::{self, NonNull};
+
+use log::trace;
+
+/// How many bytes in the page are used by allocator meta-data.
+const OBJECT_PAGE_METADATA_OVERHEAD: usize = 80;
+
+/// How many bytes an [`ObjectPage`] is.
+const OBJECT_PAGE_SIZE: usize = 4096;
+
+type VAddr = usize;
+
+/// Error that can be returned for `allocation` and `deallocation` requests.
+#[derive(Debug)]
+pub enum AllocationError {
+    /// Can't satisfy the allocation request for Layout because the allocator
+    /// does not have enough memory (you may be able to `refill` it).
+    OutOfMemory,
+    /// Allocator can't deal with the provided size of the Layout.
+    InvalidLayout,
+}
+
+/// Allocator trait to be implemented by users of slabmalloc to provide memory to slabmalloc.
+///
+/// # Safety
+/// Needs to adhere to the safety requirements of a Rust allocator (see GlobalAlloc et al.).
+pub unsafe trait Allocator<'a> {
+    fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError>;
+    fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError>;
+
+    /// Refill the allocator with a [`ObjectPage`].
+    ///
+    /// # Safety
+    /// TBD (this API needs to change anyways, likely new page should be a raw pointer)
+    unsafe fn refill(
+        &mut self,
+        layout: Layout,
+        new_page: &'a mut ObjectPage<'a>,
+    ) -> Result<(), AllocationError>;
+}
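
The `Allocator` trait above is what `ZoneAllocator` (see zone.rs below) implements. As a rough counterpart to the "Implementing GlobalAlloc" pointer in the crate docs, a consumer could wrap a `ZoneAllocator` behind a lock along these lines (a minimal sketch, not part of this commit, assuming a `spin::Mutex` is available; it simply reports out-of-memory as a null pointer instead of refilling):

use core::alloc::{GlobalAlloc, Layout};
use core::ptr::{self, NonNull};

use slabmalloc::{AllocationError, Allocator, ZoneAllocator};
use spin::Mutex;

/// Hypothetical wrapper; a real kernel would refill the zone with fresh pages on OOM.
struct SafeZoneAllocator(Mutex<ZoneAllocator<'static>>);

unsafe impl GlobalAlloc for SafeZoneAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        match self.0.lock().allocate(layout) {
            Ok(nptr) => nptr.as_ptr(),
            // A production implementation would `refill` with a new ObjectPage
            // and retry; this sketch just fails the request.
            Err(AllocationError::OutOfMemory) => ptr::null_mut(),
            Err(AllocationError::InvalidLayout) => ptr::null_mut(),
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        if let Some(nptr) = NonNull::new(ptr) {
            let _ = self.0.lock().deallocate(nptr, layout);
        }
    }
}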

+ 521 - 0
kernel/crates/rust-slabmalloc/src/pages.rs

@@ -0,0 +1,521 @@
+use alloc::boxed::Box;
+
+use crate::*;
+use core::{
+    mem,
+    sync::atomic::{AtomicU64, Ordering},
+};
+
+/// A trait defining bitfield operations we need for tracking allocated objects within a page.
+pub(crate) trait Bitfield {
+    fn initialize(&mut self, for_size: usize, capacity: usize);
+    fn first_fit(
+        &self,
+        base_addr: usize,
+        layout: Layout,
+        page_size: usize,
+    ) -> Option<(usize, usize)>;
+    fn is_allocated(&self, idx: usize) -> bool;
+    fn set_bit(&self, idx: usize);
+    fn clear_bit(&self, idx: usize);
+    fn is_full(&self) -> bool;
+    fn all_free(&self, relevant_bits: usize) -> bool;
+}
+
+/// Implementation of bit operations on u64 slices.
+///
+/// We allow deallocations (i.e. clearing a bit in the field)
+/// from any thread. That's why the bitfield is a bunch of AtomicU64.
+impl Bitfield for [AtomicU64] {
+    /// Initialize the bitfield
+    ///
+    /// # Arguments
+    ///  * `for_size`: Object size we want to allocate
+    ///  * `capacity`: Maximum size of the buffer the bitmap maintains.
+    ///
+    /// Ensures that we only have free slots for what we can allocate
+    /// within the page (by marking everything else allocated).
+    fn initialize(&mut self, for_size: usize, capacity: usize) {
+        // Set everything to allocated
+        for bitmap in self.iter_mut() {
+            *bitmap = AtomicU64::new(u64::max_value());
+        }
+
+        // Mark actual slots as free
+        let relevant_bits = core::cmp::min(capacity / for_size, self.len() * 64);
+        for idx in 0..relevant_bits {
+            self.clear_bit(idx);
+        }
+    }
+
+    /// Tries to find a free block of memory that satisfies `alignment` requirement.
+    ///
+    /// # Notes
+    /// * We pass size here to be able to calculate the resulting address within `data`.
+    #[inline(always)]
+    fn first_fit(
+        &self,
+        base_addr: usize,
+        layout: Layout,
+        page_size: usize,
+    ) -> Option<(usize, usize)> {
+        let start_offset = get_offset_for_align(layout);
+        let data_start = base_addr + start_offset;
+
+        for (base_idx, b) in self.iter().enumerate() {
+            let bitval = b.load(Ordering::Relaxed);
+            if bitval == u64::max_value() {
+                continue;
+            } else {
+                let negated = !bitval;
+                let first_free = negated.trailing_zeros() as usize;
+                let idx: usize = base_idx * 64 + first_free;
+                let offset = idx * layout.size();
+
+                // TODO(bad): psize needs to be passed as arg
+                let offset_inside_data_area =
+                    offset <= (page_size - OBJECT_PAGE_METADATA_OVERHEAD - layout.size());
+                if !offset_inside_data_area {
+                    return None;
+                }
+
+                let addr: usize = data_start + offset;
+                let alignment_ok = addr % layout.align() == 0;
+                let block_is_free = bitval & (1 << first_free) == 0;
+                if alignment_ok && block_is_free {
+                    return Some((idx, addr));
+                }
+            }
+        }
+        None
+    }
+
+    /// Check if the bit `idx` is set.
+    #[inline(always)]
+    fn is_allocated(&self, idx: usize) -> bool {
+        let base_idx = idx / 64;
+        let bit_idx = idx % 64;
+        (self[base_idx].load(Ordering::Relaxed) & (1 << bit_idx)) > 0
+    }
+
+    /// Sets the bit number `idx` in the bit-field.
+    #[inline(always)]
+    fn set_bit(&self, idx: usize) {
+        let base_idx = idx / 64;
+        let bit_idx = idx % 64;
+        self[base_idx].fetch_or(1 << bit_idx, Ordering::Relaxed);
+    }
+
+    /// Clears bit number `idx` in the bit-field.
+    #[inline(always)]
+    fn clear_bit(&self, idx: usize) {
+        let base_idx = idx / 64;
+        let bit_idx = idx % 64;
+        self[base_idx].fetch_and(!(1 << bit_idx), Ordering::Relaxed);
+    }
+
+    /// Checks if we could allocate more objects of a given `alloc_size` within the
+    /// `capacity` of the memory allocator.
+    ///
+    /// # Note
+    /// The ObjectPage will make sure to mark the top-most bits as allocated
+    /// for large sizes (i.e., a size 512 SCAllocator will only really need 3 bits
+    /// to track allocated objects). That's why this function can be simpler
+    /// than it would need to be in practice.
+    #[inline(always)]
+    fn is_full(&self) -> bool {
+        self.iter()
+            .filter(|&x| x.load(Ordering::Relaxed) != u64::max_value())
+            .count()
+            == 0
+    }
+
+    /// Checks if the page has currently no allocations.
+    ///
+    /// This is called `all_free` rather than `is_empty` because
+    /// we already have an is_empty fn as part of the slice.
+    #[inline(always)]
+    fn all_free(&self, relevant_bits: usize) -> bool {
+        for (idx, bitmap) in self.iter().enumerate() {
+            let checking_bit_range = (idx * 64, (idx + 1) * 64);
+            if relevant_bits >= checking_bit_range.0 && relevant_bits < checking_bit_range.1 {
+                // Last relevant bitmap, here we only have to check that a subset of bitmap is marked free
+                // the rest will be marked full
+                let bits_that_should_be_free = relevant_bits - checking_bit_range.0;
+                let free_mask = (1 << bits_that_should_be_free) - 1;
+                return (free_mask & bitmap.load(Ordering::Relaxed)) == 0;
+            }
+
+            if bitmap.load(Ordering::Relaxed) == 0 {
+                continue;
+            } else {
+                return false;
+            }
+        }
+
+        true
+    }
+}
+
+/// # get_offset_for_align - Get the in-page alignment offset for a layout size
+///
+/// Determines a suitable alignment offset based on the size of the given `Layout`.
+///
+/// ## Arguments
+///
+/// - layout: the `Layout` for which the alignment offset is to be computed.
+///
+/// ## Returns
+///
+/// - usize: the alignment offset on success.
+fn get_offset_for_align(layout: Layout) -> usize {
+    match layout.size() {
+        0..=8 => 80,
+        9..=16 => 80,
+        17..=32 => 96,
+        33..=64 => 128,
+        65..=128 => 128,
+        129..=256 => 256,
+        257..=512 => 512,
+        513..=1024 => 1024,
+        1025..=2048 => 2048,
+        _ => panic!(),
+    }
+}
+
+/// This trait is used to define a page from which objects are allocated
+/// in an `SCAllocator`.
+///
+/// The implementor of this trait needs to provide access to the page meta-data,
+/// which consists of:
+/// - A bitfield (to track allocations),
+/// - `prev` and `next` pointers to insert the page in free lists
+pub trait AllocablePage {
+    /// The total size (in bytes) of the page.
+    ///
+    /// # Note
+    /// We also assume that the address of the page will be aligned to `SIZE`.
+    const SIZE: usize;
+
+    fn bitfield(&self) -> &[AtomicU64; 8];
+    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8];
+    fn prev(&mut self) -> &mut Rawlink<Self>
+    where
+        Self: core::marker::Sized;
+    fn next(&mut self) -> &mut Rawlink<Self>
+    where
+        Self: core::marker::Sized;
+
+    /// Tries to find a free block within `data` that satisfies `alignment` requirement.
+    fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
+        let base_addr = (self as *const Self as *const u8) as usize;
+        self.bitfield().first_fit(base_addr, layout, Self::SIZE)
+    }
+
+    /// Tries to allocate an object within this page.
+    ///
+    /// In case the slab is full, returns a null ptr.
+    fn allocate(&mut self, layout: Layout) -> *mut u8 {
+        match self.first_fit(layout) {
+            Some((idx, addr)) => {
+                self.bitfield().set_bit(idx);
+                addr as *mut u8
+            }
+            None => ptr::null_mut(),
+        }
+    }
+
+    /// Checks if we can still allocate more objects of a given layout within the page.
+    fn is_full(&self) -> bool {
+        self.bitfield().is_full()
+    }
+
+    /// Checks if the page has currently no allocations.
+    fn is_empty(&self, relevant_bits: usize) -> bool {
+        self.bitfield().all_free(relevant_bits)
+    }
+
+    /// Deallocates a memory object within this page.
+    fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
+        trace!(
+            "AllocablePage deallocating ptr = {:p} with {:?}",
+            ptr,
+            layout
+        );
+        let align_offset = get_offset_for_align(layout);
+        let page_offset = ((ptr.as_ptr() as usize) - align_offset) & (Self::SIZE - 1);
+        assert!(page_offset % layout.size() == 0);
+        let idx = page_offset / layout.size();
+        assert!(
+            self.bitfield().is_allocated(idx),
+            "{:p} not marked allocated?",
+            ptr
+        );
+
+        self.bitfield().clear_bit(idx);
+        Ok(())
+    }
+}
+
+/// Holds allocated data within a 4 KiB page.
+///
+/// Has a data-section where objects are allocated from
+/// and a small amount of meta-data in form of a bitmap
+/// to track allocations at the end of the page.
+///
+/// # Notes
+/// An object of this type will be exactly 4 KiB.
+/// It is marked `repr(C)` because we rely on a well defined order of struct
+/// members (e.g., dealloc does a cast to find the bitfield).
+#[repr(C)]
+pub struct ObjectPage<'a> {
+    #[allow(dead_code)]
+    /// A bit-field to track free/allocated memory within `data`.
+    pub(crate) bitfield: [AtomicU64; 8],
+
+    /// Next element in list (used by `PageList`).
+    next: Rawlink<ObjectPage<'a>>,
+    /// Previous element in list (used by `PageList`).
+    prev: Rawlink<ObjectPage<'a>>,
+
+    /// Holds memory objects.
+    data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD],
+}
+
+impl<'a> ObjectPage<'a> {
+    pub fn new() -> Box<ObjectPage<'a>> {
+        unsafe { Box::new_uninit().assume_init() }
+    }
+}
+
+// These need some more work to be really safe...
+unsafe impl<'a> Send for ObjectPage<'a> {}
+unsafe impl<'a> Sync for ObjectPage<'a> {}
+
+impl<'a> AllocablePage for ObjectPage<'a> {
+    const SIZE: usize = OBJECT_PAGE_SIZE;
+
+    fn bitfield(&self) -> &[AtomicU64; 8] {
+        &self.bitfield
+    }
+    fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8] {
+        &mut self.bitfield
+    }
+
+    fn prev(&mut self) -> &mut Rawlink<Self> {
+        &mut self.prev
+    }
+
+    fn next(&mut self) -> &mut Rawlink<Self> {
+        &mut self.next
+    }
+}
+
+impl<'a> Default for ObjectPage<'a> {
+    fn default() -> ObjectPage<'a> {
+        unsafe { mem::MaybeUninit::zeroed().assume_init() }
+    }
+}
+
+impl<'a> fmt::Debug for ObjectPage<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "ObjectPage")
+    }
+}
+
+/// A list of pages.
+pub(crate) struct PageList<'a, T: AllocablePage> {
+    /// Points to the head of the list.
+    pub(crate) head: Option<&'a mut T>,
+    /// Number of elements in the list.
+    pub(crate) elements: usize,
+}
+
+impl<'a, T: AllocablePage> PageList<'a, T> {
+    #[cfg(feature = "unstable")]
+    pub(crate) const fn new() -> PageList<'a, T> {
+        PageList {
+            head: None,
+            elements: 0,
+        }
+    }
+
+    #[cfg(not(feature = "unstable"))]
+    pub(crate) fn new() -> PageList<'a, T> {
+        PageList {
+            head: None,
+            elements: 0,
+        }
+    }
+
+    pub(crate) fn iter_mut<'b: 'a>(&mut self) -> ObjectPageIterMut<'b, T> {
+        let m = match self.head {
+            None => Rawlink::none(),
+            Some(ref mut m) => Rawlink::some(*m),
+        };
+
+        ObjectPageIterMut {
+            head: m,
+            phantom: core::marker::PhantomData,
+        }
+    }
+
+    /// Inserts `new_head` at the front of the list.
+    pub(crate) fn insert_front<'b>(&'b mut self, mut new_head: &'a mut T) {
+        match self.head {
+            None => {
+                *new_head.prev() = Rawlink::none();
+                self.head = Some(new_head);
+            }
+            Some(ref mut head) => {
+                *new_head.prev() = Rawlink::none();
+                *head.prev() = Rawlink::some(new_head);
+                mem::swap(head, &mut new_head);
+                *head.next() = Rawlink::some(new_head);
+            }
+        }
+
+        self.elements += 1;
+    }
+
+    /// Removes `slab_page` from the list.
+    pub(crate) fn remove_from_list(&mut self, slab_page: &mut T) {
+        unsafe {
+            match slab_page.prev().resolve_mut() {
+                None => {
+                    self.head = slab_page.next().resolve_mut();
+                }
+                Some(prev) => {
+                    *prev.next() = match slab_page.next().resolve_mut() {
+                        None => Rawlink::none(),
+                        Some(next) => Rawlink::some(next),
+                    };
+                }
+            }
+
+            match slab_page.next().resolve_mut() {
+                None => (),
+                Some(next) => {
+                    *next.prev() = match slab_page.prev().resolve_mut() {
+                        None => Rawlink::none(),
+                        Some(prev) => Rawlink::some(prev),
+                    };
+                }
+            }
+        }
+
+        *slab_page.prev() = Rawlink::none();
+        *slab_page.next() = Rawlink::none();
+        self.elements -= 1;
+    }
+
+    /// Removes `slab_page` from the list.
+    pub(crate) fn pop<'b>(&'b mut self) -> Option<&'a mut T> {
+        match self.head {
+            None => None,
+            Some(ref mut head) => {
+                let head_next = head.next();
+                let mut new_head = unsafe { head_next.resolve_mut() };
+                mem::swap(&mut self.head, &mut new_head);
+                let _ = self.head.as_mut().map(|n| {
+                    *n.prev() = Rawlink::none();
+                });
+
+                self.elements -= 1;
+                new_head.map(|node| {
+                    *node.prev() = Rawlink::none();
+                    *node.next() = Rawlink::none();
+                    node
+                })
+            }
+        }
+    }
+
+    /// Does the list contain `s`?
+    pub(crate) fn contains(&mut self, s: *const T) -> bool {
+        for slab_page in self.iter_mut() {
+            if core::ptr::eq(slab_page, s) {
+                return true;
+            }
+        }
+
+        false
+    }
+}
+
+/// Iterate over all the pages inside a slab allocator
+pub(crate) struct ObjectPageIterMut<'a, P: AllocablePage> {
+    head: Rawlink<P>,
+    phantom: core::marker::PhantomData<&'a P>,
+}
+
+impl<'a, P: AllocablePage + 'a> Iterator for ObjectPageIterMut<'a, P> {
+    type Item = &'a mut P;
+
+    #[inline]
+    fn next(&mut self) -> Option<&'a mut P> {
+        unsafe {
+            self.head.resolve_mut().map(|next| {
+                self.head = match next.next().resolve_mut() {
+                    None => Rawlink::none(),
+                    Some(ref mut sp) => Rawlink::some(*sp),
+                };
+                next
+            })
+        }
+    }
+}
+
+/// Rawlink is a type like Option<T> but for holding a raw pointer.
+///
+/// We use it to link AllocablePages together. You probably won't need
+/// to use this type if you're not implementing AllocablePage
+/// for a custom page-size.
+pub struct Rawlink<T> {
+    p: *mut T,
+}
+
+impl<T> Default for Rawlink<T> {
+    fn default() -> Self {
+        Rawlink { p: ptr::null_mut() }
+    }
+}
+
+impl<T> Rawlink<T> {
+    /// Like Option::None for Rawlink
+    pub(crate) fn none() -> Rawlink<T> {
+        Rawlink { p: ptr::null_mut() }
+    }
+
+    /// Like Option::Some for Rawlink
+    pub(crate) fn some(n: &mut T) -> Rawlink<T> {
+        Rawlink { p: n }
+    }
+
+    /// Convert the `Rawlink` into an Option value
+    ///
+    /// **unsafe** because:
+    ///
+    /// - Dereference of raw pointer.
+    /// - Returns reference of arbitrary lifetime.
+    #[allow(dead_code)]
+    pub(crate) unsafe fn resolve<'a>(&self) -> Option<&'a T> {
+        self.p.as_ref()
+    }
+
+    /// Convert the `Rawlink` into an Option value
+    ///
+    /// **unsafe** because:
+    ///
+    /// - Dereference of raw pointer.
+    /// - Returns reference of arbitrary lifetime.
+    pub(crate) unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> {
+        self.p.as_mut()
+    }
+
+    /// Return the `Rawlink` and replace with `Rawlink::none()`
+    #[allow(dead_code)]
+    pub(crate) fn take(&mut self) -> Rawlink<T> {
+        mem::replace(self, Rawlink::none())
+    }
+}
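
To make the index arithmetic in `get_offset_for_align` and `AllocablePage::deallocate` concrete: for a 64-byte layout the data area starts 128 bytes into the 4 KiB page, so mapping a pointer back to its bitfield slot is an offset-and-divide. A small illustrative sketch (hypothetical helper, not part of the crate):

// Mirrors the math in `AllocablePage::deallocate` for one fixed size class.
fn slot_index_for_64_byte_object(ptr: usize) -> usize {
    let align_offset = 128;                               // get_offset_for_align for 33..=64 bytes
    let page_offset = (ptr - align_offset) & (4096 - 1);  // position inside the 4 KiB ObjectPage
    page_offset / 64                                      // divide by the object size -> bitfield index
}
// e.g. the third object in a page based at 0x1000 sits at 0x1000 + 128 + 2 * 64,
// and slot_index_for_64_byte_object(0x1000 + 128 + 2 * 64) == 2.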

+ 329 - 0
kernel/crates/rust-slabmalloc/src/sc.rs

@@ -0,0 +1,329 @@
+//! A SCAllocator that can allocate fixed size objects.
+
+use core::mem;
+
+use crate::*;
+
+/// A genius(?) const min()
+///
+/// # What this does
+/// * create an array of the two elements you want to choose between
+/// * create an arbitrary boolean expression
+/// * cast said expression to a usize
+/// * use that value to index into the array created above
+///
+/// # Source
+/// https://stackoverflow.com/questions/53619695/calculating-maximum-value-of-a-set-of-constant-expressions-at-compile-time
+#[cfg(feature = "unstable")]
+const fn cmin(a: usize, b: usize) -> usize {
+    [a, b][(a > b) as usize]
+}
+
+/// The boring variant of min (not const).
+#[cfg(not(feature = "unstable"))]
+fn cmin(a: usize, b: usize) -> usize {
+    core::cmp::min(a, b)
+}
+
+/// A slab allocator allocates elements of a fixed size.
+///
+/// It maintains three internal lists of objects that implement `AllocablePage`
+/// from which it can allocate memory.
+///
+///  * `empty_slabs`: A list of pages that the SCAllocator maintains but that
+///    have 0 allocations in them; these can be given back to a requestor in case
+///    of reclamation.
+///  * `slabs`: A list of pages that are partially allocated and still have room for more.
+///  * `full_slabs`: A list of pages that are completely allocated.
+///
+/// On allocation we allocate memory from `slabs`, however if the list is empty
+/// we try to reclaim a page from `empty_slabs` before we return with an out-of-memory
+/// error. If a page becomes full after the allocation we move it from `slabs` to
+/// `full_slabs`.
+///
+/// Similarly, on deallocation we might move a page from `full_slabs` to `slabs`
+/// or from `slabs` to `empty_slabs` after we deallocated an object.
+///
+/// If an allocation returns `OutOfMemory` a client using SCAllocator can refill
+/// it using the `refill` function.
+pub struct SCAllocator<'a, P: AllocablePage> {
+    /// Maximum possible allocation size for this `SCAllocator`.
+    pub(crate) size: usize,
+    /// Keeps track of succeeded allocations.
+    pub(crate) allocation_count: usize,
+    /// max objects per page
+    pub(crate) obj_per_page: usize,
+    /// List of empty ObjectPages (nothing allocated in these).
+    pub(crate) empty_slabs: PageList<'a, P>,
+    /// List of partially used ObjectPage (some objects allocated but pages are not full).
+    pub(crate) slabs: PageList<'a, P>,
+    /// List of full ObjectPages (everything is allocated in these, so we don't need to search them).
+    pub(crate) full_slabs: PageList<'a, P>,
+}
+
+/// Creates an instance of an SCAllocator; we do this in a macro because we
+/// re-use the code in const and non-const functions.
+macro_rules! new_sc_allocator {
+    ($size:expr) => {
+        SCAllocator {
+            size: $size,
+            allocation_count: 0,
+            obj_per_page: cmin((P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD) / $size, 8 * 64),
+            empty_slabs: PageList::new(),
+            slabs: PageList::new(),
+            full_slabs: PageList::new(),
+        }
+    };
+}
+
+impl<'a, P: AllocablePage> SCAllocator<'a, P> {
+    const REBALANCE_COUNT: usize = 64;
+
+    /// Create a new SCAllocator.
+    #[cfg(feature = "unstable")]
+    pub const fn new(size: usize) -> SCAllocator<'a, P> {
+        new_sc_allocator!(size)
+    }
+
+    #[cfg(not(feature = "unstable"))]
+    pub fn new(size: usize) -> SCAllocator<'a, P> {
+        new_sc_allocator!(size)
+    }
+
+    /// Returns the maximum supported object size of this allocator.
+    pub fn size(&self) -> usize {
+        self.size
+    }
+
+    /// Add a new ObjectPage.
+    fn insert_partial_slab(&mut self, new_head: &'a mut P) {
+        self.slabs.insert_front(new_head);
+    }
+
+    /// Add page to empty list.
+    fn insert_empty(&mut self, new_head: &'a mut P) {
+        assert_eq!(
+            new_head as *const P as usize % P::SIZE,
+            0,
+            "Inserted page is not aligned to page-size."
+        );
+        self.empty_slabs.insert_front(new_head);
+    }
+
+    /// Since `dealloc` can not reassign pages without requiring a lock
+    /// we check slabs and full slabs periodically as part of `alloc`
+    /// and move them to the empty or partially allocated slab lists.
+    pub(crate) fn check_page_assignments(&mut self) {
+        for slab_page in self.full_slabs.iter_mut() {
+            if !slab_page.is_full() {
+                // We need to move it from self.full_slabs -> self.slabs
+                trace!("move {:p} full -> partial", slab_page);
+                self.move_full_to_partial(slab_page);
+            }
+        }
+
+        for slab_page in self.slabs.iter_mut() {
+            if slab_page.is_empty(self.obj_per_page) {
+                // We need to move it from self.slabs -> self.empty_slabs
+                trace!("move {:p} partial -> empty", slab_page);
+                self.move_to_empty(slab_page);
+            }
+        }
+    }
+
+    /// Move a page from `slabs` to `empty_slabs`.
+    fn move_to_empty(&mut self, page: &'a mut P) {
+        let page_ptr = page as *const P;
+
+        debug_assert!(self.slabs.contains(page_ptr));
+        debug_assert!(
+            !self.empty_slabs.contains(page_ptr),
+            "Page {:p} already in emtpy_slabs",
+            page_ptr
+        );
+
+        self.slabs.remove_from_list(page);
+        self.empty_slabs.insert_front(page);
+
+        debug_assert!(!self.slabs.contains(page_ptr));
+        debug_assert!(self.empty_slabs.contains(page_ptr));
+    }
+
+    /// Move a page from `slabs` to `full_slabs`.
+    fn move_partial_to_full(&mut self, page: &'a mut P) {
+        let page_ptr = page as *const P;
+
+        debug_assert!(self.slabs.contains(page_ptr));
+        debug_assert!(!self.full_slabs.contains(page_ptr));
+
+        self.slabs.remove_from_list(page);
+        self.full_slabs.insert_front(page);
+
+        debug_assert!(!self.slabs.contains(page_ptr));
+        debug_assert!(self.full_slabs.contains(page_ptr));
+    }
+
+    /// Move a page from `full_slabs` to `slabs`.
+    fn move_full_to_partial(&mut self, page: &'a mut P) {
+        let page_ptr = page as *const P;
+
+        debug_assert!(!self.slabs.contains(page_ptr));
+        debug_assert!(self.full_slabs.contains(page_ptr));
+
+        self.full_slabs.remove_from_list(page);
+        self.slabs.insert_front(page);
+
+        debug_assert!(self.slabs.contains(page_ptr));
+        debug_assert!(!self.full_slabs.contains(page_ptr));
+    }
+
+    /// Tries to allocate a block of memory with respect to the `layout`.
+    /// Searches within already allocated slab pages, if no suitable spot is found
+    /// will try to use a page from the empty page list.
+    ///
+    /// # Arguments
+    ///  * `sc_layout`: This is not the original layout but adjusted for the
+    ///     SCAllocator size (>= original).
+    fn try_allocate_from_pagelist(&mut self, sc_layout: Layout) -> *mut u8 {
+        // TODO: Do we really need to check multiple slab pages (due to alignment)
+        // If not we can get away with a singly-linked list and have 8 more bytes
+        // for the bitfield in an ObjectPage.
+
+        for slab_page in self.slabs.iter_mut() {
+            let ptr = slab_page.allocate(sc_layout);
+            if !ptr.is_null() {
+                if slab_page.is_full() {
+                    trace!("move {:p} partial -> full", slab_page);
+                    self.move_partial_to_full(slab_page);
+                }
+                self.allocation_count += 1;
+                return ptr;
+            } else {
+                continue;
+            }
+        }
+
+        // Periodically rebalance page-lists (since dealloc can't do it for us)
+        if self.allocation_count > SCAllocator::<P>::REBALANCE_COUNT {
+            self.check_page_assignments();
+            self.allocation_count = 0;
+        }
+
+        ptr::null_mut()
+    }
+
+    pub fn try_reclaim_pages<F>(&mut self, to_reclaim: usize, dealloc: &mut F) -> usize
+    where
+        F: FnMut(*mut P),
+    {
+        self.check_page_assignments();
+        let mut reclaimed = 0;
+        while reclaimed < to_reclaim {
+            if let Some(page) = self.empty_slabs.pop() {
+                dealloc(page as *mut P);
+                reclaimed += 1;
+            } else {
+                break;
+            }
+        }
+
+        reclaimed
+    }
+
+    /// Refill the SCAllocator
+    ///
+    /// # Safety
+    /// ObjectPage needs to be empty etc.
+    pub unsafe fn refill(&mut self, page: &'a mut P) {
+        page.bitfield_mut()
+            .initialize(self.size, P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD);
+        *page.prev() = Rawlink::none();
+        *page.next() = Rawlink::none();
+        trace!("adding page to SCAllocator {:p}", page);
+        self.insert_empty(page);
+    }
+
+    /// Allocates a block of memory described by `layout`.
+    ///
+    /// Returns a pointer to a valid region of memory or an
+    /// AllocationError.
+    ///
+    /// The function may also move around pages between lists
+    /// (empty -> partial or partial -> full).
+    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError> {
+        trace!(
+            "SCAllocator({}) is trying to allocate {:?}",
+            self.size,
+            layout
+        );
+        assert!(layout.size() <= self.size);
+        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
+        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
+        assert!(new_layout.size() >= layout.size());
+
+        let ptr = {
+            // Try to allocate from partial slabs,
+            // if we fail check if we have empty pages and allocate from there
+            let ptr = self.try_allocate_from_pagelist(new_layout);
+            if ptr.is_null() && self.empty_slabs.head.is_some() {
+                // Re-try allocation in empty page
+                let empty_page = self.empty_slabs.pop().expect("We checked head.is_some()");
+                debug_assert!(!self.empty_slabs.contains(empty_page));
+
+                let ptr = empty_page.allocate(layout);
+                debug_assert!(!ptr.is_null(), "Allocation must have succeeded here.");
+
+                trace!(
+                    "move {:p} empty -> partial empty count {}",
+                    empty_page,
+                    self.empty_slabs.elements
+                );
+                // Move empty page to partial pages
+                self.insert_partial_slab(empty_page);
+                ptr
+            } else {
+                ptr
+            }
+        };
+
+        let res = NonNull::new(ptr).ok_or(AllocationError::OutOfMemory);
+
+        if !ptr.is_null() {
+            trace!(
+                "SCAllocator({}) allocated ptr=0x{:x}",
+                self.size,
+                ptr as usize
+            );
+        }
+
+        res
+    }
+
+    /// Deallocates a previously allocated `ptr` described by `Layout`.
+    ///
+    /// May return an error in case an invalid `layout` is provided.
+    /// The function may also move internal slab pages between lists
+    /// (partial -> empty or full -> partial).
+    pub fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
+        assert!(layout.size() <= self.size);
+        assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD));
+        trace!(
+            "SCAllocator({}) is trying to deallocate ptr = {:p} layout={:?} P.size= {}",
+            self.size,
+            ptr,
+            layout,
+            P::SIZE
+        );
+
+        let page = (ptr.as_ptr() as usize) & !(P::SIZE - 1);
+
+        // Figure out which page we are on and construct a reference to it
+        // TODO: The linked list will have another &mut reference
+        let slab_page = unsafe { mem::transmute::<VAddr, &'a mut P>(page) };
+        let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
+
+        let ret = slab_page.deallocate(ptr, new_layout);
+        debug_assert!(ret.is_ok(), "Slab page deallocate won't fail at the moment");
+        ret
+    }
+}
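
A hedged usage sketch of `SCAllocator` (illustrative only, not part of this commit; it assumes `Box` is in scope from `alloc`/the prelude and that a heap exists to back `ObjectPage::new()`): hand the allocator one leaked 4 KiB page via `refill`, then allocate and free a 64-byte object.

use core::alloc::Layout;
use slabmalloc::{ObjectPage, SCAllocator};

fn sc_allocator_demo() {
    // A size-class allocator for 64-byte objects.
    let mut sa = SCAllocator::<ObjectPage>::new(64);

    // ObjectPage::new() returns a Box; leaking it yields the long-lived
    // mutable borrow that refill() expects.
    let page = Box::leak(ObjectPage::new());
    unsafe { sa.refill(page) };   // the page lands on `empty_slabs`

    let layout = Layout::from_size_align(64, 8).unwrap();
    let obj = sa.allocate(layout).expect("a fresh page satisfies a 64-byte request");
    sa.deallocate(obj, layout).expect("freeing with the same layout succeeds");
}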

+ 170 - 0
kernel/crates/rust-slabmalloc/src/zone.rs

@@ -0,0 +1,170 @@
+//! A ZoneAllocator to allocate arbitrary object sizes (up to `ZoneAllocator::MAX_ALLOC_SIZE`)
+//!
+//! The ZoneAllocator achieves this by having many `SCAllocator`
+
+use crate::*;
+
+/// Creates an instance of a zone, we do this in a macro because we
+/// re-use the code in const and non-const functions
+///
+/// We can get rid of this once the const fn feature is fully stabilized.
+macro_rules! new_zone {
+    () => {
+        ZoneAllocator {
+            // TODO(perf): We should probably pick better classes
+            // rather than powers-of-two (see SuperMalloc etc.)
+            small_slabs: [
+                SCAllocator::new(1 << 3),  // 8
+                SCAllocator::new(1 << 4),  // 16
+                SCAllocator::new(1 << 5),  // 32
+                SCAllocator::new(1 << 6),  // 64
+                SCAllocator::new(1 << 7),  // 128
+                SCAllocator::new(1 << 8),  // 256
+                SCAllocator::new(1 << 9),  // 512
+                SCAllocator::new(1 << 10), // 1024
+                SCAllocator::new(1 << 11), // 2048
+            ],
+        }
+    };
+}
+
+/// A zone allocator for arbitrary sized allocations.
+///
+/// Has a bunch of `SCAllocator`s and through them can serve allocation
+/// requests for many different object sizes (up to `ZoneAllocator::MAX_ALLOC_SIZE`) by selecting
+/// the right `SCAllocator` for allocation and deallocation.
+///
+/// The allocator provides a `refill` function to supply the underlying
+/// `SCAllocator` with more memory in case it runs out.
+pub struct ZoneAllocator<'a> {
+    small_slabs: [SCAllocator<'a, ObjectPage<'a>>; ZoneAllocator::MAX_BASE_SIZE_CLASSES],
+}
+
+impl<'a> Default for ZoneAllocator<'a> {
+    fn default() -> ZoneAllocator<'a> {
+        new_zone!()
+    }
+}
+
+enum Slab {
+    Base(usize),
+    Unsupported,
+}
+
+impl<'a> ZoneAllocator<'a> {
+    /// Maximum size that can be allocated (2 KiB; served from 4 KiB ObjectPages).
+    /// This is also the maximum object size that this allocator can handle.
+    pub const MAX_ALLOC_SIZE: usize = 1 << 11;
+
+    /// Maximum size which is allocated with ObjectPages (4 KiB pages).
+    ///
+    /// e.g. this is 4 KiB - 80 bytes of meta-data.
+    pub const MAX_BASE_ALLOC_SIZE: usize = 256;
+
+    /// How many allocators of type SCAllocator<ObjectPage> we have.
+    const MAX_BASE_SIZE_CLASSES: usize = 9;
+
+    #[cfg(feature = "unstable")]
+    pub const fn new() -> ZoneAllocator<'a> {
+        new_zone!()
+    }
+
+    #[cfg(not(feature = "unstable"))]
+    pub fn new() -> ZoneAllocator<'a> {
+        new_zone!()
+    }
+
+    /// Return maximum size an object of size `current_size` can use.
+    ///
+    /// Used to optimize `realloc`.
+    pub fn get_max_size(current_size: usize) -> Option<usize> {
+        match current_size {
+            0..=8 => Some(8),
+            9..=16 => Some(16),
+            17..=32 => Some(32),
+            33..=64 => Some(64),
+            65..=128 => Some(128),
+            129..=256 => Some(256),
+            257..=512 => Some(512),
+            513..=1024 => Some(1024),
+            1025..=2048 => Some(2048),
+            _ => None,
+        }
+    }
+
+    /// Figure out index into zone array to get the correct slab allocator for that size.
+    fn get_slab(requested_size: usize) -> Slab {
+        match requested_size {
+            0..=8 => Slab::Base(0),
+            9..=16 => Slab::Base(1),
+            17..=32 => Slab::Base(2),
+            33..=64 => Slab::Base(3),
+            65..=128 => Slab::Base(4),
+            129..=256 => Slab::Base(5),
+            257..=512 => Slab::Base(6),
+            513..=1024 => Slab::Base(7),
+            1025..=2048 => Slab::Base(8),
+            _ => Slab::Unsupported,
+        }
+    }
+
+    /// Reclaims empty pages by calling `dealloc` on them and removing them from the
+    /// empty lists in the [`SCAllocator`].
+    ///
+    /// The `dealloc` function is called at most `to_reclaim` times.
+    pub fn try_reclaim_base_pages<F>(&mut self, mut to_reclaim: usize, mut dealloc: F)
+    where
+        F: Fn(*mut ObjectPage),
+    {
+        for i in 0..ZoneAllocator::MAX_BASE_SIZE_CLASSES {
+            let slab = &mut self.small_slabs[i];
+            let just_reclaimed = slab.try_reclaim_pages(to_reclaim, &mut dealloc);
+            to_reclaim = to_reclaim.saturating_sub(just_reclaimed);
+            if to_reclaim == 0 {
+                break;
+            }
+        }
+    }
+}
+
+unsafe impl<'a> crate::Allocator<'a> for ZoneAllocator<'a> {
+    /// Allocate a pointer to a block of memory described by `layout`.
+    fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocationError> {
+        match ZoneAllocator::get_slab(layout.size()) {
+            Slab::Base(idx) => self.small_slabs[idx].allocate(layout),
+            Slab::Unsupported => Err(AllocationError::InvalidLayout),
+        }
+    }
+
+    /// Deallocates a pointer to a block of memory, which was
+    /// previously allocated by `allocate`.
+    ///
+    /// # Arguments
+    ///  * `ptr` - Address of the memory location to free.
+    ///  * `layout` - Memory layout of the block pointed to by `ptr`.
+    fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> Result<(), AllocationError> {
+        match ZoneAllocator::get_slab(layout.size()) {
+            Slab::Base(idx) => self.small_slabs[idx].deallocate(ptr, layout),
+            Slab::Unsupported => Err(AllocationError::InvalidLayout),
+        }
+    }
+
+    /// Refills the SCAllocator for a given Layout with an ObjectPage.
+    ///
+    /// # Safety
+    /// ObjectPage needs to be empty etc.
+    unsafe fn refill(
+        &mut self,
+        layout: Layout,
+        new_page: &'a mut ObjectPage<'a>,
+    ) -> Result<(), AllocationError> {
+        match ZoneAllocator::get_slab(layout.size()) {
+            Slab::Base(idx) => {
+                self.small_slabs[idx].refill(new_page);
+                Ok(())
+            }
+            Slab::Unsupported => Err(AllocationError::InvalidLayout),
+        }
+    }
+}
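
For orientation, the size classes above round every request up to the next power of two, and anything larger than 2048 bytes is rejected. A tiny hedged check (illustrative only, not part of the crate) using the public `get_max_size` helper:

use slabmalloc::ZoneAllocator;

fn zone_size_class_examples() {
    // A 100-byte request is served by the 128-byte class, so a realloc can
    // grow in place up to 128 bytes without moving the object.
    assert_eq!(ZoneAllocator::get_max_size(100), Some(128));
    // Requests above 2048 bytes are not handled by the zone at all.
    assert_eq!(ZoneAllocator::get_max_size(3000), None);
}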

+ 70 - 37
kernel/src/mm/allocator/kernel_allocator.rs

@@ -1,4 +1,4 @@
-use klog_types::AllocLogItem;
+use klog_types::{AllocLogItem, LogSource};
 
 use crate::{
     arch::mm::LockedFrameAllocator,
@@ -13,7 +13,10 @@ use core::{
     ptr::NonNull,
 };
 
-use super::page_frame::{FrameAllocator, PageFrameCount};
+use super::{
+    page_frame::{FrameAllocator, PageFrameCount},
+    slab::{slab_init_state, SLABALLOCATOR},
+};
 
 /// Trait that a kmalloc-like allocator should implement
 pub trait LocalAlloc {
@@ -59,25 +62,43 @@ impl KernelAllocator {
 /// Implement the LocalAlloc trait for the kernel allocator
 impl LocalAlloc for KernelAllocator {
     unsafe fn local_alloc(&self, layout: Layout) -> *mut u8 {
-        return self
-            .alloc_in_buddy(layout)
-            .map(|x| x.as_mut_ptr())
-            .unwrap_or(core::ptr::null_mut());
+        if allocator_select_condition(layout) {
+            return self
+                .alloc_in_buddy(layout)
+                .map(|x| x.as_mut_ptr())
+                .unwrap_or(core::ptr::null_mut());
+        } else {
+            if let Some(ref mut slab) = SLABALLOCATOR {
+                return slab.allocate(layout);
+            };
+            return core::ptr::null_mut();
+        }
     }
 
     unsafe fn local_alloc_zeroed(&self, layout: Layout) -> *mut u8 {
-        return self
-            .alloc_in_buddy(layout)
-            .map(|x| {
-                let ptr: *mut u8 = x.as_mut_ptr();
-                core::ptr::write_bytes(ptr, 0, x.len());
-                ptr
-            })
-            .unwrap_or(core::ptr::null_mut());
+        if allocator_select_condition(layout) {
+            return self
+                .alloc_in_buddy(layout)
+                .map(|x| {
+                    let ptr: *mut u8 = x.as_mut_ptr();
+                    core::ptr::write_bytes(ptr, 0, x.len());
+                    ptr
+                })
+                .unwrap_or(core::ptr::null_mut());
+        } else {
+            if let Some(ref mut slab) = SLABALLOCATOR {
+                return slab.allocate(layout);
+            };
+            return core::ptr::null_mut();
+        }
     }
 
     unsafe fn local_dealloc(&self, ptr: *mut u8, layout: Layout) {
-        self.free_in_buddy(ptr, layout);
+        if allocator_select_condition(layout) || ((ptr as usize) % 4096) == 0 {
+            self.free_in_buddy(ptr, layout)
+        } else if let Some(ref mut slab) = SLABALLOCATOR {
+            slab.deallocate(ptr, layout).unwrap()
+        }
     }
 }
 
@@ -85,41 +106,53 @@ impl LocalAlloc for KernelAllocator {
 unsafe impl GlobalAlloc for KernelAllocator {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
         let r = self.local_alloc_zeroed(layout);
-        mm_debug_log(
-            klog_types::AllocatorLogType::Alloc(AllocLogItem::new(layout, Some(r as usize), None)),
-            klog_types::LogSource::Buddy,
-        );
-
+        if allocator_select_condition(layout) {
+            alloc_debug_log(klog_types::LogSource::Buddy, layout, r);
+        } else {
+            alloc_debug_log(klog_types::LogSource::Slab, layout, r);
+        }
         return r;
-
-        // self.local_alloc_zeroed(layout, 0)
     }
 
     unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
         let r = self.local_alloc_zeroed(layout);
-
-        mm_debug_log(
-            klog_types::AllocatorLogType::AllocZeroed(AllocLogItem::new(
-                layout,
-                Some(r as usize),
-                None,
-            )),
-            klog_types::LogSource::Buddy,
-        );
-
+        if allocator_select_condition(layout) {
+            alloc_debug_log(klog_types::LogSource::Buddy, layout, r);
+        } else {
+            alloc_debug_log(klog_types::LogSource::Slab, layout, r);
+        }
         return r;
     }
 
     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        mm_debug_log(
-            klog_types::AllocatorLogType::Free(AllocLogItem::new(layout, Some(ptr as usize), None)),
-            klog_types::LogSource::Buddy,
-        );
-
+        if allocator_select_condition(layout) || ((ptr as usize) % 4096) == 0 {
+            dealloc_debug_log(klog_types::LogSource::Buddy, layout, ptr);
+        } else {
+            dealloc_debug_log(klog_types::LogSource::Slab, layout, ptr);
+        }
         self.local_dealloc(ptr, layout);
     }
 }
 
+/// Decide whether to use the buddy allocator or the slab allocator
+fn allocator_select_condition(layout: Layout) -> bool {
+    layout.size() > 2048 || !slab_init_state()
+}
+
+fn alloc_debug_log(source: LogSource, layout: Layout, ptr: *mut u8) {
+    mm_debug_log(
+        klog_types::AllocatorLogType::Alloc(AllocLogItem::new(layout, Some(ptr as usize), None)),
+        source,
+    )
+}
+
+fn dealloc_debug_log(source: LogSource, layout: Layout, ptr: *mut u8) {
+    mm_debug_log(
+        klog_types::AllocatorLogType::Free(AllocLogItem::new(layout, Some(ptr as usize), None)),
+        source,
+    )
+}
+
 /// Implement the Allocator trait for the kernel slab allocator
 // unsafe impl Allocator for KernelAllocator {
 //     fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
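
The routing rule in `allocator_select_condition` means that, once `slab_init()` has run, requests of at most 2048 bytes are served by the slab allocator, while everything larger (or anything issued before initialization) still goes to the buddy allocator. A hedged illustration with a hypothetical helper (not part of the commit):

use core::alloc::Layout;

/// Hypothetical inverse of the routing rule above.
fn would_use_slab(layout: Layout) -> bool {
    !allocator_select_condition(layout)
}
// e.g. would_use_slab(Layout::from_size_align(512, 8).unwrap())     -> true once slab_init() ran
//      would_use_slab(Layout::from_size_align(4096, 4096).unwrap()) -> false (buddy path)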

+ 56 - 103
kernel/src/mm/allocator/slab.rs

@@ -1,123 +1,76 @@
 //! The slab allocator is currently not used; it will be merged into the mainline after further refinement
 #![allow(dead_code)]
 
-use core::alloc::Layout;
+use core::{alloc::Layout, ptr::NonNull, sync::atomic::AtomicBool};
 
-// Defines a Slab, used to hold free blocks
-pub struct Slab {
-    block_size: usize,
-    free_block_list: FreeBlockList,
-}
+use alloc::boxed::Box;
+use slabmalloc::*;
 
-impl Slab {
-    /// @brief: Initialize a slab
-    /// @param {usize} start_addr
-    /// @param {usize} slab_size
-    /// @param {usize} block_size
-    pub unsafe fn new(start_addr: usize, slab_size: usize, block_size: usize) -> Slab {
-        let blocks_num = slab_size / block_size;
-        return Slab {
-            block_size,
-            free_block_list: FreeBlockList::new(start_addr, block_size, blocks_num),
-        };
-    }
+// Global slab allocator
+pub(crate) static mut SLABALLOCATOR: Option<SlabAllocator> = None;
 
-    /// @brief: Get the number of available blocks in the slab
-    pub fn used_blocks(&self) -> usize {
-        return self.free_block_list.len();
-    }
+// Slab initialization state
+pub(crate) static mut SLABINITSTATE: AtomicBool = AtomicBool::new(false);
 
-    /// @brief: Grow the free_block_list
-    /// @param {*} mut
-    /// @param {usize} start_addr
-    /// @param {usize} slab_size
-    pub fn grow(&mut self, start_addr: usize, slab_size: usize) {
-        let num_of_blocks = slab_size / self.block_size;
-        let mut block_list =
-            unsafe { FreeBlockList::new(start_addr, self.block_size, num_of_blocks) };
-        // Append the new list to the back of the original list
-        while let Some(block) = block_list.pop() {
-            self.free_block_list.push(block);
-        }
-    }
-    /// @brief: Allocate a block from the slab
-    /// @return the address of the allocated memory
-    pub fn allocate(&mut self, _layout: Layout) -> Option<*mut u8> {
-        match self.free_block_list.pop() {
-            Some(block) => return Some(block.addr() as *mut u8),
-            None => return None,
-        }
-    }
-    /// @brief: Return the block to the slab
-    pub fn free(&mut self, ptr: *mut u8) {
-        let ptr = ptr as *mut FreeBlock;
-        unsafe {
-            self.free_block_list.push(&mut *ptr);
-        }
-    }
-}
-/// Free blocks in a slab
-struct FreeBlockList {
-    len: usize,
-    head: Option<&'static mut FreeBlock>,
+/// The slab allocator is really a collection of small allocators into which 4 KiB pages can be installed;
+/// these allocators are then used to hand out differently sized memory to objects.
+pub(crate) struct SlabAllocator {
+    zone: ZoneAllocator<'static>,
 }
 
-impl FreeBlockList {
-    unsafe fn new(start_addr: usize, block_size: usize, num_of_blocks: usize) -> FreeBlockList {
-        let mut new_list = FreeBlockList::new_empty();
-        for i in (0..num_of_blocks).rev() {
-            // Allocate from back to front to avoid memory fragmentation
-            let new_block = (start_addr + i * block_size) as *mut FreeBlock;
-            new_list.push(&mut *new_block);
+impl SlabAllocator {
+    /// Create the slab allocator
+    pub fn new() -> SlabAllocator {
+        kdebug!("trying to new a slab_allocator");
+        SlabAllocator {
+            zone: ZoneAllocator::new(),
         }
-        return new_list;
-    }
-
-    fn new_empty() -> FreeBlockList {
-        return FreeBlockList { len: 0, head: None };
-    }
-
-    fn len(&self) -> usize {
-        return self.len;
     }
 
-    /// @brief: Pop a free block off the list
-    fn pop(&mut self) -> Option<&'static mut FreeBlock> {
-        // Pop a free block off the list
-        let block = self.head.take().map(|node| {
-            self.head = node.next.take();
-            self.len -= 1;
-            node
-        });
-        return block;
-    }
-
-    /// @brief: Push a free block onto the list
-    fn push(&mut self, free_block: &'static mut FreeBlock) {
-        free_block.next = self.head.take();
-        self.len += 1;
-        self.head = Some(free_block);
-    }
-
-    fn is_empty(&self) -> bool {
-        return self.head.is_none();
+    /// Allocate memory for an object (up to 2 KiB)
+    pub(crate) unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
+        match self.zone.allocate(layout) {
+            Ok(nptr) => nptr.as_ptr(),
+            Err(AllocationError::OutOfMemory) => {
+                let boxed_page = ObjectPage::new();
+                let leaked_page = Box::leak(boxed_page);
+                self.zone
+                    .refill(layout, leaked_page)
+                    .expect("Could not refill?");
+                self.zone
+                    .allocate(layout)
+                    .expect("Should succeed after refill")
+                    .as_ptr()
+            }
+            Err(AllocationError::InvalidLayout) => panic!("Can't allocate this size"),
+        }
     }
-}
 
-impl Drop for FreeBlockList {
-    fn drop(&mut self) {
-        while self.pop().is_some() {}
+    /// Free the memory
+    pub(crate) unsafe fn deallocate(
+        &mut self,
+        ptr: *mut u8,
+        layout: Layout,
+    ) -> Result<(), AllocationError> {
+        if let Some(nptr) = NonNull::new(ptr) {
+            self.zone
+                .deallocate(nptr, layout)
+                .expect("Couldn't deallocate");
+            return Ok(());
+        } else {
+            return Ok(());
+        }
     }
 }
 
-struct FreeBlock {
-    next: Option<&'static mut FreeBlock>,
+/// Initialize the slab allocator
+pub unsafe fn slab_init() {
+    kdebug!("trying to init a slab_allocator");
+    SLABALLOCATOR = Some(SlabAllocator::new());
+    SLABINITSTATE = true.into();
 }
 
-impl FreeBlock {
-    /// @brief: Get the address of the FreeBlock
-    /// @return {*}
-    fn addr(&self) -> usize {
-        return self as *const _ as usize;
-    }
+// Check the slab initialization state
+pub fn slab_init_state() -> bool {
+    unsafe { *SLABINITSTATE.get_mut() }
 }
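
Putting the pieces together, the intended order is: `slab_init()` runs first (wired into `mm_init()` in the next file), after which sub-2K allocations can be served from `SLABALLOCATOR`. A hedged sketch of that flow (hypothetical function, illustrative only):

unsafe fn slab_smoke_test() {
    slab_init();
    debug_assert!(slab_init_state());

    if let Some(ref mut slab) = SLABALLOCATOR {
        let layout = core::alloc::Layout::from_size_align(64, 8).unwrap();
        // allocate() pulls a fresh 4 KiB ObjectPage from the global (buddy) heap on demand.
        let ptr = slab.allocate(layout);
        debug_assert!(!ptr.is_null());
        slab.deallocate(ptr, layout).unwrap();
    }
}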

+ 4 - 1
kernel/src/mm/init.rs

@@ -6,7 +6,7 @@ use crate::{
     filesystem::procfs::kmsg::kmsg_init,
     ipc::shm::shm_manager_init,
     libs::printk::PrintkWriter,
-    mm::{mmio_buddy::mmio_init, page::page_manager_init},
+    mm::{allocator::slab::slab_init, mmio_buddy::mmio_init, page::page_manager_init},
 };
 
 use super::MemoryManagementArch;
@@ -44,6 +44,9 @@ pub unsafe fn mm_init() {
 
     MMArch::init();
 
+    // init slab
+    slab_init();
+
     // enable mmio
     mmio_init();
     // enable KMSG