use core::{
    fmt::{self, Debug, Error, Formatter},
    marker::PhantomData,
    mem,
    ops::Add,
    sync::atomic::{compiler_fence, Ordering},
};

use alloc::sync::Arc;
use hashbrown::{HashMap, HashSet};

use crate::{
    arch::{interrupt::ipi::send_ipi, MMArch},
    exception::ipi::{IpiKind, IpiTarget},
    kerror, kinfo, kwarn,
    libs::spinlock::{SpinLock, SpinLockGuard},
};

use super::{
    allocator::page_frame::FrameAllocator, syscall::ProtFlags, ucontext::LockedVMA,
    MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr,
};
pub const PAGE_4K_SHIFT: usize = 12;
#[allow(dead_code)]
pub const PAGE_2M_SHIFT: usize = 21;
pub const PAGE_1G_SHIFT: usize = 30;

pub static mut PAGE_MANAGER: Option<SpinLock<PageManager>> = None;
/// Initialize the global page manager.
pub fn page_manager_init() {
    kinfo!("page_manager_init");

    let page_manager = SpinLock::new(PageManager::new());

    compiler_fence(Ordering::SeqCst);
    unsafe { PAGE_MANAGER = Some(page_manager) };
    compiler_fence(Ordering::SeqCst);

    kinfo!("page_manager_init done");
}

/// Acquire the global page manager with interrupts saved and disabled.
pub fn page_manager_lock_irqsave() -> SpinLockGuard<'static, PageManager> {
    unsafe { PAGE_MANAGER.as_ref().unwrap().lock_irqsave() }
}
/// Tracks per-frame `Page` metadata, keyed by physical address.
pub struct PageManager {
    phys2page: HashMap<PhysAddr, Page>,
}

impl PageManager {
    pub fn new() -> Self {
        Self {
            phys2page: HashMap::new(),
        }
    }

    pub fn get_mut(&mut self, paddr: &PhysAddr) -> &mut Page {
        self.phys2page.get_mut(paddr).unwrap()
    }

    pub fn insert(&mut self, paddr: PhysAddr, page: Page) {
        self.phys2page.insert(paddr, page);
    }

    pub fn remove_page(&mut self, paddr: &PhysAddr) {
        self.phys2page.remove(paddr);
    }
}
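
// Illustrative sketch (hypothetical helper, not called anywhere in this
// module): the typical lifecycle of a `Page` record, from allocation to
// reverse-mapping a VMA. Assumes `paddr` was just handed out by the frame
// allocator and `vma` is an `Arc<LockedVMA>` owned by the caller.
#[allow(dead_code)]
fn page_manager_usage_sketch(paddr: PhysAddr, vma: Arc<LockedVMA>) {
    let mut manager = page_manager_lock_irqsave();
    // Register the freshly allocated frame as private (not shared).
    manager.insert(paddr, Page::new(false));
    // Record the reverse mapping so the frame can later be found via rmap.
    manager.get_mut(&paddr).insert_vma(vma);
}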
pub struct Page {
    /// Number of VMAs that currently map this physical page.
    map_count: usize,
    /// Whether the page is shared between address spaces.
    shared: bool,
    /// Reverse mapping: the anonymous VMAs that map this page.
    anon_vma: HashSet<Arc<LockedVMA>>,
}

impl Page {
    pub fn new(shared: bool) -> Self {
        Self {
            map_count: 0,
            shared,
            anon_vma: HashSet::new(),
        }
    }

    /// Add a VMA to the reverse mapping and bump the map count.
    pub fn insert_vma(&mut self, vma: Arc<LockedVMA>) {
        self.anon_vma.insert(vma);
        self.map_count += 1;
    }

    /// Remove a VMA from the reverse mapping and drop the map count.
    pub fn remove_vma(&mut self, vma: &LockedVMA) {
        self.anon_vma.remove(vma);
        self.map_count -= 1;
    }

    /// A page may be freed once nothing maps it and it is not shared.
    pub fn can_deallocate(&self) -> bool {
        self.map_count == 0 && !self.shared
    }
}
/// A page table at some level of the paging hierarchy.
#[derive(Debug)]
pub struct PageTable<Arch> {
    /// The first virtual address covered by this page table.
    base: VirtAddr,
    /// The physical address of the table itself.
    phys: PhysAddr,
    /// Paging level: 0 is the leaf (page) level.
    level: usize,
    phantom: PhantomData<Arch>,
}
#[allow(dead_code)]
impl<Arch: MemoryManagementArch> PageTable<Arch> {
    pub unsafe fn new(base: VirtAddr, phys: PhysAddr, level: usize) -> Self {
        Self {
            base,
            phys,
            level,
            phantom: PhantomData,
        }
    }

    /// Get the top-level page table of the given kind.
    ///
    /// ## Parameters
    ///
    /// - `table_kind`: which address space's table register to read.
    ///
    /// ## Returns
    ///
    /// The top-level page table currently installed in hardware.
    pub unsafe fn top_level_table(table_kind: PageTableKind) -> Self {
        return Self::new(
            VirtAddr::new(0),
            Arch::table(table_kind),
            Arch::PAGE_LEVELS - 1,
        );
    }

    /// Physical address of this page table.
    #[inline(always)]
    pub fn phys(&self) -> PhysAddr {
        self.phys
    }

    /// The first virtual address covered by this page table.
    #[inline(always)]
    pub fn base(&self) -> VirtAddr {
        self.base
    }

    /// Paging level of this table; 0 is the leaf (page) level.
    #[inline(always)]
    pub fn level(&self) -> usize {
        self.level
    }

    /// Virtual address through which this table's entries can be accessed.
    #[inline(always)]
    pub unsafe fn virt(&self) -> VirtAddr {
        return Arch::phys_2_virt(self.phys).unwrap();
    }

    /// The first virtual address covered by the `i`-th entry, or `None` if
    /// `i` is out of range.
    pub fn entry_base(&self, i: usize) -> Option<VirtAddr> {
        if i < Arch::PAGE_ENTRY_NUM {
            let shift = self.level * Arch::PAGE_ENTRY_SHIFT + Arch::PAGE_SHIFT;
            return Some(self.base.add(i << shift));
        } else {
            return None;
        }
    }

    /// Virtual address of the `i`-th entry itself, or `None` if out of range.
    pub unsafe fn entry_virt(&self, i: usize) -> Option<VirtAddr> {
        if i < Arch::PAGE_ENTRY_NUM {
            return Some(self.virt().add(i * Arch::PAGE_ENTRY_SIZE));
        } else {
            return None;
        }
    }

    /// Read the `i`-th entry, or `None` if `i` is out of range.
    pub unsafe fn entry(&self, i: usize) -> Option<PageEntry<Arch>> {
        let entry_virt = self.entry_virt(i)?;
        return Some(PageEntry::from_usize(Arch::read::<usize>(entry_virt)));
    }

    /// Write the `i`-th entry, or return `None` if `i` is out of range.
    pub unsafe fn set_entry(&self, i: usize, entry: PageEntry<Arch>) -> Option<()> {
        let entry_virt = self.entry_virt(i)?;
        Arch::write::<usize>(entry_virt, entry.data());
        return Some(());
    }

    /// Whether the `i`-th entry is mapped (non-zero), or `None` if `i` is
    /// out of range.
    pub fn entry_mapped(&self, i: usize) -> Option<bool> {
        let etv = unsafe { self.entry_virt(i) }?;
        if unsafe { Arch::read::<usize>(etv) } != 0 {
            return Some(true);
        } else {
            return Some(false);
        }
    }

    /// Index of the entry covering `addr` within this table, or `None` if
    /// `addr` lies outside the range this table covers.
    pub unsafe fn index_of(&self, addr: VirtAddr) -> Option<usize> {
        let addr = VirtAddr::new(addr.data() & Arch::PAGE_ADDRESS_MASK);
        let shift = self.level * Arch::PAGE_ENTRY_SHIFT + Arch::PAGE_SHIFT;
        let mask = (Arch::PAGE_ENTRY_NUM << shift) - 1;
        if addr < self.base || addr >= self.base.add(mask) {
            return None;
        } else {
            return Some((addr.data() >> shift) & Arch::PAGE_ENTRY_MASK);
        }
    }

    /// The next-level table reached through entry `index`, or `None` if this
    /// is already a leaf table or the entry holds no valid address.
    pub unsafe fn next_level_table(&self, index: usize) -> Option<Self> {
        if self.level == 0 {
            return None;
        }

        // The child table covers the entry's address range, one level down.
        return Some(PageTable::new(
            self.entry_base(index)?,
            self.entry(index)?.address().ok()?,
            self.level - 1,
        ));
    }
}
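
// Sketch: a software page walk built from the accessors above, resolving the
// leaf entry that covers `vaddr`. Hypothetical helper for illustration only;
// `PageMapper::visit` below is the in-tree equivalent. Assumes the walk runs
// on the currently installed kernel table and that `PageTableKind` has a
// `Kernel` variant alongside the `User` one used later in this file.
#[allow(dead_code)]
unsafe fn walk_to_leaf_sketch<Arch: MemoryManagementArch>(
    vaddr: VirtAddr,
) -> Option<PageEntry<Arch>> {
    let mut table = PageTable::<Arch>::top_level_table(PageTableKind::Kernel);
    loop {
        let i = table.index_of(vaddr)?;
        if table.level() == 0 {
            // Leaf level: this entry maps the final page frame.
            return table.entry(i);
        }
        // Descend through the present entry to the next level.
        table = table.next_level_table(i)?;
    }
}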
/// A single page table entry.
#[derive(Copy, Clone)]
pub struct PageEntry<Arch> {
    data: usize,
    phantom: PhantomData<Arch>,
}

impl<Arch> Debug for PageEntry<Arch> {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.write_fmt(format_args!("PageEntry({:#x})", self.data))
    }
}

impl<Arch: MemoryManagementArch> PageEntry<Arch> {
    #[inline(always)]
    pub fn new(paddr: PhysAddr, flags: PageFlags<Arch>) -> Self {
        Self {
            data: MMArch::make_entry(paddr, flags.data()),
            phantom: PhantomData,
        }
    }

    #[inline(always)]
    pub fn from_usize(data: usize) -> Self {
        Self {
            data,
            phantom: PhantomData,
        }
    }

    #[inline(always)]
    pub fn data(&self) -> usize {
        self.data
    }

    /// Physical address stored in this entry.
    ///
    /// ## Returns
    ///
    /// - `Ok(paddr)` if the entry is present.
    /// - `Err(paddr)` if the entry is not present.
    #[inline(always)]
    pub fn address(&self) -> Result<PhysAddr, PhysAddr> {
        let paddr: PhysAddr = {
            #[cfg(target_arch = "x86_64")]
            {
                PhysAddr::new(self.data & Arch::PAGE_ADDRESS_MASK)
            }

            #[cfg(target_arch = "riscv64")]
            {
                // Extract the physical page number (PPN) field, which
                // starts at bit 10 of the entry.
                let ppn = ((self.data & (!((1 << 10) - 1))) >> 10) & ((1 << 54) - 1);
                super::allocator::page_frame::PhysPageFrame::from_ppn(ppn).phys_address()
            }
        };

        if self.present() {
            Ok(paddr)
        } else {
            Err(paddr)
        }
    }

    #[inline(always)]
    pub fn flags(&self) -> PageFlags<Arch> {
        unsafe { PageFlags::from_data(self.data & Arch::ENTRY_FLAGS_MASK) }
    }

    #[inline(always)]
    pub fn set_flags(&mut self, flags: PageFlags<Arch>) {
        self.data = (self.data & !Arch::ENTRY_FLAGS_MASK) | flags.data();
    }

    #[inline(always)]
    pub fn present(&self) -> bool {
        return self.data & Arch::ENTRY_FLAG_PRESENT != 0;
    }
}
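
// Sketch: round-tripping an entry through `new()` and `address()`, assuming
// `paddr` is a page-aligned frame address. Hypothetical helper for
// illustration only.
#[allow(dead_code)]
fn page_entry_roundtrip_sketch(paddr: PhysAddr) {
    let flags: PageFlags<MMArch> = PageFlags::new().set_write(true);
    let entry = PageEntry::new(paddr, flags);
    // Present entries yield Ok(paddr); non-present ones yield Err(paddr).
    match entry.address() {
        Ok(addr) | Err(addr) => debug_assert_eq!(addr, paddr),
    }
}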
/// Flag bits of a page table entry.
#[derive(Copy, Clone, Hash)]
pub struct PageFlags<Arch> {
    data: usize,
    phantom: PhantomData<Arch>,
}

#[allow(dead_code)]
impl<Arch: MemoryManagementArch> PageFlags<Arch> {
    #[inline(always)]
    pub fn new() -> Self {
        let mut r = unsafe {
            Self::from_data(
                Arch::ENTRY_FLAG_DEFAULT_PAGE
                    | Arch::ENTRY_FLAG_READONLY
                    | Arch::ENTRY_FLAG_NO_EXEC,
            )
        };

        #[cfg(target_arch = "x86_64")]
        {
            // If the XD bit is reserved on this CPU, pages cannot be marked
            // non-executable, so force the executable bit on.
            if crate::arch::mm::X86_64MMArch::is_xd_reserved() {
                r = r.set_execute(true);
            }
        }

        return r;
    }

    /// Build page flags from mmap-style protection flags.
    ///
    /// ## Parameters
    ///
    /// - `prot_flags`: the requested protection.
    /// - `user`: whether the page should be accessible from user mode.
    pub fn from_prot_flags(prot_flags: ProtFlags, user: bool) -> PageFlags<Arch> {
        let flags: PageFlags<Arch> = PageFlags::new()
            .set_user(user)
            .set_execute(prot_flags.contains(ProtFlags::PROT_EXEC))
            .set_write(prot_flags.contains(ProtFlags::PROT_WRITE));
        return flags;
    }

    #[inline(always)]
    pub fn data(&self) -> usize {
        self.data
    }

    #[inline(always)]
    pub const unsafe fn from_data(data: usize) -> Self {
        return Self {
            data,
            phantom: PhantomData,
        };
    }
    /// Flags for a freshly created page *table* (non-leaf) entry.
    ///
    /// ## Parameters
    ///
    /// - `user`: whether user mode may walk through this table entry.
    #[inline(always)]
    pub fn new_page_table(user: bool) -> Self {
        return unsafe {
            let r = {
                #[cfg(target_arch = "x86_64")]
                {
                    Self::from_data(Arch::ENTRY_FLAG_DEFAULT_TABLE | Arch::ENTRY_FLAG_READWRITE)
                }

                #[cfg(target_arch = "riscv64")]
                {
                    // On RISC-V, a non-leaf entry must leave R/W/X clear.
                    Self::from_data(Arch::ENTRY_FLAG_DEFAULT_TABLE)
                }
            };

            if user {
                r.set_user(true)
            } else {
                r
            }
        };
    }
    /// Set or clear the given flag bits.
    ///
    /// ## Parameters
    ///
    /// - `flag`: the flag bits to change.
    /// - `value`: `true` to set them, `false` to clear them.
    #[inline(always)]
    #[must_use]
    pub fn update_flags(mut self, flag: usize, value: bool) -> Self {
        if value {
            self.data |= flag;
        } else {
            self.data &= !flag;
        }
        return self;
    }

    #[inline(always)]
    pub fn has_flag(&self, flag: usize) -> bool {
        return self.data & flag == flag;
    }

    #[inline(always)]
    pub fn present(&self) -> bool {
        return self.has_flag(Arch::ENTRY_FLAG_PRESENT);
    }

    /// Set whether the page is accessible from user mode.
    #[must_use]
    #[inline(always)]
    pub fn set_user(self, value: bool) -> Self {
        return self.update_flags(Arch::ENTRY_FLAG_USER, value);
    }

    /// Whether the page is accessible from user mode.
    #[inline(always)]
    pub fn has_user(&self) -> bool {
        return self.has_flag(Arch::ENTRY_FLAG_USER);
    }

    /// Set whether the page is writable.
    ///
    /// ## Parameters
    ///
    /// - `value`: `true` for read-write, `false` for read-only.
    #[must_use]
    #[inline(always)]
    pub fn set_write(self, value: bool) -> Self {
        #[cfg(target_arch = "x86_64")]
        {
            // x86: keep the read-only and read-write encodings consistent
            // by updating both flag sets.
            return self
                .update_flags(Arch::ENTRY_FLAG_READONLY, !value)
                .update_flags(Arch::ENTRY_FLAG_READWRITE, value);
        }

        #[cfg(target_arch = "riscv64")]
        {
            if value {
                return self.update_flags(Arch::ENTRY_FLAG_READWRITE, true);
            } else {
                return self.update_flags(Arch::ENTRY_FLAG_READONLY, true);
            }
        }
    }

    /// Whether the page is writable.
    #[inline(always)]
    pub fn has_write(&self) -> bool {
        // Writable only if the read-write bits are set and the read-only
        // bits are clear.
        return self.data & (Arch::ENTRY_FLAG_READWRITE | Arch::ENTRY_FLAG_READONLY)
            == Arch::ENTRY_FLAG_READWRITE;
    }

    /// Set whether the page is executable.
    #[must_use]
    #[inline(always)]
    pub fn set_execute(self, mut value: bool) -> Self {
        #[cfg(target_arch = "x86_64")]
        {
            // If the XD bit is reserved, the page must stay executable.
            if crate::arch::mm::X86_64MMArch::is_xd_reserved() {
                value = true;
            }
        }

        return self
            .update_flags(Arch::ENTRY_FLAG_NO_EXEC, !value)
            .update_flags(Arch::ENTRY_FLAG_EXEC, value);
    }

    /// Whether the page is executable.
    #[inline(always)]
    pub fn has_execute(&self) -> bool {
        // Executable only if the exec bits are set and the no-exec bits
        // are clear.
        return self.data & (Arch::ENTRY_FLAG_EXEC | Arch::ENTRY_FLAG_NO_EXEC)
            == Arch::ENTRY_FLAG_EXEC;
    }

    /// Set whether caching is disabled for this page.
    #[inline(always)]
    pub fn set_page_cache_disable(self, value: bool) -> Self {
        return self.update_flags(Arch::ENTRY_FLAG_CACHE_DISABLE, value);
    }

    /// Whether caching is disabled for this page.
    #[inline(always)]
    pub fn has_page_cache_disable(&self) -> bool {
        return self.has_flag(Arch::ENTRY_FLAG_CACHE_DISABLE);
    }

    /// Set whether the page uses write-through caching.
    #[inline(always)]
    pub fn set_page_write_through(self, value: bool) -> Self {
        return self.update_flags(Arch::ENTRY_FLAG_WRITE_THROUGH, value);
    }

    /// Whether the page uses write-through caching.
    #[inline(always)]
    pub fn has_page_write_through(&self) -> bool {
        return self.has_flag(Arch::ENTRY_FLAG_WRITE_THROUGH);
    }

    /// Flags suitable for MMIO mappings: kernel-only, writable, executable,
    /// uncached, write-through.
    #[inline(always)]
    pub fn mmio_flags() -> Self {
        return Self::new()
            .set_user(false)
            .set_write(true)
            .set_execute(true)
            .set_page_cache_disable(true)
            .set_page_write_through(true);
    }
}
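
// Sketch: composing flags with the builder API above. Hypothetical helper for
// illustration only; the `PROT_READ` constant is assumed to exist alongside
// `PROT_EXEC`/`PROT_WRITE` in `ProtFlags`.
#[allow(dead_code)]
fn page_flags_sketch() {
    // Device memory: kernel-only, writable, uncached, write-through.
    let _mmio: PageFlags<MMArch> = PageFlags::mmio_flags();
    // A typical user .text mapping: executable but not writable.
    let _user_text: PageFlags<MMArch> =
        PageFlags::from_prot_flags(ProtFlags::PROT_READ | ProtFlags::PROT_EXEC, true);
}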
impl<Arch: MemoryManagementArch> fmt::Debug for PageFlags<Arch> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PageFlags")
            .field("bits", &format_args!("{:#x}", self.data))
            .field("present", &self.present())
            .field("has_write", &self.has_write())
            .field("has_execute", &self.has_execute())
            .field("has_user", &self.has_user())
            .finish()
    }
}
/// Walks and modifies an architecture's page tables.
#[derive(Hash)]
pub struct PageMapper<Arch, F> {
    /// The kind of page table (user or kernel).
    table_kind: PageTableKind,
    /// Physical address of the top-level page table.
    table_paddr: PhysAddr,
    /// Allocator for page table frames.
    frame_allocator: F,
    phantom: PhantomData<fn() -> Arch>,
}

impl<Arch: MemoryManagementArch, F: FrameAllocator> PageMapper<Arch, F> {
    /// Create a mapper over an existing top-level table.
    ///
    /// ## Parameters
    ///
    /// - `table_kind`: the kind of page table.
    /// - `table_paddr`: physical address of the top-level table.
    /// - `allocator`: frame allocator used for new page table frames.
    pub unsafe fn new(table_kind: PageTableKind, table_paddr: PhysAddr, allocator: F) -> Self {
        return Self {
            table_kind,
            table_paddr,
            frame_allocator: allocator,
            phantom: PhantomData,
        };
    }
    /// Allocate a zeroed top-level table and create a mapper over it.
    pub unsafe fn create(table_kind: PageTableKind, mut allocator: F) -> Option<Self> {
        let table_paddr = allocator.allocate_one()?;
        // Zero the new table so that every entry starts out unmapped.
        let table_vaddr = Arch::phys_2_virt(table_paddr)?;
        Arch::write_bytes(table_vaddr, 0, Arch::PAGE_SIZE);
        return Some(Self::new(table_kind, table_paddr, allocator));
    }

    /// Create a mapper over the currently installed top-level table.
    #[inline(always)]
    pub unsafe fn current(table_kind: PageTableKind, allocator: F) -> Self {
        let table_paddr = Arch::table(table_kind);
        return Self::new(table_kind, table_paddr, allocator);
    }

    /// Whether this mapper's table is the one currently installed.
    #[inline(always)]
    pub fn is_current(&self) -> bool {
        return unsafe { self.table().phys() == Arch::table(self.table_kind) };
    }

    /// Install this mapper's table into the hardware register.
    #[inline(always)]
    pub unsafe fn make_current(&self) {
        Arch::set_table(self.table_kind, self.table_paddr);
    }

    /// The top-level page table managed by this mapper.
    #[inline(always)]
    pub fn table(&self) -> PageTable<Arch> {
        // Safety: the table physical address was valid when the mapper was
        // constructed.
        return unsafe {
            PageTable::new(VirtAddr::new(0), self.table_paddr, Arch::PAGE_LEVELS - 1)
        };
    }

    #[inline(always)]
    #[allow(dead_code)]
    pub fn allocator_ref(&self) -> &F {
        return &self.frame_allocator;
    }

    #[inline(always)]
    pub fn allocator_mut(&mut self) -> &mut F {
        return &mut self.frame_allocator;
    }
    /// Allocate a fresh frame and map `virt` to it.
    pub unsafe fn map(
        &mut self,
        virt: VirtAddr,
        flags: PageFlags<Arch>,
    ) -> Option<PageFlush<Arch>> {
        compiler_fence(Ordering::SeqCst);
        let phys: PhysAddr = self.frame_allocator.allocate_one()?;
        compiler_fence(Ordering::SeqCst);

        page_manager_lock_irqsave().insert(phys, Page::new(false));

        return self.map_phys(virt, phys, flags);
    }

    /// Map `virt` to the given physical frame, creating intermediate page
    /// tables on demand.
    pub unsafe fn map_phys(
        &mut self,
        virt: VirtAddr,
        phys: PhysAddr,
        flags: PageFlags<Arch>,
    ) -> Option<PageFlush<Arch>> {
        // Both addresses must be page-aligned.
        if !(virt.check_aligned(Arch::PAGE_SIZE) && phys.check_aligned(Arch::PAGE_SIZE)) {
            kerror!(
                "Try to map unaligned page: virt={:?}, phys={:?}",
                virt,
                phys
            );
            return None;
        }

        let virt = VirtAddr::new(virt.data() & (!Arch::PAGE_NEGATIVE_MASK));

        // Walk down the hierarchy, creating missing intermediate tables,
        // until the leaf level is reached.
        let entry = PageEntry::new(phys, flags);
        let mut table = self.table();
        loop {
            let i = table.index_of(virt)?;
            assert!(i < Arch::PAGE_ENTRY_NUM);
            if table.level() == 0 {
                // Leaf level: write the final entry.
                if table.entry_mapped(i)? {
                    kwarn!("Page {:?} already mapped", virt);
                }

                compiler_fence(Ordering::SeqCst);
                table.set_entry(i, entry);
                compiler_fence(Ordering::SeqCst);
                return Some(PageFlush::new(virt));
            } else {
                let next_table = table.next_level_table(i);
                if let Some(next_table) = next_table {
                    table = next_table;
                } else {
                    // Allocate a frame for the missing intermediate table
                    // and zero it so every entry starts out unmapped.
                    let frame = self.frame_allocator.allocate_one()?;
                    Arch::write_bytes(Arch::phys_2_virt(frame).unwrap(), 0, Arch::PAGE_SIZE);
                    // Intermediate entries for user mappings must themselves
                    // be user-accessible.
                    let flags: PageFlags<Arch> =
                        PageFlags::new_page_table(virt.kind() == PageTableKind::User);

                    table.set_entry(i, PageEntry::new(frame, flags));
                    table = table.next_level_table(i)?;
                }
            }
        }
    }
    /// Map `phys` at its linear (direct-map) virtual address.
    #[allow(dead_code)]
    pub unsafe fn map_linearly(
        &mut self,
        phys: PhysAddr,
        flags: PageFlags<Arch>,
    ) -> Option<(VirtAddr, PageFlush<Arch>)> {
        let virt: VirtAddr = Arch::phys_2_virt(phys)?;
        return self.map_phys(virt, phys, flags).map(|flush| (virt, flush));
    }
    /// Change the flags of an existing mapping.
    ///
    /// ## Parameters
    ///
    /// - `virt`: the virtual address whose leaf entry is updated.
    /// - `flags`: the new flags.
    ///
    /// ## Returns
    ///
    /// A flusher for the page, or `None` if `virt` is not mapped.
    pub unsafe fn remap(
        &mut self,
        virt: VirtAddr,
        flags: PageFlags<Arch>,
    ) -> Option<PageFlush<Arch>> {
        return self
            .visit(virt, |p1, i| {
                let mut entry = p1.entry(i)?;
                entry.set_flags(flags);
                p1.set_entry(i, entry);
                Some(PageFlush::new(virt))
            })
            .flatten();
    }

    /// Translate a virtual address to its physical address and flags.
    ///
    /// ## Returns
    ///
    /// `Some((paddr, flags))` if `virt` is mapped and present, else `None`.
    pub fn translate(&self, virt: VirtAddr) -> Option<(PhysAddr, PageFlags<Arch>)> {
        let entry: PageEntry<Arch> = self.visit(virt, |p1, i| unsafe { p1.entry(i) })??;
        let paddr = entry.address().ok()?;
        let flags = entry.flags();
        return Some((paddr, flags));
    }

    /// Unmap `virt` and free the backing frame.
    ///
    /// ## Parameters
    ///
    /// - `virt`: the virtual address to unmap.
    /// - `unmap_parents`: whether to free intermediate tables that become
    ///   empty.
    #[allow(dead_code)]
    pub unsafe fn unmap(&mut self, virt: VirtAddr, unmap_parents: bool) -> Option<PageFlush<Arch>> {
        let (paddr, _, flusher) = self.unmap_phys(virt, unmap_parents)?;
        self.frame_allocator.free_one(paddr);
        return Some(flusher);
    }

    /// Unmap `virt` without freeing the backing frame.
    ///
    /// ## Returns
    ///
    /// The physical address and flags of the removed mapping, plus a flusher.
    pub unsafe fn unmap_phys(
        &mut self,
        virt: VirtAddr,
        unmap_parents: bool,
    ) -> Option<(PhysAddr, PageFlags<Arch>, PageFlush<Arch>)> {
        if !virt.check_aligned(Arch::PAGE_SIZE) {
            kerror!("Try to unmap unaligned page: virt={:?}", virt);
            return None;
        }

        let table = self.table();
        return unmap_phys_inner(virt, &table, unmap_parents, self.allocator_mut())
            .map(|(paddr, flags)| (paddr, flags, PageFlush::<Arch>::new(virt)));
    }

    /// Walk to the leaf table covering `virt` and apply `f` to it.
    fn visit<T>(
        &self,
        virt: VirtAddr,
        f: impl FnOnce(&mut PageTable<Arch>, usize) -> T,
    ) -> Option<T> {
        let mut table = self.table();
        unsafe {
            loop {
                let i = table.index_of(virt)?;
                if table.level() == 0 {
                    return Some(f(&mut table, i));
                } else {
                    table = table.next_level_table(i)?;
                }
            }
        }
    }
}
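
// Sketch: the canonical map-then-flush sequence through `PageMapper`.
// Hypothetical caller code; assumes `virt` is page-aligned and the mapper's
// allocator still has free frames.
#[allow(dead_code)]
unsafe fn map_one_page_sketch<Arch, F>(
    mapper: &mut PageMapper<Arch, F>,
    virt: VirtAddr,
) -> Option<()>
where
    Arch: MemoryManagementArch,
    F: FrameAllocator,
{
    let flags = PageFlags::new().set_write(true);
    // `map` allocates the frame, registers it with the page manager, and
    // installs the leaf entry.
    let flush = mapper.map(virt, flags)?;
    // Dropping the flusher would also invalidate, but calling `flush()`
    // makes the TLB invalidation explicit.
    flush.flush();
    Some(())
}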
/// Recursive worker for [`PageMapper::unmap_phys`].
unsafe fn unmap_phys_inner<Arch: MemoryManagementArch>(
    vaddr: VirtAddr,
    table: &PageTable<Arch>,
    unmap_parents: bool,
    allocator: &mut impl FrameAllocator,
) -> Option<(PhysAddr, PageFlags<Arch>)> {
    let i = table.index_of(vaddr)?;

    // Leaf level: clear the entry and return what it pointed to.
    if table.level() == 0 {
        let entry = table.entry(i)?;
        table.set_entry(i, PageEntry::from_usize(0));
        return Some((entry.address().ok()?, entry.flags()));
    }

    let subtable = table.next_level_table(i)?;
    // Recurse into the next level.
    let result = unmap_phys_inner(vaddr, &subtable, unmap_parents, allocator)?;

    if unmap_parents {
        // If no entry in the subtable is still present, the subtable itself
        // can be unlinked from its parent and its frame freed.
        let x = (0..Arch::PAGE_ENTRY_NUM)
            .map(|k| subtable.entry(k).expect("invalid page entry"))
            .any(|e| e.present());
        if !x {
            table.set_entry(i, PageEntry::from_usize(0));
            allocator.free_one(subtable.phys());
        }
    }

    return Some(result);
}
impl<Arch, F: Debug> Debug for PageMapper<Arch, F> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PageMapper")
            .field("table_paddr", &self.table_paddr)
            .field("frame_allocator", &self.frame_allocator)
            .finish()
    }
}
/// A sink for page flushes, deciding how (or whether) they are carried out.
pub trait Flusher<Arch: MemoryManagementArch> {
    /// Consume a single-page flush, either performing it or deferring it.
    fn consume(&mut self, flush: PageFlush<Arch>);
}
#[must_use = "The flusher must call `flush()`, or the changes to the page table will be unsafely ignored."]
pub struct PageFlush<Arch: MemoryManagementArch> {
    virt: VirtAddr,
    phantom: PhantomData<Arch>,
}

impl<Arch: MemoryManagementArch> PageFlush<Arch> {
    pub fn new(virt: VirtAddr) -> Self {
        return Self {
            virt,
            phantom: PhantomData,
        };
    }

    pub fn flush(self) {
        unsafe { Arch::invalidate_page(self.virt) };
    }

    /// Skip the flush. The caller becomes responsible for TLB consistency.
    pub unsafe fn ignore(self) {
        mem::forget(self);
    }
}

impl<Arch: MemoryManagementArch> Drop for PageFlush<Arch> {
    fn drop(&mut self) {
        unsafe {
            Arch::invalidate_page(self.virt);
        }
    }
}
#[must_use = "The flusher must call `flush()`, or the changes to the page table will be unsafely ignored."]
pub struct PageFlushAll<Arch: MemoryManagementArch> {
    phantom: PhantomData<fn() -> Arch>,
}

#[allow(dead_code)]
impl<Arch: MemoryManagementArch> PageFlushAll<Arch> {
    pub fn new() -> Self {
        return Self {
            phantom: PhantomData,
        };
    }

    pub fn flush(self) {
        unsafe { Arch::invalidate_all() };
    }

    /// Skip the flush. The caller becomes responsible for TLB consistency.
    pub unsafe fn ignore(self) {
        mem::forget(self);
    }
}

impl<Arch: MemoryManagementArch> Flusher<Arch> for PageFlushAll<Arch> {
    /// Defer the per-page flush: everything is invalidated at once when the
    /// `PageFlushAll` itself is flushed or dropped.
    fn consume(&mut self, flush: PageFlush<Arch>) {
        unsafe { flush.ignore() };
    }
}

impl<Arch: MemoryManagementArch, T: Flusher<Arch> + ?Sized> Flusher<Arch> for &mut T {
    /// Forward to the referenced flusher.
    fn consume(&mut self, flush: PageFlush<Arch>) {
        <T as Flusher<Arch>>::consume(self, flush);
    }
}

impl<Arch: MemoryManagementArch> Flusher<Arch> for () {
    fn consume(&mut self, _flush: PageFlush<Arch>) {}
}

impl<Arch: MemoryManagementArch> Drop for PageFlushAll<Arch> {
    fn drop(&mut self) {
        unsafe {
            Arch::invalidate_all();
        }
    }
}
#[derive(Debug)]
pub struct InactiveFlusher;

impl InactiveFlusher {
    pub fn new() -> Self {
        return Self {};
    }
}

impl Flusher<MMArch> for InactiveFlusher {
    fn consume(&mut self, flush: PageFlush<MMArch>) {
        unsafe {
            flush.ignore();
        }
    }
}

impl Drop for InactiveFlusher {
    fn drop(&mut self) {
        // Tell the other CPUs to flush their TLBs for the inactive table.
        send_ipi(IpiKind::FlushTLB, IpiTarget::Other);
    }
}
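
// Sketch: how a `Flusher` is typically threaded through a batched unmap path.
// Hypothetical; a real caller would pass `PageFlushAll` (or `()` to skip
// per-page invalidation entirely) down from the VMA layer.
#[allow(dead_code)]
unsafe fn batched_flush_sketch<Arch, F>(mapper: &mut PageMapper<Arch, F>, pages: &[VirtAddr])
where
    Arch: MemoryManagementArch,
    F: FrameAllocator,
{
    let mut flusher = PageFlushAll::<Arch>::new();
    for &virt in pages {
        if let Some(flush) = mapper.unmap(virt, false) {
            // Defer the per-page invalidation; one global flush happens
            // when `flusher` is flushed or dropped.
            flusher.consume(flush);
        }
    }
    flusher.flush();
}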
/// Round `addr` down to the nearest page boundary.
pub fn round_down_to_page_size(addr: usize) -> usize {
    addr & !(MMArch::PAGE_SIZE - 1)
}

/// Round `addr` up to the nearest page boundary.
pub fn round_up_to_page_size(addr: usize) -> usize {
    round_down_to_page_size(addr + MMArch::PAGE_SIZE - 1)
}
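
// Sketch: expected behavior of the rounding helpers, assuming the base page
// size is 4KiB (`PAGE_4K_SHIFT`). Hypothetical test module, only built when
// the crate is compiled with tests enabled.
#[cfg(test)]
mod page_round_tests {
    use super::*;

    #[test]
    fn round_down_and_up() {
        assert_eq!(round_down_to_page_size(0x1234), 0x1000);
        assert_eq!(round_up_to_page_size(0x1001), 0x2000);
        // Already-aligned addresses are unchanged in both directions.
        assert_eq!(round_down_to_page_size(0x2000), 0x2000);
        assert_eq!(round_up_to_page_size(0x2000), 0x2000);
    }
}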