12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969 |
- use core::{
- fmt::{self, Debug, Error, Formatter},
- marker::PhantomData,
- mem,
- ops::Add,
- sync::atomic::{compiler_fence, Ordering},
- };
- use crate::{
- arch::{interrupt::ipi::send_ipi, MMArch},
- exception::ipi::{IpiKind, IpiTarget},
- kerror, kwarn,
- };
- use super::{
- allocator::page_frame::FrameAllocator, syscall::ProtFlags, MemoryManagementArch, PageTableKind,
- PhysAddr, VirtAddr,
- };
/// A handle to one level of a hardware page table.
#[derive(Debug)]
pub struct PageTable<Arch> {
    /// First virtual address mapped by this table.
    base: VirtAddr,
    /// Physical address of the table frame itself.
    phys: PhysAddr,
    /// Level of this table in the paging hierarchy (0 is the leaf level).
    level: usize,
    phantom: PhantomData<Arch>,
}
- #[allow(dead_code)]
- impl<Arch: MemoryManagementArch> PageTable<Arch> {
- pub unsafe fn new(base: VirtAddr, phys: PhysAddr, level: usize) -> Self {
- Self {
- base,
- phys,
- level,
- phantom: PhantomData,
- }
- }
-
-
-
-
-
-
-
-
-
- pub unsafe fn top_level_table(table_kind: PageTableKind) -> Self {
- return Self::new(
- VirtAddr::new(0),
- Arch::table(table_kind),
- Arch::PAGE_LEVELS - 1,
- );
- }
-
- #[inline(always)]
- pub fn phys(&self) -> PhysAddr {
- self.phys
- }
-
- #[inline(always)]
- pub fn base(&self) -> VirtAddr {
- self.base
- }
-
- #[inline(always)]
- pub fn level(&self) -> usize {
- self.level
- }
-
- #[inline(always)]
- pub unsafe fn virt(&self) -> VirtAddr {
- return Arch::phys_2_virt(self.phys).unwrap();
- }
-
- pub fn entry_base(&self, i: usize) -> Option<VirtAddr> {
- if i < Arch::PAGE_ENTRY_NUM {
- let shift = self.level * Arch::PAGE_ENTRY_SHIFT + Arch::PAGE_SHIFT;
- return Some(self.base.add(i << shift));
- } else {
- return None;
- }
- }
-
- pub unsafe fn entry_virt(&self, i: usize) -> Option<VirtAddr> {
- if i < Arch::PAGE_ENTRY_NUM {
- return Some(self.virt().add(i * Arch::PAGE_ENTRY_SIZE));
- } else {
- return None;
- }
- }
-
- pub unsafe fn entry(&self, i: usize) -> Option<PageEntry<Arch>> {
- let entry_virt = self.entry_virt(i)?;
- return Some(PageEntry::from_usize(Arch::read::<usize>(entry_virt)));
- }
-
- pub unsafe fn set_entry(&self, i: usize, entry: PageEntry<Arch>) -> Option<()> {
- let entry_virt = self.entry_virt(i)?;
- Arch::write::<usize>(entry_virt, entry.data());
- return Some(());
- }
-
-
-
-
-
-
- pub fn entry_mapped(&self, i: usize) -> Option<bool> {
- let etv = unsafe { self.entry_virt(i) }?;
- if unsafe { Arch::read::<usize>(etv) } != 0 {
- return Some(true);
- } else {
- return Some(false);
- }
- }
-
-
-
-
-
-
-
-
-
- pub unsafe fn index_of(&self, addr: VirtAddr) -> Option<usize> {
- let addr = VirtAddr::new(addr.data() & Arch::PAGE_ADDRESS_MASK);
- let shift = self.level * Arch::PAGE_ENTRY_SHIFT + Arch::PAGE_SHIFT;
- let mask = (MMArch::PAGE_ENTRY_NUM << shift) - 1;
- if addr < self.base || addr >= self.base.add(mask) {
- return None;
- } else {
- return Some((addr.data() >> shift) & MMArch::PAGE_ENTRY_MASK);
- }
- }
-
- pub unsafe fn next_level_table(&self, index: usize) -> Option<Self> {
- if self.level == 0 {
- return None;
- }
-
- return Some(PageTable::new(
- self.entry_base(index)?,
- self.entry(index)?.address().ok()?,
- self.level - 1,
- ));
- }
- }
/// A single raw page-table entry (one machine word).
#[derive(Copy, Clone)]
pub struct PageEntry<Arch> {
    /// Raw entry bits: address/frame-number bits plus flag bits.
    data: usize,
    phantom: PhantomData<Arch>,
}
- impl<Arch> Debug for PageEntry<Arch> {
- fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
- f.write_fmt(format_args!("PageEntry({:#x})", self.data))
- }
- }
impl<Arch: MemoryManagementArch> PageEntry<Arch> {
    /// Build an entry pointing at `paddr` with the given flags.
    #[inline(always)]
    pub fn new(paddr: PhysAddr, flags: PageFlags<Arch>) -> Self {
        Self {
            data: MMArch::make_entry(paddr, flags.data()),
            phantom: PhantomData,
        }
    }
    /// Wrap a raw entry word as read from a table.
    #[inline(always)]
    pub fn from_usize(data: usize) -> Self {
        Self {
            data,
            phantom: PhantomData,
        }
    }
    /// Raw entry bits.
    #[inline(always)]
    pub fn data(&self) -> usize {
        self.data
    }

    /// Physical address stored in this entry.
    ///
    /// Returns `Ok(paddr)` when the entry is present, `Err(paddr)` otherwise,
    /// so callers can still inspect the address bits of a non-present entry.
    #[inline(always)]
    pub fn address(&self) -> Result<PhysAddr, PhysAddr> {
        let paddr: PhysAddr = {
            #[cfg(target_arch = "x86_64")]
            {
                // x86-64: the address bits are selected directly by the mask.
                PhysAddr::new(self.data & Arch::PAGE_ADDRESS_MASK)
            }
            #[cfg(target_arch = "riscv64")]
            {
                // RISC-V: drop the low 10 flag bits, then keep 44 bits of PPN
                // and convert the frame number back to a physical address.
                let ppn = ((self.data & (!((1 << 10) - 1))) >> 10) & ((1 << 44) - 1);
                super::allocator::page_frame::PhysPageFrame::from_ppn(ppn).phys_address()
            }
        };
        if self.present() {
            Ok(paddr)
        } else {
            Err(paddr)
        }
    }
    /// Flag bits of this entry.
    #[inline(always)]
    pub fn flags(&self) -> PageFlags<Arch> {
        unsafe { PageFlags::from_data(self.data & Arch::ENTRY_FLAGS_MASK) }
    }
    /// Replace the flag bits, keeping the address bits untouched.
    #[inline(always)]
    pub fn set_flags(&mut self, flags: PageFlags<Arch>) {
        self.data = (self.data & !Arch::ENTRY_FLAGS_MASK) | flags.data();
    }
    /// Whether the architecture's "present" bit is set.
    #[inline(always)]
    pub fn present(&self) -> bool {
        return self.data & Arch::ENTRY_FLAG_PRESENT != 0;
    }
}
/// Architecture-independent builder for the flag bits of a page entry.
#[derive(Copy, Clone, Hash)]
pub struct PageFlags<Arch> {
    /// Raw flag bits (already in the architecture's encoding).
    data: usize,
    phantom: PhantomData<Arch>,
}
#[allow(dead_code)]
impl<Arch: MemoryManagementArch> PageFlags<Arch> {
    /// Default flags for a data page: default page bits, read-only, no-exec.
    #[inline(always)]
    pub fn new() -> Self {
        let mut r = unsafe {
            Self::from_data(
                Arch::ENTRY_FLAG_DEFAULT_PAGE
                    | Arch::ENTRY_FLAG_READONLY
                    | Arch::ENTRY_FLAG_NO_EXEC,
            )
        };
        #[cfg(target_arch = "x86_64")]
        {
            // When the XD/NX bit is reserved on this CPU, setting it would be
            // invalid, so fall back to the executable encoding.
            if crate::arch::mm::X86_64MMArch::is_xd_reserved() {
                r = r.set_execute(true);
            }
        }
        return r;
    }

    /// Translate mmap-style `ProtFlags` (and the user/kernel choice) into page flags.
    pub fn from_prot_flags(prot_flags: ProtFlags, user: bool) -> PageFlags<Arch> {
        let flags: PageFlags<Arch> = PageFlags::new()
            .set_user(user)
            .set_execute(prot_flags.contains(ProtFlags::PROT_EXEC))
            .set_write(prot_flags.contains(ProtFlags::PROT_WRITE));
        return flags;
    }
    /// Raw flag bits.
    #[inline(always)]
    pub fn data(&self) -> usize {
        self.data
    }
    /// Wrap raw flag bits without validation.
    ///
    /// # Safety
    /// `data` must be a valid flag encoding for the architecture.
    #[inline(always)]
    pub const unsafe fn from_data(data: usize) -> Self {
        return Self {
            data: data,
            phantom: PhantomData,
        };
    }

    /// Flags for an entry that points at a next-level page table.
    #[inline(always)]
    pub fn new_page_table(user: bool) -> Self {
        return unsafe {
            let r = {
                #[cfg(target_arch = "x86_64")]
                {
                    Self::from_data(Arch::ENTRY_FLAG_DEFAULT_TABLE | Arch::ENTRY_FLAG_READWRITE)
                }
                #[cfg(target_arch = "riscv64")]
                {
                    // On RISC-V a non-leaf entry carries no R/W/X permissions.
                    Self::from_data(Arch::ENTRY_FLAG_DEFAULT_TABLE)
                }
            };
            if user {
                r.set_user(true)
            } else {
                r
            }
        };
    }

    /// Set (`value == true`) or clear the given raw flag bits, returning the
    /// updated flags (builder style).
    #[inline(always)]
    #[must_use]
    pub fn update_flags(mut self, flag: usize, value: bool) -> Self {
        if value {
            self.data |= flag;
        } else {
            self.data &= !flag;
        }
        return self;
    }

    /// Whether all bits of `flag` are set.
    #[inline(always)]
    pub fn has_flag(&self, flag: usize) -> bool {
        return self.data & flag == flag;
    }
    /// Whether the present bit is set.
    #[inline(always)]
    pub fn present(&self) -> bool {
        return self.has_flag(Arch::ENTRY_FLAG_PRESENT);
    }

    /// Mark the page as user- or kernel-accessible.
    #[must_use]
    #[inline(always)]
    pub fn set_user(self, value: bool) -> Self {
        return self.update_flags(Arch::ENTRY_FLAG_USER, value);
    }

    /// Whether the page is user-accessible.
    #[inline(always)]
    pub fn has_user(&self) -> bool {
        return self.has_flag(Arch::ENTRY_FLAG_USER);
    }

    /// Mark the page writable (`true`) or read-only (`false`).
    #[must_use]
    #[inline(always)]
    pub fn set_write(self, value: bool) -> Self {
        #[cfg(target_arch = "x86_64")]
        {
            // x86-64: READONLY and READWRITE are complementary encodings, so
            // both are updated to keep them consistent.
            return self
                .update_flags(Arch::ENTRY_FLAG_READONLY, !value)
                .update_flags(Arch::ENTRY_FLAG_READWRITE, value);
        }
        #[cfg(target_arch = "riscv64")]
        {
            // RISC-V: only set the bit pattern that matches the request.
            if value {
                return self.update_flags(Arch::ENTRY_FLAG_READWRITE, true);
            } else {
                return self.update_flags(Arch::ENTRY_FLAG_READONLY, true);
            }
        }
    }

    /// Whether the page is writable.
    #[inline(always)]
    pub fn has_write(&self) -> bool {
        // Writable only when, of the RW/RO bits, exactly the RW pattern is set.
        return self.data & (Arch::ENTRY_FLAG_READWRITE | Arch::ENTRY_FLAG_READONLY)
            == Arch::ENTRY_FLAG_READWRITE;
    }

    /// Mark the page executable (`true`) or no-exec (`false`).
    #[must_use]
    #[inline(always)]
    pub fn set_execute(self, mut value: bool) -> Self {
        #[cfg(target_arch = "x86_64")]
        {
            // If the XD/NX bit is reserved on this CPU it cannot be set, so
            // the page must be treated as executable.
            if crate::arch::mm::X86_64MMArch::is_xd_reserved() {
                value = true;
            }
        }

        return self
            .update_flags(Arch::ENTRY_FLAG_NO_EXEC, !value)
            .update_flags(Arch::ENTRY_FLAG_EXEC, value);
    }

    /// Whether the page is executable.
    #[inline(always)]
    pub fn has_execute(&self) -> bool {
        // Executable only when, of the EXEC/NO_EXEC bits, exactly EXEC is set.
        return self.data & (Arch::ENTRY_FLAG_EXEC | Arch::ENTRY_FLAG_NO_EXEC)
            == Arch::ENTRY_FLAG_EXEC;
    }

    /// Enable (`true`) or disable caching for this page.
    #[inline(always)]
    pub fn set_page_cache_disable(self, value: bool) -> Self {
        return self.update_flags(Arch::ENTRY_FLAG_CACHE_DISABLE, value);
    }

    /// Whether caching is disabled for this page.
    #[inline(always)]
    pub fn has_page_cache_disable(&self) -> bool {
        return self.has_flag(Arch::ENTRY_FLAG_CACHE_DISABLE);
    }

    /// Enable (`true`) or disable write-through for this page.
    #[inline(always)]
    pub fn set_page_write_through(self, value: bool) -> Self {
        return self.update_flags(Arch::ENTRY_FLAG_WRITE_THROUGH, value);
    }

    /// Whether write-through is enabled for this page.
    #[inline(always)]
    pub fn has_page_write_through(&self) -> bool {
        return self.has_flag(Arch::ENTRY_FLAG_WRITE_THROUGH);
    }

    /// Flags for MMIO pages: kernel-only, writable, uncached, write-through.
    #[inline(always)]
    pub fn mmio_flags() -> Self {
        return Self::new()
            .set_user(false)
            .set_write(true)
            .set_execute(true)
            .set_page_cache_disable(true)
            .set_page_write_through(true);
    }
}
/// Dump the raw flag bits plus the decoded common permissions.
impl<Arch: MemoryManagementArch> fmt::Debug for PageFlags<Arch> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PageFlags")
            .field("bits", &format_args!("{:#0x}", self.data))
            .field("present", &self.present())
            .field("has_write", &self.has_write())
            .field("has_execute", &self.has_execute())
            .field("has_user", &self.has_user())
            .finish()
    }
}
/// Walks and mutates a page-table hierarchy, allocating intermediate tables
/// from its frame allocator on demand.
#[derive(Hash)]
pub struct PageMapper<Arch, F> {
    /// Which address space's table this mapper operates on.
    table_kind: PageTableKind,
    /// Physical address of the top-level table.
    table_paddr: PhysAddr,
    /// Allocator used for new table and page frames.
    frame_allocator: F,
    phantom: PhantomData<fn() -> Arch>,
}
- impl<Arch: MemoryManagementArch, F: FrameAllocator> PageMapper<Arch, F> {
-
-
-
-
-
-
-
-
-
-
- pub unsafe fn new(table_kind: PageTableKind, table_paddr: PhysAddr, allocator: F) -> Self {
- return Self {
- table_kind,
- table_paddr,
- frame_allocator: allocator,
- phantom: PhantomData,
- };
- }
-
- pub unsafe fn create(table_kind: PageTableKind, mut allocator: F) -> Option<Self> {
- let table_paddr = allocator.allocate_one()?;
-
- let table_vaddr = Arch::phys_2_virt(table_paddr)?;
- Arch::write_bytes(table_vaddr, 0, Arch::PAGE_SIZE);
- return Some(Self::new(table_kind, table_paddr, allocator));
- }
-
- #[inline(always)]
- pub unsafe fn current(table_kind: PageTableKind, allocator: F) -> Self {
- let table_paddr = Arch::table(table_kind);
- return Self::new(table_kind, table_paddr, allocator);
- }
-
- #[inline(always)]
- pub fn is_current(&self) -> bool {
- return unsafe { self.table().phys() == Arch::table(self.table_kind) };
- }
-
- #[inline(always)]
- pub unsafe fn make_current(&self) {
- Arch::set_table(self.table_kind, self.table_paddr);
- }
-
- #[inline(always)]
- pub fn table(&self) -> PageTable<Arch> {
-
- return unsafe {
- PageTable::new(VirtAddr::new(0), self.table_paddr, Arch::PAGE_LEVELS - 1)
- };
- }
-
- #[inline(always)]
- #[allow(dead_code)]
- pub fn allocator_ref(&self) -> &F {
- return &self.frame_allocator;
- }
-
- #[inline(always)]
- pub fn allocator_mut(&mut self) -> &mut F {
- return &mut self.frame_allocator;
- }
-
- pub unsafe fn map(
- &mut self,
- virt: VirtAddr,
- flags: PageFlags<Arch>,
- ) -> Option<PageFlush<Arch>> {
- compiler_fence(Ordering::SeqCst);
- let phys: PhysAddr = self.frame_allocator.allocate_one()?;
- compiler_fence(Ordering::SeqCst);
- return self.map_phys(virt, phys, flags);
- }
-
- pub unsafe fn map_phys(
- &mut self,
- virt: VirtAddr,
- phys: PhysAddr,
- flags: PageFlags<Arch>,
- ) -> Option<PageFlush<Arch>> {
-
- if !(virt.check_aligned(Arch::PAGE_SIZE) && phys.check_aligned(Arch::PAGE_SIZE)) {
- kerror!(
- "Try to map unaligned page: virt={:?}, phys={:?}",
- virt,
- phys
- );
- return None;
- }
- let virt = VirtAddr::new(virt.data() & (!Arch::PAGE_NEGATIVE_MASK));
-
-
- let entry = PageEntry::new(phys, flags);
- let mut table = self.table();
- loop {
- let i = table.index_of(virt)?;
- assert!(i < Arch::PAGE_ENTRY_NUM);
- if table.level() == 0 {
-
-
- if table.entry_mapped(i)? == true {
- kwarn!("Page {:?} already mapped", virt);
- }
- compiler_fence(Ordering::SeqCst);
- table.set_entry(i, entry);
- compiler_fence(Ordering::SeqCst);
- return Some(PageFlush::new(virt));
- } else {
- let next_table = table.next_level_table(i);
- if let Some(next_table) = next_table {
- table = next_table;
-
- } else {
-
- let frame = self.frame_allocator.allocate_one()?;
-
- MMArch::write_bytes(MMArch::phys_2_virt(frame).unwrap(), 0, MMArch::PAGE_SIZE);
-
- let flags: PageFlags<Arch> =
- PageFlags::new_page_table(virt.kind() == PageTableKind::User);
-
-
- table.set_entry(i, PageEntry::new(frame, flags));
-
- table = table.next_level_table(i)?;
- }
- }
- }
- }
-
- #[allow(dead_code)]
- pub unsafe fn map_linearly(
- &mut self,
- phys: PhysAddr,
- flags: PageFlags<Arch>,
- ) -> Option<(VirtAddr, PageFlush<Arch>)> {
- let virt: VirtAddr = Arch::phys_2_virt(phys)?;
- return self.map_phys(virt, phys, flags).map(|flush| (virt, flush));
- }
-
-
-
-
-
-
-
-
-
-
-
- pub unsafe fn remap(
- &mut self,
- virt: VirtAddr,
- flags: PageFlags<Arch>,
- ) -> Option<PageFlush<Arch>> {
- return self
- .visit(virt, |p1, i| {
- let mut entry = p1.entry(i)?;
- entry.set_flags(flags);
- p1.set_entry(i, entry);
- Some(PageFlush::new(virt))
- })
- .flatten();
- }
-
-
-
-
-
-
-
-
-
- pub fn translate(&self, virt: VirtAddr) -> Option<(PhysAddr, PageFlags<Arch>)> {
- let entry: PageEntry<Arch> = self.visit(virt, |p1, i| unsafe { p1.entry(i) })??;
- let paddr = entry.address().ok()?;
- let flags = entry.flags();
- return Some((paddr, flags));
- }
-
-
-
-
-
-
-
-
-
-
-
- #[allow(dead_code)]
- pub unsafe fn unmap(&mut self, virt: VirtAddr, unmap_parents: bool) -> Option<PageFlush<Arch>> {
- let (paddr, _, flusher) = self.unmap_phys(virt, unmap_parents)?;
- self.frame_allocator.free_one(paddr);
- return Some(flusher);
- }
-
-
-
-
-
-
-
-
-
-
- pub unsafe fn unmap_phys(
- &mut self,
- virt: VirtAddr,
- unmap_parents: bool,
- ) -> Option<(PhysAddr, PageFlags<Arch>, PageFlush<Arch>)> {
- if !virt.check_aligned(Arch::PAGE_SIZE) {
- kerror!("Try to unmap unaligned page: virt={:?}", virt);
- return None;
- }
- let mut table = self.table();
- return unmap_phys_inner(virt, &mut table, unmap_parents, self.allocator_mut())
- .map(|(paddr, flags)| (paddr, flags, PageFlush::<Arch>::new(virt)));
- }
-
- fn visit<T>(
- &self,
- virt: VirtAddr,
- f: impl FnOnce(&mut PageTable<Arch>, usize) -> T,
- ) -> Option<T> {
- let mut table = self.table();
- unsafe {
- loop {
- let i = table.index_of(virt)?;
- if table.level() == 0 {
- return Some(f(&mut table, i));
- } else {
- table = table.next_level_table(i)?;
- }
- }
- }
- }
- }
- unsafe fn unmap_phys_inner<Arch: MemoryManagementArch>(
- vaddr: VirtAddr,
- table: &mut PageTable<Arch>,
- unmap_parents: bool,
- allocator: &mut impl FrameAllocator,
- ) -> Option<(PhysAddr, PageFlags<Arch>)> {
-
- let i = table.index_of(vaddr)?;
-
- if table.level() == 0 {
- let entry = table.entry(i)?;
- table.set_entry(i, PageEntry::from_usize(0));
- return Some((entry.address().ok()?, entry.flags()));
- }
- let mut subtable = table.next_level_table(i)?;
-
- let result = unmap_phys_inner(vaddr, &mut subtable, unmap_parents, allocator)?;
-
-
- if unmap_parents {
-
-
- let x = (0..Arch::PAGE_ENTRY_NUM)
- .map(|k| subtable.entry(k).expect("invalid page entry"))
- .any(|e| e.present());
- if !x {
-
- table.set_entry(i, PageEntry::from_usize(0));
-
- allocator.free_one(subtable.phys());
- }
- }
- return Some(result);
- }
/// Debug output shows the table address and allocator; the phantom arch and
/// table kind are omitted.
impl<Arch, F: Debug> Debug for PageMapper<Arch, F> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PageMapper")
            .field("table_paddr", &self.table_paddr)
            .field("frame_allocator", &self.frame_allocator)
            .finish()
    }
}
/// Consumer of the flush tokens produced by mapping operations.
pub trait Flusher<Arch: MemoryManagementArch> {
    /// Take ownership of `flush` and decide how/when the TLB gets invalidated.
    fn consume(&mut self, flush: PageFlush<Arch>);
}
/// Token recording that one page's TLB entry is stale and must be flushed.
#[must_use = "The flusher must call the 'flush()', or the changes to page table will be unsafely ignored."]
pub struct PageFlush<Arch: MemoryManagementArch> {
    /// Virtual address whose mapping changed.
    virt: VirtAddr,
    phantom: PhantomData<Arch>,
}
impl<Arch: MemoryManagementArch> PageFlush<Arch> {
    /// Create a token for a changed mapping of `virt`.
    pub fn new(virt: VirtAddr) -> Self {
        return Self {
            virt,
            phantom: PhantomData,
        };
    }
    /// Explicitly invalidate the TLB entry for `virt`.
    // NOTE(review): this type's Drop impl also invalidates the page, so after
    // the explicit call here the page is invalidated a second time on drop —
    // harmless but redundant; confirm whether `mem::forget` was intended.
    pub fn flush(self) {
        unsafe { Arch::invalidate_page(self.virt) };
    }

    /// Discard the token without flushing (also suppresses the Drop-time flush).
    ///
    /// # Safety
    /// The caller must guarantee the stale TLB entry is handled some other way.
    pub unsafe fn ignore(self) {
        mem::forget(self);
    }
}
/// Safety net: an unconsumed flush token still invalidates its page on drop.
impl<Arch: MemoryManagementArch> Drop for PageFlush<Arch> {
    fn drop(&mut self) {
        unsafe {
            MMArch::invalidate_page(self.virt);
        }
    }
}
/// Token for flushing the entire TLB instead of individual pages.
#[must_use = "The flusher must call the 'flush()', or the changes to page table will be unsafely ignored."]
pub struct PageFlushAll<Arch: MemoryManagementArch> {
    phantom: PhantomData<fn() -> Arch>,
}
#[allow(dead_code)]
impl<Arch: MemoryManagementArch> PageFlushAll<Arch> {
    /// Create a full-TLB flush token.
    pub fn new() -> Self {
        return Self {
            phantom: PhantomData,
        };
    }
    /// Explicitly invalidate the whole TLB.
    // NOTE(review): Drop also calls invalidate_all(), so flushing explicitly
    // invalidates twice — harmless but redundant; confirm intent.
    pub fn flush(self) {
        unsafe { Arch::invalidate_all() };
    }

    /// Discard the token without flushing (also suppresses the Drop-time flush).
    ///
    /// # Safety
    /// The caller must guarantee stale TLB entries are handled some other way.
    pub unsafe fn ignore(self) {
        mem::forget(self);
    }
}
impl<Arch: MemoryManagementArch> Flusher<Arch> for PageFlushAll<Arch> {
    /// Per-page flushes are ignored: the eventual full flush covers them.
    fn consume(&mut self, flush: PageFlush<Arch>) {
        unsafe { flush.ignore() };
    }
}
/// Forward to the referenced flusher so `&mut F` can be passed anywhere a
/// `Flusher` is expected.
impl<Arch: MemoryManagementArch, T: Flusher<Arch> + ?Sized> Flusher<Arch> for &mut T {
    fn consume(&mut self, flush: PageFlush<Arch>) {
        <T as Flusher<Arch>>::consume(self, flush);
    }
}
/// Unit flusher: the token is simply dropped, so its Drop impl still
/// invalidates the page.
impl<Arch: MemoryManagementArch> Flusher<Arch> for () {
    fn consume(&mut self, _flush: PageFlush<Arch>) {}
}
/// Safety net: an unconsumed full-flush token still flushes the TLB on drop.
impl<Arch: MemoryManagementArch> Drop for PageFlushAll<Arch> {
    fn drop(&mut self) {
        unsafe {
            Arch::invalidate_all();
        }
    }
}
/// Flusher for page tables that are not currently active on this CPU;
/// flush tokens are discarded and remote CPUs are notified on drop.
#[derive(Debug)]
pub struct InactiveFlusher;
- impl InactiveFlusher {
- pub fn new() -> Self {
- return Self {};
- }
- }
impl Flusher<MMArch> for InactiveFlusher {
    /// The table is not active here, so no local TLB flush is needed;
    /// remote CPUs are notified when the flusher itself is dropped.
    fn consume(&mut self, flush: PageFlush<MMArch>) {
        unsafe {
            flush.ignore();
        }
    }
}
impl Drop for InactiveFlusher {
    fn drop(&mut self) {
        // Tell the other CPUs to flush their TLBs for the modified table.
        send_ipi(IpiKind::FlushTLB, IpiTarget::Other);
    }
}
- pub fn round_down_to_page_size(addr: usize) -> usize {
- addr & !(MMArch::PAGE_SIZE - 1)
- }
- pub fn round_up_to_page_size(addr: usize) -> usize {
- round_down_to_page_size(addr + MMArch::PAGE_SIZE - 1)
- }
|