Add page reclamation mechanism

MemoryShore, 9 months ago
commit 9ba2fa3869

+ 25 - 20
kernel/src/arch/riscv64/mm/mod.rs

@@ -283,36 +283,41 @@ impl MemoryManagementArch for RiscV64MMArch {
     const PAGE_READONLY: usize = 0;
     const PAGE_READONLY_EXEC: usize = 0;
 
-    const PROTECTION_MAP: [usize; 16] = protection_map();
+    const PROTECTION_MAP: [EntryFlags<MMArch>; 16] = protection_map();
 }
 
-const fn protection_map() -> [usize; 16] {
-    type Arch = RiscV64MMArch;
+const fn protection_map() -> [EntryFlags<MMArch>; 16] {
     let mut map = [0; 16];
-    map[VmFlags::VM_NONE.bits()] = Arch::PAGE_NONE;
-    map[VmFlags::VM_READ.bits()] = Arch::PAGE_READONLY;
-    map[VmFlags::VM_WRITE.bits()] = Arch::PAGE_COPY;
-    map[VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] = Arch::PAGE_COPY;
-    map[VmFlags::VM_EXEC.bits()] = Arch::PAGE_READONLY_EXEC;
-    map[VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] = Arch::PAGE_READONLY_EXEC;
-    map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] = Arch::PAGE_COPY_EXEC;
+    map[VmFlags::VM_NONE.bits()] = MMArch::PAGE_NONE;
+    map[VmFlags::VM_READ.bits()] = MMArch::PAGE_READONLY;
+    map[VmFlags::VM_WRITE.bits()] = MMArch::PAGE_COPY;
+    map[VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] = MMArch::PAGE_COPY;
+    map[VmFlags::VM_EXEC.bits()] = MMArch::PAGE_READONLY_EXEC;
+    map[VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] = MMArch::PAGE_READONLY_EXEC;
+    map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] = MMArch::PAGE_COPY_EXEC;
     map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
-        Arch::PAGE_COPY_EXEC;
-    map[VmFlags::VM_SHARED.bits()] = Arch::PAGE_NONE;
-    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_READ.bits()] = Arch::PAGE_READONLY;
-    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits()] = Arch::PAGE_SHARED;
+        MMArch::PAGE_COPY_EXEC;
+    map[VmFlags::VM_SHARED.bits()] = MMArch::PAGE_NONE;
+    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_READ.bits()] = MMArch::PAGE_READONLY;
+    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits()] = MMArch::PAGE_SHARED;
     map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
-        Arch::PAGE_SHARED;
-    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits()] = Arch::PAGE_READONLY_EXEC;
+        MMArch::PAGE_SHARED;
+    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits()] = MMArch::PAGE_READONLY_EXEC;
     map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
-        Arch::PAGE_READONLY_EXEC;
+        MMArch::PAGE_READONLY_EXEC;
     map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
-        Arch::PAGE_SHARED_EXEC;
+        MMArch::PAGE_SHARED_EXEC;
     map[VmFlags::VM_SHARED.bits()
         | VmFlags::VM_EXEC.bits()
         | VmFlags::VM_WRITE.bits()
-        | VmFlags::VM_READ.bits()] = Arch::PAGE_SHARED_EXEC;
-    map
+        | VmFlags::VM_READ.bits()] = MMArch::PAGE_SHARED_EXEC;
+    let mut ret = [unsafe { EntryFlags::from_data(0) }; 16];
+    let mut index = 0;
+    while index < 16 {
+        ret[index] = unsafe { EntryFlags::from_data(map[index]) };
+        index += 1;
+    }
+    ret
 }
 
 const PAGE_ENTRY_BASE: usize = RiscV64MMArch::ENTRY_FLAG_PRESENT

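The `while` loop that closes this hunk is deliberate: `for` loops are not allowed in `const fn` on stable Rust, so the table is built as `[usize; 16]` first and converted element by element. A minimal standalone sketch of the pattern, with a hypothetical `Flags` type standing in for `EntryFlags<MMArch>`:

```rust
// `Flags` is a hypothetical stand-in for EntryFlags<MMArch>.
#[derive(Clone, Copy)]
struct Flags(usize);

// `for` is rejected in const fn, so index with a `while` loop instead.
const fn to_flags(raw: [usize; 16]) -> [Flags; 16] {
    let mut ret = [Flags(0); 16];
    let mut i = 0;
    while i < 16 {
        ret[i] = Flags(raw[i]);
        i += 1;
    }
    ret
}

// Evaluated entirely at compile time, like PROTECTION_MAP above.
const MAP: [Flags; 16] = to_flags([0; 16]);

fn main() {
    assert_eq!(MAP.len(), 16);
}
```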
+ 48 - 34
kernel/src/arch/x86_64/mm/mod.rs

@@ -353,7 +353,7 @@ impl MemoryManagementArch for X86_64MMArch {
     //     map
     // }
 
-    const PROTECTION_MAP: [usize; 16] = protection_map();
+    const PROTECTION_MAP: [EntryFlags<MMArch>; 16] = protection_map();
 
     const PAGE_NONE: usize =
         Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_ACCESSED | Self::ENTRY_FLAG_GLOBAL;
@@ -402,33 +402,39 @@ impl MemoryManagementArch for X86_64MMArch {
 ///
 /// ## Returns
 /// - `[EntryFlags<MMArch>; 16]`: the 16-entry protection map
-const fn protection_map() -> [usize; 16] {
-    type Arch = X86_64MMArch;
-    let mut map = [0; 16];
-    map[VmFlags::VM_NONE.bits()] = Arch::PAGE_NONE;
-    map[VmFlags::VM_READ.bits()] = Arch::PAGE_READONLY;
-    map[VmFlags::VM_WRITE.bits()] = Arch::PAGE_COPY;
-    map[VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] = Arch::PAGE_COPY;
-    map[VmFlags::VM_EXEC.bits()] = Arch::PAGE_READONLY_EXEC;
-    map[VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] = Arch::PAGE_READONLY_EXEC;
-    map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] = Arch::PAGE_COPY_EXEC;
-    map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
-        Arch::PAGE_COPY_EXEC;
-    map[VmFlags::VM_SHARED.bits()] = X86_64MMArch::PAGE_NONE;
-    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_READ.bits()] = Arch::PAGE_READONLY;
-    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits()] = Arch::PAGE_SHARED;
-    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
-        Arch::PAGE_SHARED;
-    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits()] = Arch::PAGE_READONLY_EXEC;
-    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
-        Arch::PAGE_READONLY_EXEC;
-    map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
-        Arch::PAGE_SHARED_EXEC;
-    map[VmFlags::VM_SHARED.bits()
-        | VmFlags::VM_EXEC.bits()
-        | VmFlags::VM_WRITE.bits()
-        | VmFlags::VM_READ.bits()] = Arch::PAGE_SHARED_EXEC;
-
+const fn protection_map() -> [EntryFlags<MMArch>; 16] {
+    let mut map = [unsafe { EntryFlags::from_data(0) }; 16];
+    unsafe {
+        map[VmFlags::VM_NONE.bits()] = EntryFlags::from_data(MMArch::PAGE_NONE);
+        map[VmFlags::VM_READ.bits()] = EntryFlags::from_data(MMArch::PAGE_READONLY);
+        map[VmFlags::VM_WRITE.bits()] = EntryFlags::from_data(MMArch::PAGE_COPY);
+        map[VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
+            EntryFlags::from_data(MMArch::PAGE_COPY);
+        map[VmFlags::VM_EXEC.bits()] = EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
+        map[VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
+            EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
+        map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
+            EntryFlags::from_data(MMArch::PAGE_COPY_EXEC);
+        map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
+            EntryFlags::from_data(MMArch::PAGE_COPY_EXEC);
+        map[VmFlags::VM_SHARED.bits()] = EntryFlags::from_data(MMArch::PAGE_NONE);
+        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_READ.bits()] =
+            EntryFlags::from_data(MMArch::PAGE_READONLY);
+        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits()] =
+            EntryFlags::from_data(MMArch::PAGE_SHARED);
+        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
+            EntryFlags::from_data(MMArch::PAGE_SHARED);
+        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits()] =
+            EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
+        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
+            EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
+        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
+            EntryFlags::from_data(MMArch::PAGE_SHARED_EXEC);
+        map[VmFlags::VM_SHARED.bits()
+            | VmFlags::VM_EXEC.bits()
+            | VmFlags::VM_WRITE.bits()
+            | VmFlags::VM_READ.bits()] = EntryFlags::from_data(MMArch::PAGE_SHARED_EXEC);
+    }
     // if X86_64MMArch::is_xd_reserved() {
     //     map.iter_mut().for_each(|x| *x &= !Self::ENTRY_FLAG_NO_EXEC)
     // }
@@ -734,13 +740,21 @@ impl FrameAllocator for LockedFrameAllocator {
     unsafe fn allocate(&mut self, mut count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
         count = count.next_power_of_two();
         if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
-            // Out of memory on the first allocation attempt
-            allocator.allocate(count).or_else(|| {
+            let usage = self.usage();
+            if usage.used().data() + count.data() > usage.total().data() / 2 {
+                log::info!("shrink: {:?}", usage);
                 let mut page_manager_guard = page_manager_lock_irqsave();
-                // Free some pages and retry the allocation
-                page_manager_guard.shrink_list();
-                return allocator.allocate(count);
-            })
+                // Reclaim some pages
+                page_manager_guard.shrink_list(count);
+            }
+            allocator.allocate(count)
+            // // Out of memory on the first allocation attempt
+            // allocator.allocate(count).or_else(|| {
+            //     let mut page_manager_guard = page_manager_lock_irqsave();
+            //     // Free some pages and retry the allocation
+            //     page_manager_guard.shrink_list(count);
+            //     return allocator.allocate(count);
+            // })
         } else {
             return None;
         }

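The allocator hunk above replaces reclaim-on-failure with reclaim-ahead-of-time: before handing out frames, it evicts `count` LRU pages whenever the allocation would push usage past half of physical memory. A sketch of that watermark check reduced to plain frame counts (`shrink` is a stand-in for `PageManager::shrink_list`):

```rust
/// Hedged sketch of the check in LockedFrameAllocator::allocate above.
/// `used`, `total`, and `count` are frame counts; the 50% watermark
/// mirrors the `usage.total().data() / 2` threshold in the diff.
fn maybe_reclaim(used: usize, total: usize, count: usize, shrink: impl FnOnce(usize)) {
    if used + count > total / 2 {
        shrink(count);
    }
}

fn main() {
    // 900 of 1024 frames used: a 64-frame request triggers eviction first.
    maybe_reclaim(900, 1024, 64, |n| println!("evicting {n} LRU pages"));
}
```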
+ 12 - 0
kernel/src/filesystem/fat/fs.rs

@@ -194,6 +194,8 @@ impl LockedFATInode {
             FileType::File
         };
 
+        let mut page_cache = PageCache::default();
+
         let inode: Arc<LockedFATInode> = Arc::new(LockedFATInode(SpinLock::new(FATInode {
             parent,
             self_ref: Weak::default(),
@@ -225,10 +227,20 @@ impl LockedFATInode {
             page_cache: Arc::new(PageCache::default()),
         })));
 
+        page_cache.inode = Some(Arc::downgrade(&inode) as Weak<dyn IndexNode>);
+
+        inode.0.lock().page_cache = Arc::new(page_cache);
+
         inode.0.lock().self_ref = Arc::downgrade(&inode);
 
         inode.0.lock().update_metadata();
 
+        // inode
+        //     .0
+        //     .lock()
+        //     .page_cache
+        //     .set_inode(Arc::downgrade(&inode) as Weak<dyn IndexNode>);
+
         return inode;
     }
 }

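The three-step wiring above exists because the `Weak` back-reference can only be taken once the inode's `Arc` exists, and a strong pointer in both directions would leak the pair. A simplified sketch of the same two-phase construction, with stand-in types:

```rust
use std::sync::{Arc, Mutex, Weak};

// Simplified stand-ins for PageCache and LockedFATInode.
struct Cache {
    owner: Option<Weak<Inode>>,
}

struct Inode {
    cache: Mutex<Option<Arc<Cache>>>,
}

fn new_inode() -> Arc<Inode> {
    // Phase 1: build the cache without its back-pointer.
    let mut cache = Cache { owner: None };
    let inode = Arc::new(Inode {
        cache: Mutex::new(None),
    });
    // Phase 2: the Arc exists now, so the Weak back-reference can be taken.
    cache.owner = Some(Arc::downgrade(&inode));
    *inode.cache.lock().unwrap() = Some(Arc::new(cache));
    inode
}

fn main() {
    let inode = new_inode();
    // Weak breaks the inode -> cache -> inode cycle: one strong count only.
    assert_eq!(Arc::strong_count(&inode), 1);
}
```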
+ 8 - 7
kernel/src/filesystem/vfs/file.rs

@@ -125,6 +125,7 @@ impl FileMode {
 #[allow(dead_code)]
 pub struct PageCache {
     xarray: SpinLock<XArray<Arc<Page>>>,
+    pub inode: Option<Weak<dyn IndexNode>>,
 }
 
 impl core::fmt::Debug for PageCache {
@@ -144,9 +145,10 @@ impl core::fmt::Debug for PageCache {
 }
 
 impl PageCache {
-    pub fn new() -> PageCache {
+    pub fn new(inode: Option<Weak<dyn IndexNode>>) -> PageCache {
         Self {
             xarray: SpinLock::new(XArray::new()),
+            inode,
         }
     }
 
@@ -169,6 +171,10 @@ impl PageCache {
         cursor.remove();
     }
 
+    pub fn set_inode(&mut self, inode: Weak<dyn IndexNode>) {
+        self.inode = Some(inode)
+    }
+
     // pub fn get_pages(&self, start_pgoff: usize, end_pgoff: usize) -> Vec<Arc<Page>> {
     //     let mut vec = Vec::new();
     //     for pgoff in start_pgoff..=end_pgoff {
@@ -182,15 +188,10 @@ impl PageCache {
 
 impl Default for PageCache {
     fn default() -> Self {
-        Self::new()
+        Self::new(None)
     }
 }
 
-pub trait PageCacheOperations: IndexNode {
-    fn write_page(&self, page: Page);
-    fn read_ahead(&self);
-}
-
 /// @brief Abstract file structure
 #[derive(Debug)]
 pub struct File {

+ 15 - 5
kernel/src/mm/fault.rs

@@ -633,6 +633,7 @@ impl PageFaultHandler {
             let page = Arc::new(Page::new(false, new_cache_page));
             pfm.page = Some(page.clone());
 
+            page.write().add_flags(PageFlags::PG_LRU);
             page_manager_guard.insert(new_cache_page, &page);
             page_cache.add_page(file_pgoff, &page);
 
@@ -677,19 +678,28 @@ impl PageFaultHandler {
                 MMArch::PAGE_SIZE,
             );
 
-            let vma = pfm.vma();
-            let vma_guard = vma.lock();
-            let file = vma_guard.vm_file().expect("no vm_file in vma");
-            let page_cache = file.inode().page_cache().unwrap();
+            let mut page_manager_guard = page_manager_lock_irqsave();
             let page_guard = cache_page.read();
             let new_page = Arc::new(Page::new(page_guard.shared(), new_phys));
-            page_cache.add_page(pfm.file_pgoff.unwrap(), &new_page);
+
+            // Register the new page with the page manager
+            page_manager_guard.insert(new_phys, &new_page);
             new_page
                 .write()
                 .set_page_cache_index(cache_page.read().page_cache(), cache_page.read().index());
+
+            // Link the vma into the page's vma list
+            new_page.write().insert_vma(pfm.vma());
         } else {
             // Map directly to the PageCache page
             mapper.map_phys(*pfm.address(), page_phys, vma_guard.flags());
+
+            if pfm.flags().contains(FaultFlags::FAULT_FLAG_WRITE)
+                && pfm.vma().lock().vm_flags().contains(VmFlags::VM_SHARED)
+            {
+                // For a shared writable mapping, mark the page-cache page dirty so it can be written back on reclaim
+                cache_page.write().add_flags(PageFlags::PG_DIRTY)
+            }
         }
         VmFaultReason::VM_FAULT_COMPLETED
     }

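The fault-path hunk distinguishes two outcomes for file-backed faults: private mappings get their own copy of the page-cache page, while shared mappings map the cache page in place and, on a write, mark it dirty for later writeback. A schematic of just that decision (the real code also maps pages and takes locks):

```rust
// Decision logic only; names mirror the diff but the types are stand-ins.
enum FileMapping {
    Private,
    Shared,
}

fn fault_action(mapping: FileMapping, write: bool) -> &'static str {
    match (mapping, write) {
        // Copy-on-access path: new frame, registered and linked to the vma.
        (FileMapping::Private, _) => "copy cache page; insert copy into page manager; link vma",
        // Shared write: the cache page itself is mapped, so flag it dirty.
        (FileMapping::Shared, true) => "map cache page directly; set PG_DIRTY for writeback",
        (FileMapping::Shared, false) => "map cache page directly",
    }
}

fn main() {
    println!("{}", fault_action(FileMapping::Shared, true));
}
```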
+ 5 - 7
kernel/src/mm/mod.rs

@@ -678,7 +678,7 @@ pub trait MemoryManagementArch: Clone + Copy + Debug {
     //     map
     // }
 
-    const PROTECTION_MAP: [usize; 16];
+    const PROTECTION_MAP: [EntryFlags<Self>; 16];
 
     /// Page protection flag conversion function
     /// ## Parameters
@@ -689,13 +689,11 @@ pub trait MemoryManagementArch: Clone + Copy + Debug {
     /// - EntryFlags: the page's protection bits
     fn vm_get_page_prot(vm_flags: VmFlags) -> EntryFlags<Self> {
         let map = Self::PROTECTION_MAP;
-        let mut ret = unsafe {
-            EntryFlags::from_data(
-                map[vm_flags.intersection(
-                    VmFlags::VM_READ | VmFlags::VM_WRITE | VmFlags::VM_EXEC | VmFlags::VM_SHARED,
-                )],
+        let mut ret = map[vm_flags
+            .intersection(
+                VmFlags::VM_READ | VmFlags::VM_WRITE | VmFlags::VM_EXEC | VmFlags::VM_SHARED,
             )
-        };
+            .bits()];
 
         #[cfg(target_arch = "x86_64")]
         {

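With `PROTECTION_MAP` now storing `EntryFlags` directly, `vm_get_page_prot` simply masks the four relevant VM flags and uses the resulting bit pattern as the array index. A sketch of that indexing; the bit values mirror the Linux convention and are an assumption about DragonOS's `VmFlags`:

```rust
// Assumed bit positions (Linux-style); not taken from the DragonOS source.
const VM_READ: usize = 1 << 0;
const VM_WRITE: usize = 1 << 1;
const VM_EXEC: usize = 1 << 2;
const VM_SHARED: usize = 1 << 3;

// The masked bit pattern *is* the PROTECTION_MAP index, 0..16.
fn prot_index(vm_flags: usize) -> usize {
    vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)
}

fn main() {
    // A shared read-write mapping selects entry 0b1011 = 11 (PAGE_SHARED).
    assert_eq!(prot_index(VM_READ | VM_WRITE | VM_SHARED), 11);
}
```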
+ 74 - 25
kernel/src/mm/page.rs

@@ -7,14 +7,14 @@ use core::{
 };
 
 use alloc::sync::Arc;
-use hashbrown::HashSet;
+use hashbrown::{HashMap, HashSet};
 use log::{error, info};
 use lru::LruCache;
 
 use crate::{
     arch::{interrupt::ipi::send_ipi, MMArch},
     exception::ipi::{IpiKind, IpiTarget},
-    filesystem::vfs::file::PageCache,
+    filesystem::vfs::{file::PageCache, FilePrivateData},
     ipc::shm::ShmId,
     libs::{
         rwlock::RwLock,
@@ -58,25 +58,29 @@ pub fn page_manager_lock_irqsave() -> SpinLockGuard<'static, PageManager> {
 
 // Physical page manager
 pub struct PageManager {
-    phys2page: LruCache<PhysAddr, Arc<Page>>,
+    phys2page: HashMap<PhysAddr, Arc<Page>>,
+    lru: LruCache<PhysAddr, Arc<Page>>,
 }
 
 impl PageManager {
     pub fn new() -> Self {
         Self {
-            phys2page: LruCache::unbounded(),
+            phys2page: HashMap::new(),
+            lru: LruCache::unbounded(),
         }
     }
 
     pub fn contains(&self, paddr: &PhysAddr) -> bool {
-        self.phys2page.peek(paddr).is_some()
+        self.phys2page.contains_key(paddr)
     }
 
     pub fn get(&mut self, paddr: &PhysAddr) -> Option<Arc<Page>> {
+        self.lru.promote(paddr);
         self.phys2page.get(paddr).cloned()
     }
 
     pub fn get_unwrap(&mut self, paddr: &PhysAddr) -> Arc<Page> {
+        self.lru.promote(paddr);
         self.phys2page
             .get(paddr)
             .unwrap_or_else(|| panic!("Phys Page not found, {:?}", paddr))
@@ -84,30 +88,60 @@ impl PageManager {
     }
 
     pub fn insert(&mut self, paddr: PhysAddr, page: &Arc<Page>) {
-        self.phys2page.put(paddr, page.clone());
+        self.phys2page.insert(paddr, page.clone());
+        if page.read().flags.contains(PageFlags::PG_LRU) {
+            self.lru.put(paddr, page.clone());
+        }
     }
 
     pub fn remove_page(&mut self, paddr: &PhysAddr) {
-        self.phys2page.pop(paddr);
-    }
-
-    pub fn shrink_list(&mut self) {
-        let entry = self.phys2page.peek_lru().unwrap();
-        let page = entry.1.clone();
-        let phys = *entry.0;
-        let page_cache = page.read().page_cache().unwrap();
-        for vma in page.read().anon_vma() {
-            let address_space = vma.lock().address_space().unwrap();
-            let address_space = address_space.upgrade().unwrap();
-            let mut guard = address_space.write();
-            let mapper = &mut guard.user_mapper.utable;
-            let virt = vma.lock().page_address(&page).unwrap();
-            unsafe {
-                mapper.unmap(virt, false).unwrap().flush();
+        self.phys2page.remove(paddr);
+    }
+
+    pub fn shrink_list(&mut self, count: PageFrameCount) {
+        for _ in 0..count.data() {
+            let entry = self.lru.pop_lru().unwrap();
+            let page = entry.1.clone();
+            let page_cache = page.read().page_cache().unwrap();
+            for vma in page.read().anon_vma() {
+                let address_space = vma.lock().address_space().unwrap();
+                let address_space = address_space.upgrade().unwrap();
+                let mut guard = address_space.write();
+                let mapper = &mut guard.user_mapper.utable;
+                let virt = vma.lock().page_address(&page).unwrap();
+                unsafe {
+                    mapper.unmap(virt, false).unwrap().flush();
+                }
+            }
+            page_cache.remove_page(page.read().index().unwrap());
+            if page.read().flags.contains(PageFlags::PG_DIRTY) {
+                // TODO: write the page back
+                let inode = page
+                    .read()
+                    .page_cache
+                    .clone()
+                    .unwrap()
+                    .inode
+                    .clone()
+                    .unwrap()
+                    .upgrade()
+                    .unwrap();
+                inode
+                    .write_at(
+                        page.read().index().unwrap(),
+                        MMArch::PAGE_SIZE,
+                        unsafe {
+                            core::slice::from_raw_parts(
+                                MMArch::phys_2_virt(page.read().phys_addr).unwrap().data()
+                                    as *mut u8,
+                                MMArch::PAGE_SIZE,
+                            )
+                        },
+                        SpinLock::new(FilePrivateData::Unused).lock(),
+                    )
+                    .unwrap();
             }
         }
-        self.phys2page.pop(&phys);
-        page_cache.remove_page(page.read().index().unwrap());
     }
 }
 
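The core of the reclamation mechanism is the split of the old `LruCache`-only `PageManager` into two indexes: every frame lives in `phys2page`, but only `PG_LRU`-tagged frames enter the LRU, so pinned frames are never evicted. A compact sketch of the same layout (`u64` stands in for `PhysAddr`, `&str` for `Arc<Page>`):

```rust
use lru::LruCache;
use std::collections::HashMap;

struct Manager {
    all: HashMap<u64, &'static str>,     // every frame, like phys2page
    lru: LruCache<u64, &'static str>,    // only reclaimable frames
}

impl Manager {
    fn new() -> Self {
        Self { all: HashMap::new(), lru: LruCache::unbounded() }
    }

    fn insert(&mut self, paddr: u64, page: &'static str, reclaimable: bool) {
        self.all.insert(paddr, page);
        if reclaimable {
            self.lru.put(paddr, page);
        }
    }

    fn get(&mut self, paddr: u64) -> Option<&'static str> {
        // Touch the entry so recently used pages drift away from eviction.
        self.lru.promote(&paddr);
        self.all.get(&paddr).copied()
    }

    fn shrink(&mut self, count: usize) {
        for _ in 0..count {
            // Unlike the kernel's pop_lru().unwrap(), stop when the LRU is empty.
            match self.lru.pop_lru() {
                Some((paddr, _)) => { self.all.remove(&paddr); }
                None => break,
            }
        }
    }
}

fn main() {
    let mut m = Manager::new();
    m.insert(0x1000, "cache page", true);
    m.insert(0x2000, "pinned page", false);
    m.shrink(4);
    assert!(m.get(0x1000).is_none() && m.get(0x2000).is_some());
}
```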
@@ -271,6 +305,21 @@ impl InnerPage {
         &self.flags
     }
 
+    #[inline(always)]
+    pub fn set_flags(&mut self, flags: PageFlags) {
+        self.flags = flags
+    }
+
+    #[inline(always)]
+    pub fn add_flags(&mut self, flags: PageFlags) {
+        self.flags = self.flags.union(flags);
+    }
+
+    #[inline(always)]
+    pub fn remove_flags(&mut self, flags: PageFlags) {
+        self.flags = self.flags.difference(flags);
+    }
+
     #[inline(always)]
     pub fn phys_address(&self) -> PhysAddr {
         self.phys_addr
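The new flag helpers use the `union`/`difference` style of the `bitflags` crate (an assumption based on the method names). A minimal sketch of how they behave:

```rust
use bitflags::bitflags;

bitflags! {
    // Stand-in for the kernel's PageFlags; values are illustrative only.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct PageFlags: u32 {
        const PG_LRU   = 1 << 0;
        const PG_DIRTY = 1 << 1;
    }
}

fn main() {
    let mut flags = PageFlags::empty();
    flags = flags.union(PageFlags::PG_LRU | PageFlags::PG_DIRTY); // add_flags
    flags = flags.difference(PageFlags::PG_DIRTY);                // remove_flags
    assert_eq!(flags, PageFlags::PG_LRU);
}
```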
@@ -623,7 +672,7 @@ impl<Arch: MemoryManagementArch> EntryFlags<Arch> {
     ///
     /// - prot_flags: the page's protection flags
     /// - user: whether userspace may access the page
-    pub fn from_prot_flags(prot_flags: ProtFlags, user: bool) -> EntryFlags<Arch> {
+    pub fn from_prot_flags(prot_flags: ProtFlags, user: bool) -> Self {
         let vm_flags = super::VmFlags::from(prot_flags);
         // let flags: EntryFlags<Arch> = EntryFlags::new()
         //     .set_user(user)

+ 1 - 0
kernel/src/sched/mod.rs

@@ -112,6 +112,7 @@ pub trait Scheduler {
     );
 
     /// ## Pick the most suitable task to run next
+    #[allow(dead_code)]
     fn pick_task(rq: &mut CpuRunQueue) -> Option<Arc<ProcessControlBlock>>;
 
     /// ## Pick the most suitable task to run next