
bugfix: When the physical machine has multiple memory areas, they could not all be used correctly; a memory hole at the kernel code location also prevented the kernel from booting. (#448)

* bugfix: When the physical machine has multiple memory areas, they could not all be used correctly; a memory hole at the kernel code location also prevented the kernel from booting.

Solution:
1. Add free pages to the buddy allocator area by area (see the sketch below).
2. Link the kernel at the 16 MB mark to avoid memory holes introduced by UEFI.

This address was chosen because the Linux rescue kernel is also loaded at 16 MB, so I assume firmware vendors will not claim that memory.
Although the UEFI specification allows the firmware to occupy any address, which means the kernel really ought to use kernel relocation to step around memory holes, I have not done that here.
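To make point 1 concrete, here is a minimal, self-contained sketch of the per-area idea: every physical memory area is walked independently and handed to the buddy allocator as aligned power-of-two blocks, instead of assuming that all free memory forms one contiguous range behind the bump allocator's offset. The names here (Area, toy_buddy_free, the order constants) are illustrative stand-ins, not the kernel's PhysMemoryArea/BuddyAllocator API; the real version lives in the buddy.rs changes below. Point 2 corresponds to the `. = 0x1000000;` line added to kernel/src/link.lds.

// Illustrative sketch only: Area and toy_buddy_free are hypothetical stand-ins
// for the kernel's PhysMemoryArea and BuddyAllocator::buddy_free, and the order
// constants are assumptions, not the kernel's definitions.
const MIN_ORDER: usize = 12; // 4 KiB pages
const MAX_ORDER: usize = 31;

#[derive(Clone, Copy, Debug)]
struct Area {
    base: usize, // page-aligned start of a free physical region
    end: usize,  // page-aligned end (exclusive)
}

// Stand-in for buddy_free(): record one free block of 2^order bytes.
fn toy_buddy_free(free_blocks: &mut Vec<(usize, usize)>, addr: usize, order: usize) {
    free_blocks.push((addr, order));
}

fn main() {
    // Two discontiguous areas, roughly what a UEFI memory map can report.
    let areas = [
        Area { base: 0x0100_0000, end: 0x0180_0000 },
        Area { base: 0x1000_0000, end: 0x1004_0000 },
    ];
    let mut free_blocks = Vec::new();

    // Core of the fix: walk each area independently instead of assuming the
    // free memory after the bump allocator's offset is one contiguous range.
    for area in areas.iter() {
        let mut paddr = area.base;
        while paddr < area.end {
            // Pick the largest power-of-two block that is aligned at paddr
            // and still fits inside this area.
            let mut order = MAX_ORDER - 1;
            while order > MIN_ORDER
                && ((paddr & ((1usize << order) - 1)) != 0 || paddr + (1usize << order) > area.end)
            {
                order -= 1;
            }
            toy_buddy_free(&mut free_blocks, paddr, order);
            paddr += 1usize << order;
        }
    }

    println!("handed {} free blocks to the toy buddy allocator", free_blocks.len());
}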
LoGin 1 year ago
parent
commit
99dbf38d2e

+ 53 - 0
kernel/src/arch/x86_64/mm/bump.rs

@@ -0,0 +1,53 @@
+use crate::{
+    kdebug,
+    libs::align::{page_align_down, page_align_up},
+    mm::{
+        allocator::bump::BumpAllocator, MemoryManagementArch, PhysAddr, PhysMemoryArea, VirtAddr,
+    },
+};
+
+use super::{X86_64MMBootstrapInfo, BOOTSTRAP_MM_INFO, PHYS_MEMORY_AREAS};
+
+impl<MMA: MemoryManagementArch> BumpAllocator<MMA> {
+    pub unsafe fn arch_remain_areas(
+        ret_areas: &mut [PhysMemoryArea],
+        mut res_count: usize,
+    ) -> usize {
+        let info: X86_64MMBootstrapInfo = BOOTSTRAP_MM_INFO.clone().unwrap();
+        let load_base = info.kernel_load_base_paddr;
+        let kernel_code_start = MMA::virt_2_phys(VirtAddr::new(info.kernel_code_start))
+            .unwrap()
+            .data();
+
+        let offset_start = page_align_up(core::cmp::max(load_base + 16384, 0x200000));
+        let offset_end = page_align_down(kernel_code_start - 16384);
+
+        // Add the space before the kernel code to the usable memory areas
+        for area in &PHYS_MEMORY_AREAS {
+            let area_base = area.area_base_aligned().data();
+            let area_end = area.area_end_aligned().data();
+            if area_base >= offset_end {
+                break;
+            }
+
+            if area_end <= offset_start {
+                continue;
+            }
+
+            let new_start = core::cmp::max(offset_start, area_base);
+            let new_end = core::cmp::min(offset_end, area_end);
+
+            if new_start >= new_end {
+                continue;
+            }
+
+            ret_areas[res_count] =
+                PhysMemoryArea::new(PhysAddr::new(new_start), new_end - new_start);
+
+            kdebug!("new arch remain area: {:?}", ret_areas[res_count]);
+            res_count += 1;
+        }
+
+        return res_count;
+    }
+}

+ 37 - 14
kernel/src/arch/x86_64/mm/mod.rs

@@ -1,4 +1,5 @@
 pub mod barrier;
+pub mod bump;
 
 use alloc::vec::Vec;
 use hashbrown::HashSet;
@@ -7,7 +8,8 @@ use x86_64::registers::model_specific::EferFlags;
 
 use crate::driver::tty::serial::serial8250::send_to_default_serial8250_port;
 use crate::include::bindings::bindings::{
-    multiboot2_get_memory, multiboot2_iter, multiboot_mmap_entry_t,
+    multiboot2_get_load_base, multiboot2_get_memory, multiboot2_iter, multiboot_mmap_entry_t,
+    multiboot_tag_load_base_addr_t,
 };
 use crate::libs::align::page_align_up;
 use crate::libs::lib_ui::screen_manager::scm_disable_put_to_window;
@@ -55,8 +57,9 @@ static KERNEL_PML4E_NO: usize = (X86_64MMArch::PHYS_OFFSET & ((1 << 48) - 1)) >>
 
 static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);
 
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub struct X86_64MMBootstrapInfo {
+    kernel_load_base_paddr: usize,
     kernel_code_start: usize,
     kernel_code_end: usize,
     kernel_data_end: usize,
@@ -64,16 +67,7 @@ pub struct X86_64MMBootstrapInfo {
     start_brk: usize,
 }
 
-impl Debug for X86_64MMBootstrapInfo {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        write!(
-            f,
-            "kernel_code_start: {:x}, kernel_code_end: {:x}, kernel_data_end: {:x}, kernel_rodata_end: {:x}, start_brk: {:x}",
-            self.kernel_code_start, self.kernel_code_end, self.kernel_data_end, self.kernel_rodata_end, self.start_brk)
-    }
-}
-
-pub static mut BOOTSTRAP_MM_INFO: Option<X86_64MMBootstrapInfo> = None;
+pub(super) static mut BOOTSTRAP_MM_INFO: Option<X86_64MMBootstrapInfo> = None;
 
 /// @brief The x86_64 memory management architecture struct
 #[derive(Debug, Clone, Copy, Hash)]
@@ -136,14 +130,17 @@ impl MemoryManagementArch for X86_64MMArch {
         }
 
         Self::init_xd_rsvd();
+        let load_base_paddr = Self::get_load_base_paddr();
 
         let bootstrap_info = X86_64MMBootstrapInfo {
+            kernel_load_base_paddr: load_base_paddr.data(),
             kernel_code_start: _text as usize,
             kernel_code_end: _etext as usize,
             kernel_data_end: _edata as usize,
             kernel_rodata_end: _erodata as usize,
             start_brk: _end as usize,
         };
+
         unsafe {
             BOOTSTRAP_MM_INFO = Some(bootstrap_info);
         }
@@ -151,6 +148,7 @@ impl MemoryManagementArch for X86_64MMArch {
         // Initialize the physical memory areas (obtained from multiboot2)
         let areas_count =
             Self::init_memory_area_from_multiboot2().expect("init memory area failed");
+
         send_to_default_serial8250_port("x86 64 init end\n\0".as_bytes());
 
         return &PHYS_MEMORY_AREAS[0..areas_count];
@@ -238,6 +236,28 @@ impl MemoryManagementArch for X86_64MMArch {
 }
 
 impl X86_64MMArch {
+    unsafe fn get_load_base_paddr() -> PhysAddr {
+        let mut mb2_lb_info: [multiboot_tag_load_base_addr_t; 512] = mem::zeroed();
+        send_to_default_serial8250_port("get_load_base_paddr begin\n\0".as_bytes());
+
+        let mut mb2_count: u32 = 0;
+        multiboot2_iter(
+            Some(multiboot2_get_load_base),
+            &mut mb2_lb_info as *mut [multiboot_tag_load_base_addr_t; 512] as usize as *mut c_void,
+            &mut mb2_count,
+        );
+
+        if mb2_count == 0 {
+            send_to_default_serial8250_port(
+                "get_load_base_paddr mb2_count == 0, default to 1MB\n\0".as_bytes(),
+            );
+            return PhysAddr::new(0x100000);
+        }
+
+        let phys = mb2_lb_info[0].load_base_addr as usize;
+
+        return PhysAddr::new(phys);
+    }
     unsafe fn init_memory_area_from_multiboot2() -> Result<usize, SystemError> {
         // This array stores the memory area info (obtained from C)
         let mut mb2_mem_info: [multiboot_mmap_entry_t; 512] = mem::zeroed();
@@ -269,7 +289,6 @@ impl X86_64MMArch {
         }
         send_to_default_serial8250_port("init_memory_area_from_multiboot2 end\n\0".as_bytes());
         kinfo!("Total memory size: {} MB, total areas from multiboot2: {mb2_count}, valid areas: {areas_count}", total_mem_size / 1024 / 1024);
-
         return Ok(areas_count);
     }
 
@@ -286,7 +305,11 @@ impl X86_64MMArch {
 
     /// Check whether the XD flag bit is reserved
     pub fn is_xd_reserved() -> bool {
-        return XD_RESERVED.load(Ordering::Relaxed);
+        // return XD_RESERVED.load(Ordering::Relaxed);
+
+        // Execute disable is not supported for now, so simply return true.
+        // Reason: it seems we cannot yet set the page-level XD bit correctly; doing so triggers a page fault.
+        return true;
     }
 }
 

+ 14 - 0
kernel/src/driver/multiboot2/multiboot2.c

@@ -102,6 +102,20 @@ bool multiboot2_get_VBE_info(const struct iter_data_t *_iter_data, void *data, u
   return true;
 }
 
+/// @brief Get the kernel load base address
+/// @param _iter_data the multiboot2 tag currently being iterated over
+/// @param data output buffer that receives the load base address tag
+/// @param reserved unused
+/// @return true if the tag is a load-base-address tag and has been copied into data, false otherwise
+bool multiboot2_get_load_base(const struct iter_data_t *_iter_data, void *data, unsigned int *reserved)
+{
+
+  if (_iter_data->type != MULTIBOOT_TAG_TYPE_LOAD_BASE_ADDR)
+    return false;
+  *(struct multiboot_tag_load_base_addr_t *)data = *(struct multiboot_tag_load_base_addr_t *)_iter_data;
+  return true;
+}
+
 /**
  * @brief Get framebuffer information
  *

+ 2 - 0
kernel/src/driver/multiboot2/multiboot2.h

@@ -434,6 +434,8 @@ void multiboot2_iter(bool (*_fun)(const struct iter_data_t *, void *, unsigned i
  */
 bool multiboot2_get_memory(const struct iter_data_t *_iter_data, void *_data, unsigned int *count);
 
+bool multiboot2_get_load_base(const struct iter_data_t *_iter_data, void *data, unsigned int *reserved);
+
 /**
  * @brief Get VBE information
  *

+ 3 - 1
kernel/src/link.lds

@@ -18,7 +18,8 @@ SECTIONS
 		*(.bootstrap.data)
 		. = ALIGN(4096);
 	}
-	
+
+	. = 0x1000000;
 	. += KERNEL_VMA;
 	. = ALIGN(32768);
 	text_start_pa = .;
@@ -40,6 +41,7 @@ SECTIONS
 	{
 		_data = .;
 		*(.data)
+		*(.data.*)
 		
 		_edata = .;
 	}

+ 60 - 119
kernel/src/mm/allocator/buddy.rs

@@ -6,9 +6,9 @@
 use crate::arch::MMArch;
 use crate::mm::allocator::bump::BumpAllocator;
 use crate::mm::allocator::page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage};
-use crate::mm::{MemoryManagementArch, PhysAddr, VirtAddr};
+use crate::mm::{MemoryManagementArch, PhysAddr, PhysMemoryArea, VirtAddr};
 use crate::{kdebug, kwarn};
-use core::cmp::{max, min};
+use core::cmp::min;
 use core::fmt::Debug;
 use core::intrinsics::{likely, unlikely};
 
@@ -77,15 +77,9 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
 
     pub unsafe fn new(mut bump_allocator: BumpAllocator<A>) -> Option<Self> {
         let initial_free_pages = bump_allocator.usage().free();
+        let total_memory = bump_allocator.usage().total();
         kdebug!("Free pages before init buddy: {:?}", initial_free_pages);
         kdebug!("Buddy entries: {}", Self::BUDDY_ENTRIES);
-        // 最高阶的链表页数
-        let max_order_linked_list_page_num = max(
-            1,
-            (((initial_free_pages.data() * A::PAGE_SIZE) >> (MAX_ORDER - 1)) + Self::BUDDY_ENTRIES
-                - 1)
-                / Self::BUDDY_ENTRIES,
-        );
 
         let mut free_area: [PhysAddr; (MAX_ORDER - MIN_ORDER) as usize] =
             [PhysAddr::new(0); (MAX_ORDER - MIN_ORDER) as usize];
@@ -102,138 +96,85 @@ impl<A: MemoryManagementArch> BuddyAllocator<A> {
             Self::write_page(*f, page_list);
         }
 
-        // 分配最高阶的链表页
-        for _ in 1..max_order_linked_list_page_num {
-            let curr_page = bump_allocator.allocate_one().unwrap();
-            // 清空当前页
-            core::ptr::write_bytes(
-                MMArch::phys_2_virt(curr_page)?.data() as *mut u8,
-                0,
-                A::PAGE_SIZE,
-            );
-
-            let page_list: PageList<A> =
-                PageList::new(0, free_area[Self::order2index((MAX_ORDER - 1) as u8)]);
-            Self::write_page(curr_page, page_list);
-            free_area[Self::order2index((MAX_ORDER - 1) as u8)] = curr_page;
-        }
+        let mut allocator = Self {
+            free_area,
+            total: PageFrameCount::new(0),
+            phantom: PhantomData,
+        };
 
-        let initial_bump_offset = bump_allocator.offset();
-        let pages_to_buddy = bump_allocator.usage().free();
-        kdebug!("pages_to_buddy {:?}", pages_to_buddy);
-        // kdebug!("initial_bump_offset {:#x}", initial_bump_offset);
-        let mut paddr = initial_bump_offset;
-        let mut remain_pages = pages_to_buddy;
-        // 设置entry,这里假设了bump_allocator当前offset之后,所有的area的地址是连续的.
-        // TODO: 这里需要修改,按照area来处理
-        for i in MIN_ORDER..MAX_ORDER {
-            // kdebug!("i {i}, remain pages={}", remain_pages.data());
-            if remain_pages.data() < (1 << (i - MIN_ORDER)) {
-                break;
-            }
+        let mut total_pages_to_buddy = PageFrameCount::new(0);
+        let mut res_areas = [PhysMemoryArea::default(); 128];
+        let mut offset_in_remain_area = bump_allocator
+            .remain_areas(&mut res_areas)
+            .expect("BuddyAllocator: failed to get remain areas from bump allocator");
 
-            assert!(paddr & ((1 << i) - 1) == 0);
+        let remain_areas = &res_areas[0..];
 
-            if likely(i != MAX_ORDER - 1) {
-                // 要填写entry
-                if paddr & (1 << i) != 0 {
-                    let page_list_paddr: PhysAddr = free_area[Self::order2index(i as u8)];
-                    let mut page_list: PageList<A> = Self::read_page(page_list_paddr);
+        kdebug!("Remain areas: {:?}", &remain_areas[0..10]);
+        kdebug!("offset_in_remain_area: {:?}", offset_in_remain_area);
 
-                    A::write(
-                        Self::entry_virt_addr(page_list_paddr, page_list.entry_num),
-                        paddr,
-                    );
-                    page_list.entry_num += 1;
-                    Self::write_page(page_list_paddr, page_list);
+        for area in remain_areas {
+            let mut paddr = (area.area_base_aligned() + offset_in_remain_area).data();
+            let mut remain_pages =
+                PageFrameCount::from_bytes(area.area_end_aligned().data() - paddr).unwrap();
+            total_pages_to_buddy += remain_pages;
 
-                    paddr += 1 << i;
-                    remain_pages -= 1 << (i - MIN_ORDER);
-                };
-            } else {
-                // 往最大的阶数的链表中添加entry(注意要考虑到最大阶数的链表可能有多页)
-                // 断言剩余页面数量是MAX_ORDER-1阶的整数倍
+            if offset_in_remain_area != 0 {
+                offset_in_remain_area = 0;
+            }
 
-                let mut entries = (remain_pages.data() * A::PAGE_SIZE) >> i;
-                let mut page_list_paddr: PhysAddr = free_area[Self::order2index(i as u8)];
-                let block_size = 1usize << i;
+            // Start from the low orders and fill the free lists as much as possible
+            for i in MIN_ORDER..MAX_ORDER {
+                // kdebug!("i {i}, remain pages={}", remain_pages.data());
+                if remain_pages.data() < (1 << (i - MIN_ORDER)) {
+                    break;
+                }
 
-                if entries > Self::BUDDY_ENTRIES {
-                    // 在第一页填写一些entries
-                    let num = entries % Self::BUDDY_ENTRIES;
-                    entries -= num;
+                assert!(paddr & ((1 << i) - 1) == 0);
 
-                    let mut page_list: PageList<A> = Self::read_page(page_list_paddr);
-                    for _j in 0..num {
-                        A::write(
-                            Self::entry_virt_addr(page_list_paddr, page_list.entry_num),
-                            paddr,
-                        );
-                        page_list.entry_num += 1;
-                        paddr += block_size;
-                        remain_pages -= 1 << (i - MIN_ORDER);
-                    }
-                    page_list_paddr = page_list.next_page;
-                    Self::write_page(page_list_paddr, page_list);
-                    assert!(!page_list_paddr.is_null());
-                }
+                if likely(i != MAX_ORDER - 1) {
+                    // An entry needs to be written
+                    if paddr & (1 << i) != 0 {
+                        allocator.buddy_free(PhysAddr::new(paddr), i as u8);
 
-                while entries > 0 {
-                    let mut page_list: PageList<A> = Self::read_page(page_list_paddr);
+                        paddr += 1 << i;
+                        remain_pages -= 1 << (i - MIN_ORDER);
+                    };
+                } else {
+                    // Add entries to the highest-order free list (note that this list may span multiple pages)
+                    // Assert that the remaining page count is a multiple of an order MAX_ORDER-1 block
 
-                    for _ in 0..Self::BUDDY_ENTRIES {
-                        A::write(
-                            Self::entry_virt_addr(page_list_paddr, page_list.entry_num),
-                            paddr,
-                        );
-                        page_list.entry_num += 1;
-                        paddr += block_size;
+                    let mut entries = (remain_pages.data() * A::PAGE_SIZE) >> i;
+                    while entries > 0 {
+                        allocator.buddy_free(PhysAddr::new(paddr), i as u8);
+                        paddr += 1 << i;
                         remain_pages -= 1 << (i - MIN_ORDER);
-                        entries -= 1;
-                        if entries == 0 {
-                            break;
-                        }
-                    }
-                    page_list_paddr = page_list.next_page;
-                    Self::write_page(page_list_paddr, page_list);
 
-                    if likely(entries > 0) {
-                        assert!(!page_list_paddr.is_null());
+                        entries -= 1;
                     }
                 }
             }
-        }
 
-        let mut remain_bytes = remain_pages.data() * A::PAGE_SIZE;
+            // Then go from high order to low and add the remaining pages to the free lists
+            let mut remain_bytes = remain_pages.data() * A::PAGE_SIZE;
 
-        assert!(remain_bytes < (1 << MAX_ORDER - 1));
+            assert!(remain_bytes < (1 << MAX_ORDER - 1));
 
-        for i in (MIN_ORDER..MAX_ORDER).rev() {
-            if remain_bytes >= (1 << i) {
-                assert!(paddr & ((1 << i) - 1) == 0);
-                let page_list_paddr: PhysAddr = free_area[Self::order2index(i as u8)];
-                let mut page_list: PageList<A> = Self::read_page(page_list_paddr);
-
-                A::write(
-                    Self::entry_virt_addr(page_list_paddr, page_list.entry_num),
-                    paddr,
-                );
-                page_list.entry_num += 1;
-                Self::write_page(page_list_paddr, page_list);
+            for i in (MIN_ORDER..MAX_ORDER).rev() {
+                if remain_bytes >= (1 << i) {
+                    assert!(paddr & ((1 << i) - 1) == 0);
+                    allocator.buddy_free(PhysAddr::new(paddr), i as u8);
 
-                paddr += 1 << i;
-                remain_bytes -= 1 << i;
+                    paddr += 1 << i;
+                    remain_bytes -= 1 << i;
+                }
             }
-        }
 
-        assert!(remain_bytes == 0);
-        assert!(paddr == initial_bump_offset + pages_to_buddy.data() * A::PAGE_SIZE);
+            assert!(remain_bytes == 0);
+        }
 
-        let allocator = Self {
-            free_area,
-            total: pages_to_buddy,
-            phantom: PhantomData,
-        };
+        kdebug!("Total pages to buddy: {:?}", total_pages_to_buddy);
+        allocator.total = total_memory;
 
         Some(allocator)
     }

+ 58 - 2
kernel/src/mm/allocator/bump.rs

@@ -37,6 +37,62 @@ impl<MMA: MemoryManagementArch> BumpAllocator<MMA> {
     pub fn offset(&self) -> usize {
         return self.offset;
     }
+
+    /// Return the remaining physical memory areas that have not been allocated yet
+    ///
+    /// ## Return value
+    ///
+    /// - `result_area`: array of the remaining, not yet allocated physical memory areas
+    /// - `offset_aligned`: within the first returned area, the offset that has already been allocated (relative to the area's aligned base address)
+    pub fn remain_areas(&self, result_area: &mut [PhysMemoryArea]) -> Option<usize> {
+        let mut offset = self.offset();
+
+        let mut ret_offset_aligned = 0;
+
+        let mut res_cnt = 0;
+
+        // Iterate over all physical memory areas
+        for i in 0..self.areas().len() {
+            let area = &self.areas()[i];
+            // Align the area's base address up to PAGE_SIZE
+            // let area_base = (area.base.data() + MMA::PAGE_SHIFT) & !(MMA::PAGE_SHIFT);
+            let area_base = area.area_base_aligned().data();
+            // Align the area's end address down to PAGE_SIZE
+            // let area_end = (area.base.data() + area.size) & !(MMA::PAGE_SHIFT);
+            let area_end = area.area_end_aligned().data();
+
+            // If offset >= area_end, the current area has been fully allocated; skip to the next one
+            if offset >= area_end {
+                continue;
+            }
+
+            // If offset < area_base, no page frames have been allocated from this area yet; set offset to area_base
+            if offset < area_base {
+                offset = area_base;
+            } else if offset < area_end {
+                // Align offset up to PAGE_SIZE
+                offset = (offset + (MMA::PAGE_SIZE - 1)) & !(MMA::PAGE_SIZE - 1);
+            }
+            // found
+            if offset + 1 * MMA::PAGE_SIZE <= area_end {
+                for j in i..self.areas().len() {
+                    if self.areas()[j].area_base_aligned() < self.areas()[j].area_end_aligned() {
+                        result_area[res_cnt] = self.areas()[j];
+                        res_cnt += 1;
+                    }
+                }
+                ret_offset_aligned = offset;
+                break;
+            }
+        }
+
+        let res_cnt = unsafe { Self::arch_remain_areas(result_area, res_cnt) };
+        if res_cnt == 0 {
+            return None;
+        } else {
+            return Some(ret_offset_aligned);
+        }
+    }
 }
 
 impl<MMA: MemoryManagementArch> FrameAllocator for BumpAllocator<MMA> {
@@ -50,10 +106,10 @@ impl<MMA: MemoryManagementArch> FrameAllocator for BumpAllocator<MMA> {
         for area in self.areas().iter() {
             // Align the area's base address up to PAGE_SIZE
             // let area_base = (area.base.data() + MMA::PAGE_SHIFT) & !(MMA::PAGE_SHIFT);
-            let area_base = (area.base.data() + (MMA::PAGE_SIZE - 1)) & !(MMA::PAGE_SIZE - 1);
+            let area_base = area.area_base_aligned().data();
             // Align the area's end address down to PAGE_SIZE
             // let area_end = (area.base.data() + area.size) & !(MMA::PAGE_SHIFT);
-            let area_end = (area.base.data() + area.size) & !(MMA::PAGE_SIZE - 1);
+            let area_end = area.area_end_aligned().data();
 
             // If offset >= area_end, the current area has been fully allocated; skip to the next one
             if offset >= area_end {

+ 27 - 0
kernel/src/mm/mod.rs

@@ -329,6 +329,33 @@ pub struct PhysMemoryArea {
     pub size: usize,
 }
 
+impl PhysMemoryArea {
+    pub fn new(base: PhysAddr, size: usize) -> Self {
+        Self { base, size }
+    }
+
+    /// Return the area's start physical address, aligned up to a page boundary
+    pub fn area_base_aligned(&self) -> PhysAddr {
+        return PhysAddr::new(
+            (self.base.data() + (MMArch::PAGE_SIZE - 1)) & !(MMArch::PAGE_SIZE - 1),
+        );
+    }
+
+    /// Return the area's end physical address, aligned down to a page boundary
+    pub fn area_end_aligned(&self) -> PhysAddr {
+        return PhysAddr::new((self.base.data() + self.size) & !(MMArch::PAGE_SIZE - 1));
+    }
+}
+
+impl Default for PhysMemoryArea {
+    fn default() -> Self {
+        Self {
+            base: PhysAddr::new(0),
+            size: 0,
+        }
+    }
+}
+
 pub trait MemoryManagementArch: Clone + Copy + Debug {
     /// Page size shift (for 4K pages this is 12, since 2^12 = 4096)
     const PAGE_SHIFT: usize;

+ 2 - 2
tools/write_disk_image.sh

@@ -62,7 +62,7 @@ if [ ! -f "${root_folder}/bin/disk.img" ]; then
         --bios) 
         case "$2" in
                 uefi)
-            sudo bash ./create_hdd_image.sh -P GPT #GPT partition
+            sudo bash ./create_hdd_image.sh -P MBR #GPT partition
             ;;
                 legacy)
             sudo bash ./create_hdd_image.sh -P MBR #MBR partition
@@ -88,7 +88,7 @@ echo $LOOP_DEVICE
 # 检测grub文件夹是否存在
 if [ -d "${GRUB_INSTALL_PATH}" ]; then
   echo "grub已安装"
-  INSTALL_GRUB_TO_IMAGE="0"
+   INSTALL_GRUB_TO_IMAGE="0"
 else
   mkdir -p ${GRUB_INSTALL_PATH}
 fi