mod.rs 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826
  1. pub mod barrier;
  2. pub mod bump;
  3. pub mod fault;
  4. pub mod pkru;
  5. use alloc::sync::Arc;
  6. use alloc::vec::Vec;
  7. use hashbrown::HashSet;
  8. use log::{debug, info, warn};
  9. use x86::time::rdtsc;
  10. use x86_64::registers::model_specific::EferFlags;
  11. use crate::driver::serial::serial8250::send_to_default_serial8250_port;
  12. use crate::include::bindings::bindings::{
  13. multiboot2_get_load_base, multiboot2_get_memory, multiboot2_iter, multiboot_mmap_entry_t,
  14. multiboot_tag_load_base_addr_t,
  15. };
  16. use crate::libs::align::page_align_up;
  17. use crate::libs::lib_ui::screen_manager::scm_disable_put_to_window;
  18. use crate::libs::spinlock::SpinLock;
  19. use crate::mm::allocator::page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage};
  20. use crate::mm::memblock::mem_block_manager;
  21. use crate::mm::ucontext::LockedVMA;
  22. use crate::{
  23. arch::MMArch,
  24. mm::allocator::{buddy::BuddyAllocator, bump::BumpAllocator},
  25. };
  26. use crate::mm::kernel_mapper::KernelMapper;
  27. use crate::mm::page::{EntryFlags, PageEntry, PAGE_1G_SHIFT};
  28. use crate::mm::{MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr, VmFlags};
  29. use system_error::SystemError;
  30. use core::arch::asm;
  31. use core::ffi::c_void;
  32. use core::fmt::Debug;
  33. use core::mem::{self};
  34. use core::sync::atomic::{compiler_fence, AtomicBool, Ordering};
  35. use super::kvm::vmx::vmcs::VmcsFields;
  36. use super::kvm::vmx::vmx_asm_wrapper::vmx_vmread;
/// Architecture-specific page mapper: the generic `PageMapper` instantiated
/// with the x86_64 MM architecture and the global locked frame allocator.
pub type PageMapper =
    crate::mm::page::PageMapper<crate::arch::x86_64::mm::X86_64MMArch, LockedFrameAllocator>;

/// Initial value of the CR3 register: the physical address of the first
/// kernel page table created during memory-management initialization.
static mut INITIAL_CR3_VALUE: PhysAddr = PhysAddr::new(0);

/// Global buddy allocator backing `LockedFrameAllocator`; `None` until
/// `set_inner_allocator` is called from `allocator_init`.
static INNER_ALLOCATOR: SpinLock<Option<BuddyAllocator<MMArch>>> = SpinLock::new(None);
/// Kernel image layout captured during early boot, taken from the linker
/// symbols (`_text`, `_etext`, `_edata`, `_erodata`, `_end`) and multiboot2.
#[derive(Clone, Copy, Debug)]
pub struct X86_64MMBootstrapInfo {
    // Physical address the kernel image was loaded at (from multiboot2).
    kernel_load_base_paddr: usize,
    // Virtual address of the start of the kernel code section (`_text`).
    kernel_code_start: usize,
    // Virtual address of the end of the kernel code section (`_etext`).
    kernel_code_end: usize,
    // Virtual address of the end of the kernel data section (`_edata`).
    kernel_data_end: usize,
    // Virtual address of the end of the kernel rodata section (`_erodata`).
    kernel_rodata_end: usize,
    // Virtual address of the end of the kernel image (`_end`); the early
    // bump allocator starts handing out memory above this point.
    start_brk: usize,
}
/// Bootstrap memory info; written once in `X86_64MMArch::init` and read by
/// `allocator_init` / `kernel_page_flags`.
pub(super) static mut BOOTSTRAP_MM_INFO: Option<X86_64MMBootstrapInfo> = None;

/// Memory-management architecture marker type for x86_64.
#[derive(Debug, Clone, Copy, Hash)]
pub struct X86_64MMArch;

/// Whether the XD (execute-disable) flag must be treated as reserved
/// (set when IA32_EFER.NXE is not enabled).
static XD_RESERVED: AtomicBool = AtomicBool::new(false);
impl MemoryManagementArch for X86_64MMArch {
    /// x86 currently supports the page-fault path.
    const PAGE_FAULT_ENABLED: bool = true;
    /// 4 KiB pages.
    const PAGE_SHIFT: usize = 12;
    /// Each page-table entry is 8 bytes, so one table holds 512 entries.
    const PAGE_ENTRY_SHIFT: usize = 9;
    /// Four-level paging (PML4T, PDPT, PDT, PT).
    const PAGE_LEVELS: usize = 4;
    /// Index of the first non-address bit in a page-table entry. On x86_64,
    /// bits [0, 47] carry the address and flags and bits [48, 51] are
    /// reserved, so the valid address range ends at index 52.
    /// Note that bit 63 is the XD bit, which controls whether execution is allowed.
    const ENTRY_ADDRESS_SHIFT: usize = 52;
    const ENTRY_FLAG_DEFAULT_PAGE: usize = Self::ENTRY_FLAG_PRESENT;
    const ENTRY_FLAG_DEFAULT_TABLE: usize = Self::ENTRY_FLAG_PRESENT;
    const ENTRY_FLAG_PRESENT: usize = 1 << 0;
    const ENTRY_FLAG_READONLY: usize = 0;
    const ENTRY_FLAG_WRITEABLE: usize = 1 << 1;
    const ENTRY_FLAG_READWRITE: usize = 1 << 1;
    const ENTRY_FLAG_USER: usize = 1 << 2;
    const ENTRY_FLAG_WRITE_THROUGH: usize = 1 << 3;
    const ENTRY_FLAG_CACHE_DISABLE: usize = 1 << 4;
    const ENTRY_FLAG_NO_EXEC: usize = 1 << 63;
    /// x86_64 has no positive EXEC flag; only the NO_EXEC (XD) flag exists.
    const ENTRY_FLAG_EXEC: usize = 0;
    const ENTRY_FLAG_ACCESSED: usize = 1 << 5;
    const ENTRY_FLAG_DIRTY: usize = 1 << 6;
    const ENTRY_FLAG_HUGE_PAGE: usize = 1 << 7;
    const ENTRY_FLAG_GLOBAL: usize = 1 << 8;
    /// Offset between physical and direct-mapped virtual addresses:
    /// 0xffff_8000_0000_0000
    const PHYS_OFFSET: usize = Self::PAGE_NEGATIVE_MASK + (Self::PAGE_ADDRESS_SIZE >> 1);
    const KERNEL_LINK_OFFSET: usize = 0x100000;
    // See https://code.dragonos.org.cn/xref/linux-6.1.9/arch/x86/include/asm/page_64_types.h#75
    const USER_END_VADDR: VirtAddr =
        VirtAddr::new((Self::PAGE_ADDRESS_SIZE >> 1) - Self::PAGE_SIZE);
    const USER_BRK_START: VirtAddr = VirtAddr::new(0x700000000000);
    const USER_STACK_START: VirtAddr = VirtAddr::new(0x6ffff0a00000);
    const FIXMAP_START_VADDR: VirtAddr = VirtAddr::new(0xffffb00000000000);
    /// The FIXMAP region is 1 MiB (256 pages of 4 KiB).
    const FIXMAP_SIZE: usize = 256 * 4096;
    const MMIO_BASE: VirtAddr = VirtAddr::new(0xffffa10000000000);
    const MMIO_SIZE: usize = 1 << PAGE_1G_SHIFT;

    /// Bootstraps memory management: records the kernel image layout,
    /// registers the physical memory areas reported by multiboot2, and
    /// initializes the page-frame allocators.
    unsafe fn init() {
        // Section boundary symbols provided by the linker script.
        extern "C" {
            fn _text();
            fn _etext();
            fn _edata();
            fn _erodata();
            fn _end();
        }
        Self::init_xd_rsvd();
        let load_base_paddr = Self::get_load_base_paddr();
        let bootstrap_info = X86_64MMBootstrapInfo {
            kernel_load_base_paddr: load_base_paddr.data(),
            kernel_code_start: _text as usize,
            kernel_code_end: _etext as usize,
            kernel_data_end: _edata as usize,
            kernel_rodata_end: _erodata as usize,
            start_brk: _end as usize,
        };
        unsafe {
            BOOTSTRAP_MM_INFO = Some(bootstrap_info);
        }
        // Initialize the physical memory areas (obtained from multiboot2).
        Self::init_memory_area_from_multiboot2().expect("init memory area failed");
        debug!("bootstrap info: {:?}", unsafe { BOOTSTRAP_MM_INFO });
        debug!("phys[0]=virt[0x{:x}]", unsafe {
            MMArch::phys_2_virt(PhysAddr::new(0)).unwrap().data()
        });
        // Initialize the memory manager (bump allocator -> buddy allocator).
        unsafe { allocator_init() };
        send_to_default_serial8250_port("x86 64 init done\n\0".as_bytes());
    }

    /// Flushes the TLB entry covering the given virtual address (`invlpg`).
    unsafe fn invalidate_page(address: VirtAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("invlpg [{0}]", in(reg) address.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// Flushes all TLB entries.
    unsafe fn invalidate_all() {
        compiler_fence(Ordering::SeqCst);
        // Rewriting CR3 with its current value flushes the TLB.
        Self::set_table(PageTableKind::User, Self::table(PageTableKind::User));
        compiler_fence(Ordering::SeqCst);
    }

    /// Returns the physical address of the top-level page table.
    ///
    /// For `Kernel`/`User` this reads CR3; for `EPT` it reads the EPT
    /// pointer from the current VMCS.
    unsafe fn table(table_kind: PageTableKind) -> PhysAddr {
        match table_kind {
            PageTableKind::Kernel | PageTableKind::User => {
                compiler_fence(Ordering::SeqCst);
                let cr3 = x86::controlregs::cr3() as usize;
                compiler_fence(Ordering::SeqCst);
                return PhysAddr::new(cr3);
            }
            PageTableKind::EPT => {
                let eptp =
                    vmx_vmread(VmcsFields::CTRL_EPTP_PTR as u32).expect("Failed to read eptp");
                return PhysAddr::new(eptp as usize);
            }
        }
    }

    /// Installs the given top-level page table into the processor (writes CR3).
    unsafe fn set_table(_table_kind: PageTableKind, table: PhysAddr) {
        compiler_fence(Ordering::SeqCst);
        asm!("mov cr3, {}", in(reg) table.data(), options(nostack, preserves_flags));
        compiler_fence(Ordering::SeqCst);
    }

    /// Returns whether the virtual address is canonical and therefore valid.
    fn virt_is_valid(virt: VirtAddr) -> bool {
        return virt.is_canonical();
    }

    /// Returns the address of the first kernel page table created during
    /// memory-management initialization.
    fn initial_page_table() -> PhysAddr {
        unsafe {
            return INITIAL_CR3_VALUE;
        }
    }

    /// Creates a new top-level page table for user space.
    ///
    /// The kernel half of the address space (PML4 entries from
    /// `PAGE_KERNEL_INDEX` upward) is copied into the new table so the
    /// kernel stays mapped in every address space.
    ///
    /// Returns the new `UserMapper`, or `ENOMEM` if no frame could be allocated.
    fn setup_new_usermapper() -> Result<crate::mm::ucontext::UserMapper, SystemError> {
        let new_umapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
            PageMapper::create(PageTableKind::User, LockedFrameAllocator)
                .ok_or(SystemError::ENOMEM)?
        };
        let current_ktable: KernelMapper = KernelMapper::lock();
        // Copies one PML4 entry from the live kernel table into the new table.
        let copy_mapping = |pml4_entry_no| unsafe {
            let entry: PageEntry<X86_64MMArch> = current_ktable
                .table()
                .entry(pml4_entry_no)
                .unwrap_or_else(|| panic!("entry {} not found", pml4_entry_no));
            new_umapper.table().set_entry(pml4_entry_no, entry)
        };
        // Copy the kernel mappings.
        for pml4_entry_no in MMArch::PAGE_KERNEL_INDEX..MMArch::PAGE_ENTRY_NUM {
            copy_mapping(pml4_entry_no);
        }
        return Ok(crate::mm::ucontext::UserMapper::new(new_umapper));
    }

    const PAGE_SIZE: usize = 1 << Self::PAGE_SHIFT;
    const PAGE_OFFSET_MASK: usize = Self::PAGE_SIZE - 1;
    const PAGE_MASK: usize = !(Self::PAGE_OFFSET_MASK);
    const PAGE_ADDRESS_SHIFT: usize = Self::PAGE_LEVELS * Self::PAGE_ENTRY_SHIFT + Self::PAGE_SHIFT;
    const PAGE_ADDRESS_SIZE: usize = 1 << Self::PAGE_ADDRESS_SHIFT;
    const PAGE_ADDRESS_MASK: usize = Self::PAGE_ADDRESS_SIZE - Self::PAGE_SIZE;
    const PAGE_ENTRY_SIZE: usize = 1 << (Self::PAGE_SHIFT - Self::PAGE_ENTRY_SHIFT);
    const PAGE_ENTRY_NUM: usize = 1 << Self::PAGE_ENTRY_SHIFT;
    const PAGE_ENTRY_MASK: usize = Self::PAGE_ENTRY_NUM - 1;
    const PAGE_KERNEL_INDEX: usize = (Self::PHYS_OFFSET & Self::PAGE_ADDRESS_MASK)
        >> (Self::PAGE_ADDRESS_SHIFT - Self::PAGE_ENTRY_SHIFT);
    const PAGE_NEGATIVE_MASK: usize = !((Self::PAGE_ADDRESS_SIZE) - 1);
    const ENTRY_ADDRESS_SIZE: usize = 1 << Self::ENTRY_ADDRESS_SHIFT;
    const ENTRY_ADDRESS_MASK: usize = Self::ENTRY_ADDRESS_SIZE - Self::PAGE_SIZE;
    const ENTRY_FLAGS_MASK: usize = !Self::ENTRY_ADDRESS_MASK;

    /// Reads a `T` from the given virtual address.
    unsafe fn read<T>(address: VirtAddr) -> T {
        return core::ptr::read(address.data() as *const T);
    }

    /// Writes a `T` to the given virtual address.
    unsafe fn write<T>(address: VirtAddr, value: T) {
        core::ptr::write(address.data() as *mut T, value);
    }

    /// Fills `count` bytes at the given virtual address with `value`.
    unsafe fn write_bytes(address: VirtAddr, value: u8, count: usize) {
        core::ptr::write_bytes(address.data() as *mut u8, value, count);
    }

    /// Converts a physical address to its direct-mapped virtual address,
    /// or `None` if adding `PHYS_OFFSET` would overflow.
    unsafe fn phys_2_virt(phys: PhysAddr) -> Option<VirtAddr> {
        if let Some(vaddr) = phys.data().checked_add(Self::PHYS_OFFSET) {
            return Some(VirtAddr::new(vaddr));
        } else {
            return None;
        }
    }

    /// Converts a direct-mapped virtual address back to its physical address,
    /// or `None` if the address lies below the direct-mapping base.
    unsafe fn virt_2_phys(virt: VirtAddr) -> Option<PhysAddr> {
        if let Some(paddr) = virt.data().checked_sub(Self::PHYS_OFFSET) {
            return Some(PhysAddr::new(paddr));
        } else {
            return None;
        }
    }

    /// Builds a raw page-table entry value from a physical address and flag bits.
    #[inline(always)]
    fn make_entry(paddr: PhysAddr, page_flags: usize) -> usize {
        return paddr.data() | page_flags;
    }

    /// Checks whether an access to `vma` is permitted by protection keys (PKRU).
    /// Execute accesses and foreign VMAs are always allowed here.
    fn vma_access_permitted(
        vma: Arc<LockedVMA>,
        write: bool,
        execute: bool,
        foreign: bool,
    ) -> bool {
        if execute {
            return true;
        }
        // NOTE: non-short-circuiting `|` — `vma.is_foreign()` is always evaluated.
        if foreign | vma.is_foreign() {
            return true;
        }
        pkru::pkru_allows_pkey(pkru::vma_pkey(vma), write)
    }

    /// VM-flag → page-flag table, built once at compile time; see
    /// the free function `protection_map` below.
    const PROTECTION_MAP: [EntryFlags<MMArch>; 16] = protection_map();

    const PAGE_NONE: usize =
        Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_ACCESSED | Self::ENTRY_FLAG_GLOBAL;
    const PAGE_SHARED: usize = Self::ENTRY_FLAG_PRESENT
        | Self::ENTRY_FLAG_READWRITE
        | Self::ENTRY_FLAG_USER
        | Self::ENTRY_FLAG_ACCESSED
        | Self::ENTRY_FLAG_NO_EXEC;
    const PAGE_SHARED_EXEC: usize = Self::ENTRY_FLAG_PRESENT
        | Self::ENTRY_FLAG_READWRITE
        | Self::ENTRY_FLAG_USER
        | Self::ENTRY_FLAG_ACCESSED;
    const PAGE_COPY_NOEXEC: usize = Self::ENTRY_FLAG_PRESENT
        | Self::ENTRY_FLAG_USER
        | Self::ENTRY_FLAG_ACCESSED
        | Self::ENTRY_FLAG_NO_EXEC;
    const PAGE_COPY_EXEC: usize =
        Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_USER | Self::ENTRY_FLAG_ACCESSED;
    const PAGE_COPY: usize = Self::ENTRY_FLAG_PRESENT
        | Self::ENTRY_FLAG_USER
        | Self::ENTRY_FLAG_ACCESSED
        | Self::ENTRY_FLAG_NO_EXEC;
    const PAGE_READONLY: usize = Self::ENTRY_FLAG_PRESENT
        | Self::ENTRY_FLAG_USER
        | Self::ENTRY_FLAG_ACCESSED
        | Self::ENTRY_FLAG_NO_EXEC;
    const PAGE_READONLY_EXEC: usize =
        Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_USER | Self::ENTRY_FLAG_ACCESSED;
    // The following combinations are unused on x86_64.
    const PAGE_READ: usize = 0;
    const PAGE_READ_EXEC: usize = 0;
    const PAGE_WRITE: usize = 0;
    const PAGE_WRITE_EXEC: usize = 0;
    const PAGE_EXEC: usize = 0;
}
/// Builds the protection-flag mapping table.
///
/// The table is indexed by the low bits of a `VmFlags` combination
/// (`VM_READ` / `VM_WRITE` / `VM_EXEC` / `VM_SHARED`), giving 16 possible
/// combinations; each entry is the hardware page-flag set to use for that
/// protection combination.
///
/// ## Returns
/// - `[EntryFlags<MMArch>; 16]`: the 16-entry mapping table
const fn protection_map() -> [EntryFlags<MMArch>; 16] {
    let mut map = [unsafe { EntryFlags::from_data(0) }; 16];
    unsafe {
        map[VmFlags::VM_NONE.bits()] = EntryFlags::from_data(MMArch::PAGE_NONE);
        map[VmFlags::VM_READ.bits()] = EntryFlags::from_data(MMArch::PAGE_READONLY);
        map[VmFlags::VM_WRITE.bits()] = EntryFlags::from_data(MMArch::PAGE_COPY);
        map[VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_COPY);
        map[VmFlags::VM_EXEC.bits()] = EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
        map[VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
        map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
            EntryFlags::from_data(MMArch::PAGE_COPY_EXEC);
        map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_COPY_EXEC);
        map[VmFlags::VM_SHARED.bits()] = EntryFlags::from_data(MMArch::PAGE_NONE);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_READONLY);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits()] =
            EntryFlags::from_data(MMArch::PAGE_SHARED);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_SHARED);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits()] =
            EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
            EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
        map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
            EntryFlags::from_data(MMArch::PAGE_SHARED_EXEC);
        map[VmFlags::VM_SHARED.bits()
            | VmFlags::VM_EXEC.bits()
            | VmFlags::VM_WRITE.bits()
            | VmFlags::VM_READ.bits()] = EntryFlags::from_data(MMArch::PAGE_SHARED_EXEC);
    }
    // NOTE(review): clearing ENTRY_FLAG_NO_EXEC when the XD bit is reserved
    // (as the disabled code below did) is currently skipped; `is_xd_reserved`
    // hard-codes `true`, so the NO_EXEC handling is presumably masked at a
    // different layer — confirm before re-enabling.
    // if X86_64MMArch::is_xd_reserved() {
    //     map.iter_mut().for_each(|x| *x &= !Self::ENTRY_FLAG_NO_EXEC)
    // }
    map
}
impl X86_64MMArch {
    /// Queries multiboot2 for the physical address the kernel was loaded at.
    ///
    /// Falls back to 1 MiB (0x100000) when no load-base tag is reported.
    unsafe fn get_load_base_paddr() -> PhysAddr {
        // Buffer for load-base tags, filled in by the C-side multiboot2 iterator.
        let mut mb2_lb_info: [multiboot_tag_load_base_addr_t; 512] = mem::zeroed();
        send_to_default_serial8250_port("get_load_base_paddr begin\n\0".as_bytes());
        let mut mb2_count: u32 = 0;
        multiboot2_iter(
            Some(multiboot2_get_load_base),
            &mut mb2_lb_info as *mut [multiboot_tag_load_base_addr_t; 512] as usize as *mut c_void,
            &mut mb2_count,
        );
        if mb2_count == 0 {
            send_to_default_serial8250_port(
                "get_load_base_paddr mb2_count == 0, default to 1MB\n\0".as_bytes(),
            );
            return PhysAddr::new(0x100000);
        }
        let phys = mb2_lb_info[0].load_base_addr as usize;
        return PhysAddr::new(phys);
    }

    /// Registers every usable RAM area reported by multiboot2 with the
    /// memblock manager.
    ///
    /// ## Returns
    /// - `Ok(usize)`: the number of valid (type 1, non-empty) areas added
    unsafe fn init_memory_area_from_multiboot2() -> Result<usize, SystemError> {
        // Buffer for the memory-map entries (obtained from the C side).
        let mut mb2_mem_info: [multiboot_mmap_entry_t; 512] = mem::zeroed();
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 begin\n\0".as_bytes());
        let mut mb2_count: u32 = 0;
        multiboot2_iter(
            Some(multiboot2_get_memory),
            &mut mb2_mem_info as *mut [multiboot_mmap_entry_t; 512] as usize as *mut c_void,
            &mut mb2_count,
        );
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 2\n\0".as_bytes());
        let mb2_count = mb2_count as usize;
        let mut areas_count = 0usize;
        let mut total_mem_size = 0usize;
        for info_entry in mb2_mem_info.iter().take(mb2_count) {
            // Only use the memory area if its type is 1 (RAM)
            if info_entry.type_ == 1 {
                // Skip the memory area if its len is 0
                if info_entry.len == 0 {
                    continue;
                }
                total_mem_size += info_entry.len as usize;
                mem_block_manager()
                    .add_block(
                        PhysAddr::new(info_entry.addr as usize),
                        info_entry.len as usize,
                    )
                    .unwrap_or_else(|e| {
                        warn!(
                            "Failed to add memory block: base={:#x}, size={:#x}, error={:?}",
                            info_entry.addr, info_entry.len, e
                        );
                    });
                areas_count += 1;
            }
        }
        send_to_default_serial8250_port("init_memory_area_from_multiboot2 end\n\0".as_bytes());
        info!("Total memory size: {} MB, total areas from multiboot2: {mb2_count}, valid areas: {areas_count}", total_mem_size / 1024 / 1024);
        return Ok(areas_count);
    }

    /// Records whether the XD (execute-disable) page-table bit must be
    /// treated as reserved, based on IA32_EFER.NXE.
    fn init_xd_rsvd() {
        // Read the IA32_EFER register.
        let efer: EferFlags = x86_64::registers::model_specific::Efer::read();
        if !efer.contains(EferFlags::NO_EXECUTE_ENABLE) {
            // NO_EXECUTE_ENABLE is false, so mark the XD bit as reserved.
            debug!("NO_EXECUTE_ENABLE is false, set XD_RESERVED to true");
            XD_RESERVED.store(true, Ordering::Relaxed);
        }
        compiler_fence(Ordering::SeqCst);
    }

    /// Returns whether the XD flag is treated as reserved.
    pub fn is_xd_reserved() -> bool {
        // return XD_RESERVED.load(Ordering::Relaxed);
        // Execute-disable is not supported yet, so always return true.
        // Reason: page-level XD bits are apparently not set correctly yet
        // and would trigger page faults.
        return true;
    }
}
  435. impl VirtAddr {
  436. /// @brief 判断虚拟地址是否合法
  437. #[inline(always)]
  438. pub fn is_canonical(self) -> bool {
  439. let x = self.data() & X86_64MMArch::PHYS_OFFSET;
  440. // 如果x为0,说明虚拟地址的高位为0,是合法的用户地址
  441. // 如果x为PHYS_OFFSET,说明虚拟地址的高位全为1,是合法的内核地址
  442. return x == 0 || x == X86_64MMArch::PHYS_OFFSET;
  443. }
  444. }
/// Builds the page-frame allocators and the first real kernel page table.
///
/// Sequence: reserve everything below the kernel brk, bootstrap a bump
/// allocator, use it to build a fresh kernel page table mapping all physical
/// memory, hand the bump allocator over to the buddy allocator, then switch
/// CR3 to the new table.
unsafe fn allocator_init() {
    let virt_offset = VirtAddr::new(page_align_up(BOOTSTRAP_MM_INFO.unwrap().start_brk));
    let phy_offset = unsafe { MMArch::virt_2_phys(virt_offset) }.unwrap();
    // Reserve [0, phy_offset) — the kernel image and everything below it —
    // so the allocators never hand it out.
    mem_block_manager()
        .reserve_block(PhysAddr::new(0), phy_offset.data())
        .expect("Failed to reserve block");
    let mut bump_allocator = BumpAllocator::<X86_64MMArch>::new(phy_offset.data());
    debug!(
        "BumpAllocator created, offset={:?}",
        bump_allocator.offset()
    );
    // Keep (do not free) the initial page table set up in head.S. It is not
    // returned to the buddy allocator because those tables live in the kernel
    // data segment; releasing them could be a security risk (some code may
    // perform safety checks based on virtual addresses).
    let _old_page_table = MMArch::table(PageTableKind::Kernel);
    let new_page_table: PhysAddr;
    // Use the bump allocator to map every physical page into a new page table.
    {
        // Create the new page table with the bump allocator.
        let mut mapper: crate::mm::page::PageMapper<MMArch, &mut BumpAllocator<MMArch>> =
            crate::mm::page::PageMapper::<MMArch, _>::create(
                PageTableKind::Kernel,
                &mut bump_allocator,
            )
            .expect("Failed to create page mapper");
        new_page_table = mapper.table().phys();
        debug!("PageMapper created");
        // Drop the initial mappings from head.S (without flushing the TLB yet).
        {
            let table = mapper.table();
            let empty_entry = PageEntry::<MMArch>::from_usize(0);
            for i in 0..MMArch::PAGE_ENTRY_NUM {
                table
                    .set_entry(i, empty_entry)
                    .expect("Failed to empty page table entry");
            }
        }
        debug!("Successfully emptied page table");
        // Map every page of every initial memory region into the new table.
        let total_num = mem_block_manager().total_initial_memory_regions();
        for i in 0..total_num {
            let area = mem_block_manager().get_initial_memory_region(i).unwrap();
            // debug!("area: base={:?}, size={:#x}, end={:?}", area.base, area.size, area.base + area.size);
            for i in 0..((area.size + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE) {
                let paddr = area.base.add(i * MMArch::PAGE_SIZE);
                let vaddr = unsafe { MMArch::phys_2_virt(paddr) }.unwrap();
                let flags = kernel_page_flags::<MMArch>(vaddr);
                let flusher = mapper
                    .map_phys(vaddr, paddr, flags)
                    .expect("Failed to map frame");
                // Do not flush the TLB yet.
                flusher.ignore();
            }
        }
    }
    unsafe {
        INITIAL_CR3_VALUE = new_page_table;
    }
    debug!(
        "After mapping all physical memory, DragonOS used: {} KB",
        bump_allocator.offset() / 1024
    );
    // Initialize the buddy allocator from the bump allocator's remaining memory.
    let buddy_allocator = unsafe { BuddyAllocator::<X86_64MMArch>::new(bump_allocator).unwrap() };
    // Install it as the global page-frame allocator.
    unsafe { set_inner_allocator(buddy_allocator) };
    info!("Successfully initialized buddy allocator");
    // Disable display output (screen manager window path).
    scm_disable_put_to_window();
    // make the new page table current
    {
        let mut binding = INNER_ALLOCATOR.lock();
        let mut allocator_guard = binding.as_mut().unwrap();
        debug!("To enable new page table.");
        compiler_fence(Ordering::SeqCst);
        let mapper = crate::mm::page::PageMapper::<MMArch, _>::new(
            PageTableKind::Kernel,
            new_page_table,
            &mut allocator_guard,
        );
        compiler_fence(Ordering::SeqCst);
        mapper.make_current();
        compiler_fence(Ordering::SeqCst);
        debug!("New page table enabled");
    }
    debug!("Successfully enabled new page table");
}
/// C-callable entry point that runs the buddy-allocator stress test.
#[no_mangle]
pub extern "C" fn rs_test_buddy() {
    test_buddy();
}
/// Stress test for the buddy allocator: allocate memory, write to it, and
/// free it again.
///
/// Runs 10 rounds; each round allocates ~200 MiB in random-sized chunks,
/// fills each chunk with data, randomly frees some chunks as it goes, and
/// finally releases everything, checking alignment and address uniqueness
/// throughout.
pub fn test_buddy() {
    // Total amount to allocate per round: 200 MiB.
    const TOTAL_SIZE: usize = 200 * 1024 * 1024;
    for i in 0..10 {
        debug!("Test buddy, round: {i}");
        // Holds the allocated blocks (address + frame count).
        let mut v: Vec<(PhysAddr, PageFrameCount)> = Vec::with_capacity(60 * 1024);
        // Addresses currently allocated (used to detect duplicates).
        let mut addr_set: HashSet<PhysAddr> = HashSet::new();
        let mut allocated = 0usize;
        let mut free_count = 0usize;
        while allocated < TOTAL_SIZE {
            let mut random_size = 0u64;
            unsafe { x86::random::rdrand64(&mut random_size) };
            // Cap a single allocation at 4 MiB.
            random_size %= 1024 * 4096;
            if random_size == 0 {
                continue;
            }
            let random_size =
                core::cmp::min(page_align_up(random_size as usize), TOTAL_SIZE - allocated);
            let random_size = PageFrameCount::from_bytes(random_size.next_power_of_two()).unwrap();
            // Allocate the frames.
            let (paddr, allocated_frame_count) =
                unsafe { LockedFrameAllocator.allocate(random_size).unwrap() };
            assert!(allocated_frame_count.data().is_power_of_two());
            assert!(paddr.data() % MMArch::PAGE_SIZE == 0);
            unsafe {
                assert!(MMArch::phys_2_virt(paddr)
                    .as_ref()
                    .unwrap()
                    .check_aligned(allocated_frame_count.data() * MMArch::PAGE_SIZE));
            }
            allocated += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            v.push((paddr, allocated_frame_count));
            assert!(addr_set.insert(paddr), "duplicate address: {:?}", paddr);
            // Write data into the block.
            let vaddr = unsafe { MMArch::phys_2_virt(paddr).unwrap() };
            let slice = unsafe {
                core::slice::from_raw_parts_mut(
                    vaddr.data() as *mut u8,
                    allocated_frame_count.data() * MMArch::PAGE_SIZE,
                )
            };
            for (i, item) in slice.iter_mut().enumerate() {
                *item = ((i + unsafe { rdtsc() } as usize) % 256) as u8;
            }
            // Randomly free one block.
            if !v.is_empty() {
                let mut random_index = 0u64;
                unsafe { x86::random::rdrand64(&mut random_index) };
                // Skip the free for values 8 and 9 (~20%), i.e. free with
                // ~80% probability. (The original comment claimed 70%.)
                if random_index % 10 > 7 {
                    continue;
                }
                random_index %= v.len() as u64;
                let random_index = random_index as usize;
                let (paddr, allocated_frame_count) = v.remove(random_index);
                assert!(addr_set.remove(&paddr));
                unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
                free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
            }
        }
        debug!(
            "Allocated {} MB memory, release: {} MB, no release: {} bytes",
            allocated / 1024 / 1024,
            free_count / 1024 / 1024,
            (allocated - free_count)
        );
        debug!("Now, to release buddy memory");
        // Free every remaining block.
        for (paddr, allocated_frame_count) in v {
            unsafe { LockedFrameAllocator.free(paddr, allocated_frame_count) };
            assert!(addr_set.remove(&paddr));
            free_count += allocated_frame_count.data() * MMArch::PAGE_SIZE;
        }
        debug!("release done!, allocated: {allocated}, free_count: {free_count}");
    }
}
/// The global page-frame allocator: a zero-sized handle whose operations
/// delegate to the buddy allocator stored in `INNER_ALLOCATOR`.
#[derive(Debug, Clone, Copy, Hash)]
pub struct LockedFrameAllocator;
  618. impl FrameAllocator for LockedFrameAllocator {
  619. unsafe fn allocate(&mut self, mut count: PageFrameCount) -> Option<(PhysAddr, PageFrameCount)> {
  620. count = count.next_power_of_two();
  621. if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
  622. return allocator.allocate(count);
  623. } else {
  624. return None;
  625. }
  626. }
  627. unsafe fn free(&mut self, address: crate::mm::PhysAddr, count: PageFrameCount) {
  628. assert!(count.data().is_power_of_two());
  629. if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
  630. return allocator.free(address, count);
  631. }
  632. }
  633. unsafe fn usage(&self) -> PageFrameUsage {
  634. if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock_irqsave() {
  635. return allocator.usage();
  636. } else {
  637. panic!("usage error");
  638. }
  639. }
  640. }
  641. /// 获取内核地址默认的页面标志
  642. pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(virt: VirtAddr) -> EntryFlags<A> {
  643. let info: X86_64MMBootstrapInfo = BOOTSTRAP_MM_INFO.unwrap();
  644. if virt.data() >= info.kernel_code_start && virt.data() < info.kernel_code_end {
  645. // Remap kernel code execute
  646. return EntryFlags::new().set_execute(true).set_write(true);
  647. } else if virt.data() >= info.kernel_data_end && virt.data() < info.kernel_rodata_end {
  648. // Remap kernel rodata read only
  649. return EntryFlags::new().set_execute(true);
  650. } else {
  651. return EntryFlags::new().set_write(true).set_execute(true);
  652. }
  653. }
  654. unsafe fn set_inner_allocator(allocator: BuddyAllocator<MMArch>) {
  655. static FLAG: AtomicBool = AtomicBool::new(false);
  656. if FLAG
  657. .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
  658. .is_err()
  659. {
  660. panic!("Cannot set inner allocator twice!");
  661. }
  662. *INNER_ALLOCATOR.lock() = Some(allocator);
  663. }
  664. /// 低地址重映射的管理器
  665. ///
  666. /// 低地址重映射的管理器,在smp初始化完成之前,需要使用低地址的映射,因此需要在smp初始化完成之后,取消这一段映射
  667. pub struct LowAddressRemapping;
  668. impl LowAddressRemapping {
  669. // 映射64M
  670. const REMAP_SIZE: usize = 64 * 1024 * 1024;
  671. pub unsafe fn remap_at_low_address(mapper: &mut PageMapper) {
  672. for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
  673. let paddr = PhysAddr::new(i * MMArch::PAGE_SIZE);
  674. let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
  675. let flags = kernel_page_flags::<MMArch>(vaddr);
  676. let flusher = mapper
  677. .map_phys(vaddr, paddr, flags)
  678. .expect("Failed to map frame");
  679. // 暂时不刷新TLB
  680. flusher.ignore();
  681. }
  682. }
  683. /// 取消低地址的映射
  684. pub unsafe fn unmap_at_low_address(mapper: &mut PageMapper, flush: bool) {
  685. for i in 0..(Self::REMAP_SIZE / MMArch::PAGE_SIZE) {
  686. let vaddr = VirtAddr::new(i * MMArch::PAGE_SIZE);
  687. let (_, _, flusher) = mapper
  688. .unmap_phys(vaddr, true)
  689. .expect("Failed to unmap frame");
  690. if !flush {
  691. flusher.ignore();
  692. }
  693. }
  694. }
  695. }