// kvm_mmu.rs

use crate::arch::mm::X86_64MMArch;
use crate::arch::vm::asm::VmxAsm;
use crate::arch::vm::kvm_host::page::KVM_MIN_FREE_MMU_PAGES;
use crate::mm::PhysAddr;
use crate::{
    arch::{mm::LockedFrameAllocator, MMArch, VirtCpuArch},
    libs::spinlock::{SpinLock, SpinLockGuard},
    mm::{page::PageMapper, MemoryManagementArch, PageTableKind},
    virt::vm::kvm_host::{vcpu::VirtCpu, Vm},
};
use alloc::{sync::Arc, vec::Vec};
use bitfield_struct::bitfield;
use core::intrinsics::likely;
use core::ops::{Add, Sub};
use log::{debug, error, warn};
use raw_cpuid::CpuId;
use system_error::SystemError;
use x86::controlregs::{Cr0, Cr4};
use x86::vmx::vmcs::guest;
use x86_64::registers::control::EferFlags;

use super::super::{vmx::vmx_info, x86_kvm_ops};
use super::mmu_internal::KvmPageFault;

const PT64_ROOT_5LEVEL: usize = 5;
const PT64_ROOT_4LEVEL: usize = 4;
const PT32_ROOT_LEVEL: usize = 2;
const PT32E_ROOT_LEVEL: usize = 3;

static mut TDP_ENABLED: bool = false;
static mut TDP_MMU_ENABLED: bool = true;
static mut TDP_MMU_ALLOWED: bool = unsafe { TDP_MMU_ENABLED };

static mut TDP_ROOT_LEVEL: usize = 0;
static mut MAX_TDP_LEVEL: usize = 0;
static mut SHADOW_ACCESSED_MASK: usize = 0;

static mut MAX_HUGE_PAGE_LEVEL: PageLevel = PageLevel::None;

pub const PAGE_SHIFT: u32 = 12;
pub const PAGE_SIZE: u64 = 1 << PAGE_SHIFT;

pub fn is_tdp_mmu_enabled() -> bool {
    unsafe { TDP_MMU_ENABLED }
}

#[allow(dead_code)]
#[repr(u8)]
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum PageLevel {
    None,
    Level4K,
    Level2M,
    Level1G,
    Level512G,
    LevelNum,
}

// Implement the Add trait: step a PageLevel up by `other` levels.
impl Add<usize> for PageLevel {
    type Output = Self;
    fn add(self, other: usize) -> Self {
        let result = self as usize + other;
        match result {
            0 => PageLevel::None,
            1 => PageLevel::Level4K,
            2 => PageLevel::Level2M,
            3 => PageLevel::Level1G,
            4 => PageLevel::Level512G,
            5 => PageLevel::LevelNum,
            _ => PageLevel::LevelNum, // Saturate to LevelNum when out of range
        }
    }
}

// Implement the Sub trait: step a PageLevel down by `other` levels.
impl Sub<usize> for PageLevel {
    type Output = Self;
    fn sub(self, other: usize) -> Self {
        let result = self as isize - other as isize;
        match result {
            0 => PageLevel::None,
            1 => PageLevel::Level4K,
            2 => PageLevel::Level2M,
            3 => PageLevel::Level1G,
            4 => PageLevel::Level512G,
            5 => PageLevel::LevelNum,
            _ => PageLevel::None, // Saturate to None when out of range
        }
    }
}

impl PageLevel {
    fn kvm_hpage_gfn_shift(level: u8) -> u32 {
        ((level - 1) * 9) as u32
    }

    fn kvm_hpage_shift(level: u8) -> u32 {
        PAGE_SHIFT + Self::kvm_hpage_gfn_shift(level)
    }

    fn kvm_hpage_size(level: u8) -> u64 {
        1 << Self::kvm_hpage_shift(level)
    }

    /// Compute how many base pages a huge page at the given level contains.
    ///
    /// # Parameters
    /// - `level`: the page-table level
    ///
    /// # Returns
    /// The number of 4 KiB pages covered by one huge page at `level`.
    pub fn kvm_pages_per_hpage(level: u8) -> u64 {
        Self::kvm_hpage_size(level) / PAGE_SIZE
    }
}
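
// A minimal sketch, not part of the original file, exercising the helpers above under
// the standard x86 assumptions (4 KiB base pages, 9 index bits per page-table level).
#[cfg(test)]
mod page_level_sketch {
    use super::*;

    #[test]
    fn pages_per_hpage_matches_x86_layout() {
        assert_eq!(PageLevel::kvm_pages_per_hpage(1), 1); // 4 KiB / 4 KiB
        assert_eq!(PageLevel::kvm_pages_per_hpage(2), 512); // 2 MiB / 4 KiB
        assert_eq!(PageLevel::kvm_pages_per_hpage(3), 262_144); // 1 GiB / 4 KiB

        // PageLevel arithmetic saturates at the enum bounds.
        assert_eq!(PageLevel::Level2M + 1, PageLevel::Level1G);
        assert_eq!(PageLevel::Level4K - 1, PageLevel::None);
    }
}
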
/// Round a GFN (Guest Frame Number) down to its alignment boundary at the given level.
pub fn gfn_round_for_level(gfn: u64, level: u8) -> u64 {
    gfn & !(PageLevel::kvm_pages_per_hpage(level) - 1)
}
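
// Worked example (illustrative, not from the original file): at level 2 a huge page spans
// 512 base pages, so gfn_round_for_level(0x12345, 2) clears the low 9 bits and returns
// 0x12200, the first GFN of the enclosing 2 MiB region.
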
#[derive(Debug)]
pub struct LockedKvmMmu {
    inner: SpinLock<KvmMmu>,
}

impl LockedKvmMmu {
    pub fn new(mmu: KvmMmu) -> Arc<Self> {
        Arc::new(Self {
            inner: SpinLock::new(mmu),
        })
    }

    pub fn lock(&self) -> SpinLockGuard<KvmMmu> {
        self.inner.lock()
    }
}

pub type KvmMmuPageFaultHandler =
    fn(vcpu: &mut VirtCpu, page_fault: &KvmPageFault) -> Result<i32, SystemError>;

#[derive(Debug, Default)]
#[allow(dead_code)]
pub struct KvmMmu {
    pub root: KvmMmuRootInfo,
    pub cpu_role: KvmCpuRole,
    pub root_role: KvmMmuPageRole,
    pub page_fault: Option<KvmMmuPageFaultHandler>,
    pkru_mask: u32,
    prev_roots: [KvmMmuRootInfo; Self::KVM_MMU_NUM_PREV_ROOTS],
    pae_root: Vec<u64>,
    pub pdptrs: [u64; 4],
}

impl KvmMmu {
    pub fn _save_pdptrs(&mut self) {
        self.pdptrs[0] = VmxAsm::vmx_vmread(guest::PDPTE0_FULL);
        self.pdptrs[1] = VmxAsm::vmx_vmread(guest::PDPTE1_FULL);
        self.pdptrs[2] = VmxAsm::vmx_vmread(guest::PDPTE2_FULL);
        self.pdptrs[3] = VmxAsm::vmx_vmread(guest::PDPTE3_FULL);
    }

    const KVM_MMU_NUM_PREV_ROOTS: usize = 3;
    pub const INVALID_PAGE: u64 = u64::MAX;

    #[inline]
    pub fn tdp_enabled() -> bool {
        unsafe { TDP_ENABLED }
    }

    #[inline]
    pub fn tdp_root_level() -> usize {
        unsafe { TDP_ROOT_LEVEL }
    }

    #[inline]
    pub fn max_tdp_level() -> usize {
        unsafe { MAX_TDP_LEVEL }
    }

    #[inline]
    pub fn ad_enabled() -> bool {
        unsafe { SHADOW_ACCESSED_MASK != 0 }
    }

    /// Initialize the global MMU configuration. Because the configuration lives in
    /// lock-free statics, this function must only be called while VMX is being initialized.
    pub fn kvm_configure_mmu(
        enable_tdp: bool,
        tdp_forced_root_level: usize,
        tdp_max_root_level: usize,
        tdp_huge_page_level: PageLevel,
    ) {
        unsafe {
            TDP_ENABLED = enable_tdp;
            TDP_ROOT_LEVEL = tdp_forced_root_level;
            MAX_TDP_LEVEL = tdp_max_root_level;
            TDP_MMU_ENABLED = TDP_MMU_ALLOWED && TDP_ENABLED;

            if TDP_ENABLED {
                MAX_HUGE_PAGE_LEVEL = tdp_huge_page_level;
            } else if CpuId::new()
                .get_extended_processor_and_feature_identifiers()
                .unwrap()
                .has_1gib_pages()
            {
                MAX_HUGE_PAGE_LEVEL = PageLevel::Level1G;
            } else {
                MAX_HUGE_PAGE_LEVEL = PageLevel::Level2M;
            }
        }
    }
}
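
// Illustrative call site, an assumption about how VMX setup might use this API rather than
// code from the original file: with EPT available, TDP could be enabled with a 4-level
// maximum and 1 GiB huge pages; with `enable_tdp == false` the huge-page level is instead
// derived from CPUID in the branch above.
//
//     KvmMmu::kvm_configure_mmu(true, 0, 4, PageLevel::Level1G);
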
#[derive(Debug, Default)]
pub struct KvmMmuRootInfo {
    pub pgd: u64,
    pub hpa: u64,
}

#[derive(Debug, Default, Clone, Copy)]
pub struct KvmCpuRole {
    base: KvmMmuPageRole,
    extend: KvmMmuExtenedRole,
}

impl PartialEq for KvmCpuRole {
    fn eq(&self, other: &Self) -> bool {
        self.base.0 == other.base.0 && self.extend.0 == other.extend.0
    }
}

/// ### Tracks the properties of a shadow page (including TDP pages) so that KVM can decide
/// whether an existing page can be reused in a given MMU context.
#[bitfield(u32)]
pub struct KvmMmuPageRole {
    /// Page-table level, 4 bits. For regular page tables the value is 2 (two-level),
    /// 3 (three-level), 4 (four-level) or 5 (five-level).
    #[bits(4)]
    pub level: u32,
    /// Whether guest page-table entries are 4 bytes wide, 1 bit. Set under non-PAE paging.
    has_4_byte_gpte: bool,
    /// Quadrant of the page table, 2 bits. Only meaningful when `has_4_byte_gpte` is set.
    #[bits(2)]
    quadrant: u32,
    /// Whether the page is directly mapped.
    direct: bool,
    /// Access permissions of the page.
    #[bits(3)]
    access: u32,
    /// Whether the page is invalid.
    invalid: bool,
    /// Whether the NX (no-execute) bit is in effect for this page.
    efer_nx: bool,
    /// Whether the write-protect bit (WP) in CR0 is set.
    cr0_wp: bool,
    /// SMEP (Supervisor Mode Execution Protection) enabled while write protection is off.
    smep_andnot_wp: bool,
    /// SMAP (Supervisor Mode Access Prevention) enabled while write protection is off.
    smap_andnot_wp: bool,
    /// Whether accessed/dirty bits are disabled for this page.
    ad_disabled: bool,
    /// Whether this page belongs to guest (nested) mode.
    guest_mode: bool,
    /// Whether this page is passed through to the guest.
    passthrough: bool,
    /// Unused bits.
    #[bits(5)]
    unused: u32,
    /// SMM (System Management Mode) indicator.
    #[bits(8)]
    pub smm: u32,
}

impl KvmMmuPageRole {
    pub fn is_cr0_pg(&self) -> bool {
        self.level() > 0
    }

    pub fn is_cr4_pae(&self) -> bool {
        !self.has_4_byte_gpte()
    }

    pub fn get_direct(&self) -> bool {
        self.direct()
    }
}
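
// A minimal sketch (assumption, not part of the original file): the helpers above read the
// packed role bits, e.g. a 4-level role with 8-byte GPTEs reports paging enabled and PAE mode.
#[cfg(test)]
mod page_role_sketch {
    use super::*;

    #[test]
    fn role_helpers_reflect_packed_bits() {
        let mut role = KvmMmuPageRole::default();
        assert!(!role.is_cr0_pg()); // level == 0 means CR0.PG clear

        role.set_level(PT64_ROOT_4LEVEL as u32);
        role.set_has_4_byte_gpte(false);
        assert!(role.is_cr0_pg());
        assert!(role.is_cr4_pae()); // 8-byte GPTEs imply PAE/long-mode paging
    }
}
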
#[bitfield(u32)]
pub struct KvmMmuExtenedRole {
    valid: bool,
    execonly: bool,
    cr4_pse: bool,
    cr4_pke: bool,
    cr4_smap: bool,
    cr4_smep: bool,
    cr4_la57: bool,
    efer_lma: bool,
    #[bits(24)]
    unused: u32,
}

pub struct KvmMmuRoleRegs {
    pub cr0: Cr0,
    pub cr4: Cr4,
    pub efer: EferFlags,
}

/// Return values for page-fault handling, used by handle_mmio_page_fault(),
/// mmu.page_fault(), fast_page_fault(), kvm_mmu_do_page_fault() and friends.
#[derive(Debug, Eq, PartialEq, FromPrimitive, Clone)]
#[repr(u32)]
pub enum PFRet {
    Continue,       // RET_PF_CONTINUE: so far so good, keep handling the page fault.
    Retry,          // RET_PF_RETRY: let the CPU fault again on the address.
    Emulate,        // RET_PF_EMULATE: MMIO page fault, emulate the instruction directly.
    Invalid,        // RET_PF_INVALID: the SPTE is invalid, let the real page-fault path update it.
    Fixed,          // RET_PF_FIXED: the faulting entry has been fixed.
    Spurious,       // RET_PF_SPURIOUS: the faulting entry was already fixed, e.g. by another vCPU.
    Err = u32::MAX, // Error
}

impl From<PFRet> for i32 {
    fn from(pf_ret: PFRet) -> Self {
        pf_ret as i32
    }
}

impl From<i32> for PFRet {
    fn from(value: i32) -> Self {
        match value {
            0 => PFRet::Continue,
            1 => PFRet::Retry,
            2 => PFRet::Emulate,
            3 => PFRet::Invalid,
            4 => PFRet::Fixed,
            5 => PFRet::Spurious,
            _ => PFRet::Err, // Any other value maps to Err
        }
    }
}
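
// Illustrative round trip (not from the original file): PFRet converts to the i32 codes
// used by the fault handlers, and Err (u32::MAX) reads back as -1.
//
//     let code: i32 = PFRet::Emulate.into(); // 2
//     assert_eq!(PFRet::from(code), PFRet::Emulate);
//     assert_eq!(i32::from(PFRet::Err), -1);
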
impl VirtCpuArch {
    pub fn kvm_init_mmu(&mut self) {
        let regs = self.role_regs();
        let cpu_role = self.calc_cpu_role(&regs);

        if self.walk_mmu.is_some()
            && self.nested_mmu.is_some()
            && Arc::ptr_eq(
                self.walk_mmu.as_ref().unwrap(),
                self.nested_mmu.as_ref().unwrap(),
            )
        {
            todo!()
        } else if KvmMmu::tdp_enabled() {
            self.init_tdp_mmu(cpu_role);
        } else {
            todo!()
        }
    }

    fn unload_mmu(&mut self) {
        // TODO
    }

    pub fn reset_mmu_context(&mut self) {
        self.unload_mmu();
        self.kvm_init_mmu();
    }

    fn role_regs(&mut self) -> KvmMmuRoleRegs {
        KvmMmuRoleRegs {
            cr0: self.read_cr0_bits(Cr0::CR0_ENABLE_PAGING | Cr0::CR0_WRITE_PROTECT),
            cr4: self.read_cr4_bits(
                Cr4::CR4_ENABLE_PSE
                    | Cr4::CR4_ENABLE_PAE
                    | Cr4::CR4_ENABLE_LA57
                    | Cr4::CR4_ENABLE_SMEP
                    | Cr4::CR4_ENABLE_SMAP
                    | Cr4::CR4_ENABLE_PROTECTION_KEY,
            ),
            efer: self.efer,
        }
    }

    fn calc_cpu_role(&self, regs: &KvmMmuRoleRegs) -> KvmCpuRole {
        let mut role = KvmCpuRole::default();
        let base = &mut role.base;
        let ext = &mut role.extend;

        base.set_access(0b111);
        base.set_smm(self.is_smm() as u32);
        base.set_guest_mode(self.is_guest_mode());
        ext.set_valid(true);

        if !regs.cr0.contains(Cr0::CR0_ENABLE_PAGING) {
            base.set_direct(true);
            return role;
        }

        base.set_efer_nx(regs.efer.contains(EferFlags::NO_EXECUTE_ENABLE));
        base.set_cr0_wp(regs.cr0.contains(Cr0::CR0_WRITE_PROTECT));
        base.set_smep_andnot_wp(
            regs.cr4.contains(Cr4::CR4_ENABLE_SMEP) && !regs.cr0.contains(Cr0::CR0_WRITE_PROTECT),
        );
        base.set_smap_andnot_wp(
            regs.cr4.contains(Cr4::CR4_ENABLE_SMAP) && !regs.cr0.contains(Cr0::CR0_WRITE_PROTECT),
        );
        base.set_has_4_byte_gpte(!regs.cr4.contains(Cr4::CR4_ENABLE_PAE));

        if regs.efer.contains(EferFlags::LONG_MODE_ACTIVE) {
            let level = if regs.cr4.contains(Cr4::CR4_ENABLE_LA57) {
                PT64_ROOT_5LEVEL as u32
            } else {
                PT64_ROOT_4LEVEL as u32
            };
            base.set_level(level);
        } else if regs.cr4.contains(Cr4::CR4_ENABLE_PAE) {
            base.set_level(PT32E_ROOT_LEVEL as u32);
        } else {
            base.set_level(PT32_ROOT_LEVEL as u32);
        }

        ext.set_cr4_smep(regs.cr4.contains(Cr4::CR4_ENABLE_SMEP));
        ext.set_cr4_smap(regs.cr4.contains(Cr4::CR4_ENABLE_SMAP));
        ext.set_cr4_pse(regs.cr4.contains(Cr4::CR4_ENABLE_PSE));
        ext.set_cr4_pke(
            regs.efer.contains(EferFlags::LONG_MODE_ACTIVE)
                && regs.cr4.contains(Cr4::CR4_ENABLE_PROTECTION_KEY),
        );
        ext.set_cr4_la57(
            regs.efer.contains(EferFlags::LONG_MODE_ACTIVE)
                && regs.cr4.contains(Cr4::CR4_ENABLE_LA57),
        );
        ext.set_efer_lma(regs.efer.contains(EferFlags::LONG_MODE_ACTIVE));

        role
    }

    /// https://code.dragonos.org.cn/xref/linux-6.6.21/arch/x86/kvm/mmu/mmu.c#6019
    pub fn vcpu_arch_mmu_create(&mut self) {
        if vmx_info().tdp_enabled() {
            self.guset_mmu = Some(self._mmu_create());
        }

        self.root_mmu = Some(self._mmu_create());
        self.mmu = self.root_mmu.clone();
        self.walk_mmu = self.root_mmu.clone();
    }

    fn _mmu_create(&self) -> Arc<LockedKvmMmu> {
        let mut mmu = KvmMmu::default();
        mmu.root.hpa = KvmMmu::INVALID_PAGE;
        mmu.root.pgd = 0;

        for role in &mut mmu.prev_roots {
            role.hpa = KvmMmu::INVALID_PAGE;
            role.pgd = KvmMmu::INVALID_PAGE;
        }

        if KvmMmu::tdp_enabled() && self.mmu_get_tdp_level() > PT32E_ROOT_LEVEL {
            return LockedKvmMmu::new(mmu);
        }

        mmu.pae_root
            .resize(MMArch::PAGE_SIZE / core::mem::size_of::<u64>(), 0);

        return LockedKvmMmu::new(mmu);
    }

    fn mmu_get_tdp_level(&self) -> usize {
        if KvmMmu::tdp_root_level() != 0 {
            return KvmMmu::tdp_root_level();
        }

        // Five-level paging is only needed when the guest physical address width exceeds
        // 48 bits; otherwise four levels are sufficient.
        if KvmMmu::max_tdp_level() == 5 && self.max_phyaddr <= 48 {
            return 4;
        }

        return KvmMmu::max_tdp_level();
    }

    pub fn init_tdp_mmu(&mut self, cpu_role: KvmCpuRole) {
        let context = self.root_mmu();
        let mut context = context.lock();

        let root_role = self.calc_tdp_mmu_root_page_role(cpu_role);

        if cpu_role == context.cpu_role && root_role.0 == context.root_role.0 {
            return;
        }

        context.cpu_role = cpu_role;
        context.root_role = root_role;

        // TODO: install the function table
        if !context.cpu_role.base.is_cr0_pg() {
            // todo: context->gva_to_gpa = nonpaging_gva_to_gpa;
            warn!("context->gva_to_gpa = nonpaging_gva_to_gpa todo!");
        } else if context.cpu_role.base.is_cr4_pae() {
            // todo: context->gva_to_gpa = paging64_gva_to_gpa;
            warn!("context->gva_to_gpa = paging64_gva_to_gpa todo!");
        } else {
            // todo: context->gva_to_gpa = paging32_gva_to_gpa;
            warn!("context->gva_to_gpa = paging32_gva_to_gpa todo!");
        }

        // todo:
        // reset_guest_paging_metadata(vcpu, context);
        // reset_tdp_shadow_zero_bits_mask(context);
    }

    #[inline]
    pub fn root_mmu(&self) -> &Arc<LockedKvmMmu> {
        self.root_mmu.as_ref().unwrap()
    }

    #[inline]
    pub fn mmu(&self) -> SpinLockGuard<KvmMmu> {
        self.mmu.as_ref().unwrap().lock()
    }

    fn calc_tdp_mmu_root_page_role(&self, cpu_role: KvmCpuRole) -> KvmMmuPageRole {
        let mut role = KvmMmuPageRole::default();

        role.set_access(0b111);
        role.set_cr0_wp(true);
        role.set_efer_nx(true);
        role.set_smm(cpu_role.base.smm());
        role.set_guest_mode(cpu_role.base.guest_mode());
        role.set_ad_disabled(!KvmMmu::ad_enabled());
        role.set_level(self.mmu_get_tdp_level() as u32);
        role.set_direct(true);
        role.set_has_4_byte_gpte(false);

        role
    }
}

impl VirtCpu {
    pub fn kvm_mmu_reload(&mut self, vm: &Vm) -> Result<(), SystemError> {
        if likely(self.arch.mmu().root.hpa != KvmMmu::INVALID_PAGE) {
            return Ok(());
        }

        return self.kvm_mmu_load(vm);
    }

    pub fn kvm_mmu_load(&mut self, vm: &Vm) -> Result<(), SystemError> {
        let direct = self.arch.mmu().root_role.direct();
        self.mmu_topup_memory_caches(!direct)?;
        self.mmu_alloc_special_roots()?;

        if direct {
            self.mmu_alloc_direct_roots(vm)?;
        } else {
            self.mmu_alloc_shadow_roots(vm)?;
        }

        // TODO: kvm_mmu_sync_roots

        self.kvm_mmu_load_pgd(vm);

        Ok(())
    }

    pub fn kvm_mmu_load_pgd(&mut self, vm: &Vm) {
        let root_hpa = self.arch.mmu().root.hpa;
        debug!("kvm_mmu_load_pgd::root_hpa = {:#x}", root_hpa);
        if root_hpa == KvmMmu::INVALID_PAGE {
            return;
        }

        let level = self.arch.mmu().root_role.level();
        x86_kvm_ops().load_mmu_pgd(self, vm, root_hpa, level);
    }

    fn mmu_topup_memory_caches(&mut self, _maybe_indirect: bool) -> Result<(), SystemError> {
        // TODO
        Ok(())
    }

    fn mmu_alloc_special_roots(&mut self) -> Result<(), SystemError> {
        // TODO
        Ok(())
    }

    fn mmu_alloc_direct_roots(&mut self, vm: &Vm) -> Result<(), SystemError> {
        let shadow_root_level = self.arch.mmu().root_role.level();
        let _r: Result<(), SystemError> = self.make_mmu_pages_available(vm);

        let root: PhysAddr;
        if KvmMmu::tdp_enabled() {
            root = self.kvm_tdp_mmu_get_vcpu_root_hpa().unwrap();
            let mut mmu = self.arch.mmu();
            mmu.root.hpa = root.data() as u64;
        } else if shadow_root_level >= PT64_ROOT_4LEVEL as u32 {
            todo!()
        } else if shadow_root_level == PT32E_ROOT_LEVEL as u32 {
            todo!()
        } else {
            error!("Bad TDP root level = {}", shadow_root_level);
            return Err(SystemError::EIO);
        }

        /* root.pgd is ignored for direct MMUs. */
        self.arch.mmu().root.pgd = 0;

        Ok(())
    }

    fn mmu_alloc_shadow_roots(&mut self, _vm: &Vm) -> Result<(), SystemError> {
        todo!();
    }

    fn make_mmu_pages_available(&mut self, vm: &Vm) -> Result<(), SystemError> {
        let avail = Self::kvm_mmu_available_pages(vm);

        if likely(avail >= KVM_MIN_FREE_MMU_PAGES) {
            return Ok(());
        }

        // kvm_mmu_zap_oldest_mmu_pages(vm, KVM_REFILL_PAGES - avail);

        if Self::kvm_mmu_available_pages(vm) == 0 {
            return Err(SystemError::ENOSPC);
        }

        Ok(())
    }

    fn kvm_mmu_available_pages(vm: &Vm) -> usize {
        if vm.arch.n_max_mmu_pages > vm.arch.n_used_mmu_pages {
            return vm.arch.n_max_mmu_pages - vm.arch.n_used_mmu_pages;
        }

        return 0;
    }

    fn kvm_tdp_mmu_get_vcpu_root_hpa(&self) -> Result<PhysAddr, SystemError> {
        // todo: Check for an existing root before allocating a new one. Note, the
        // role check prevents consuming an invalid root.
        let root = self.tdp_mmu_alloc_sp().unwrap();

        Ok(PhysAddr::new(root as usize))
    }

    fn tdp_mmu_alloc_sp(&self) -> Result<u64, SystemError> {
        // Allocate and build a fresh page table for the EPT root.
        let mapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
            PageMapper::create(PageTableKind::EPT, LockedFrameAllocator)
                .ok_or(SystemError::ENOMEM)?
        };
        let ept_root_hpa = mapper.table().phys();

        self.arch.mmu().root.hpa = ept_root_hpa.data() as u64;

        debug!("ept_root_hpa:{:x}!", ept_root_hpa.data() as u64);

        return Ok(self.arch.mmu().root.hpa);
    }
}
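
// A hedged summary of the vCPU MMU lifecycle as implied by this file (comments only, not
// original code): the arch layer first calls vcpu_arch_mmu_create() to allocate the root and
// guest MMU structures, then kvm_init_mmu() derives the CPU and root roles from the guest's
// CR0/CR4/EFER and, with TDP enabled, fills in the TDP context via init_tdp_mmu(). On vCPU
// entry, kvm_mmu_reload() installs a root if none is present: kvm_mmu_load() tops up caches,
// allocates a direct (EPT) or shadow root, and kvm_mmu_load_pgd() hands the root HPA to
// x86_kvm_ops().load_mmu_pgd() so the hardware begins walking it.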