vcpu.rs 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651
  1. use super::vmcs::{
  2. VMCSRegion, VmcsFields, VmxEntryCtrl, VmxPrimaryExitCtrl, VmxPrimaryProcessBasedExecuteCtrl,
  3. VmxSecondaryProcessBasedExecuteCtrl,
  4. };
  5. use super::vmx_asm_wrapper::{vmx_vmclear, vmx_vmptrld, vmx_vmread, vmx_vmwrite, vmxoff, vmxon};
  6. use crate::arch::kvm::vmx::mmu::KvmMmu;
  7. use crate::arch::kvm::vmx::seg::{seg_setup, Sreg};
  8. use crate::arch::kvm::vmx::{VcpuRegIndex, X86_CR0};
  9. use crate::arch::mm::{LockedFrameAllocator, PageMapper};
  10. use crate::arch::x86_64::mm::X86_64MMArch;
  11. use crate::arch::MMArch;
  12. use crate::mm::{MemoryManagementArch, PageTableKind};
  13. use crate::mm::{PhysAddr, VirtAddr};
  14. use crate::virt::kvm::vcpu::Vcpu;
  15. use crate::virt::kvm::vm::Vm;
  16. use alloc::alloc::Global;
  17. use alloc::boxed::Box;
  18. use core::slice;
  19. use log::debug;
  20. use raw_cpuid::CpuId;
  21. use system_error::SystemError;
  22. use x86;
  23. use x86::{controlregs, msr, segmentation};
  24. // use crate::arch::kvm::vmx::seg::RMODE_TSS_SIZE;
  25. // use crate::virt::kvm::{KVM};
  26. // KERNEL_ALLOCATOR
/// Size in bytes of one page; VMXON/VMCS regions and the MSR bitmap are each one page.
pub const PAGE_SIZE: usize = 0x1000;
/// Number of general-purpose registers saved in `VcpuContextFrame::regs`.
pub const NR_VCPU_REGS: usize = 16;
/// Naturally aligned 4-KByte VMXON region (Intel SDM Vol. 3C, 25.11.5).
/// The first 4 bytes hold the VMCS revision identifier; the remaining
/// `PAGE_SIZE - 4` bytes are reserved for the processor, so the whole
/// struct is exactly one page.
#[repr(C, align(4096))]
#[derive(Debug)]
pub struct VmxonRegion {
    // Stamped from IA32_VMX_BASIC by `VcpuData::init_region`.
    pub revision_id: u32,
    pub data: [u8; PAGE_SIZE - 4],
}
/// Page-aligned 4-KByte MSR bitmap; its physical address is written to the
/// CTRL_MSR_BITMAP_ADDR VMCS field to control which MSR accesses cause VM exits.
#[repr(C, align(4096))]
#[derive(Debug)]
pub struct MSRBitmap {
    pub data: [u8; PAGE_SIZE],
}
/// Per-vcpu VMX backing memory: the VMXON region, the VMCS region and the
/// MSR bitmap, each kept alongside its physical address because the VMX
/// instructions take physical operands.
#[allow(dead_code)]
#[derive(Debug)]
pub struct VcpuData {
    /// The virtual and physical address of the Vmxon naturally aligned 4-KByte region of memory
    pub vmxon_region: Box<VmxonRegion>,
    pub vmxon_region_physical_address: u64, // vmxon requires this (physical) address
    /// The virtual and physical address of the Vmcs naturally aligned 4-KByte region of memory
    /// holds the complete CPU state of both the host and the guest.
    /// includes the segment registers, GDT, IDT, TR, various MSR's
    /// and control field structures for handling exit and entry operations
    pub vmcs_region: Box<VMCSRegion>,
    pub vmcs_region_physical_address: u64, // vmptrld / vmclear require this (physical) address
    pub msr_bitmap: Box<MSRBitmap>,
    pub msr_bitmap_physical_address: u64,
}
/// Register context saved and restored across vcpu switches.
#[derive(Default, Debug)]
#[repr(C)]
pub struct VcpuContextFrame {
    // General-purpose registers, indexed by `VcpuRegIndex`.
    pub regs: [usize; NR_VCPU_REGS],
    pub rip: usize,
    pub rflags: usize,
}
/// Lifecycle state of a vcpu.
#[derive(Debug)]
#[allow(dead_code)]
pub enum VcpuState {
    /// Invalid / not yet initialized.
    Inv = 0,
    /// Pending: ready but not running.
    Pend = 1,
    /// Active: currently running.
    Act = 2,
}
/// A VMX-based virtual CPU: register context, VMX backing memory, the EPT
/// MMU and a handle to the owning virtual machine.
#[allow(dead_code)]
#[derive(Debug)]
pub struct VmxVcpu {
    pub vcpu_id: u32,
    pub vcpu_ctx: VcpuContextFrame, // context saved on vcpu switch (general-purpose registers etc.)
    pub vcpu_state: VcpuState,      // current run state of the vcpu
    pub mmu: KvmMmu,                // the vcpu's memory-management unit (EPT)
    pub data: VcpuData,             // the vcpu's VMX region data
    pub parent_vm: Vm,              // parent KVM
}
  79. impl VcpuData {
  80. pub fn alloc() -> Result<Self, SystemError> {
  81. let vmxon_region: Box<VmxonRegion> = unsafe {
  82. Box::try_new_zeroed_in(Global)
  83. .expect("Try new zeroed fail!")
  84. .assume_init()
  85. };
  86. let vmcs_region: Box<VMCSRegion> = unsafe {
  87. Box::try_new_zeroed_in(Global)
  88. .expect("Try new zeroed fail!")
  89. .assume_init()
  90. };
  91. let msr_bitmap: Box<MSRBitmap> = unsafe {
  92. Box::try_new_zeroed_in(Global)
  93. .expect("Try new zeroed fail!")
  94. .assume_init()
  95. };
  96. // FIXME: virt_2_phys的转换正确性存疑
  97. let vmxon_region_physical_address = {
  98. let vaddr = VirtAddr::new(vmxon_region.as_ref() as *const _ as _);
  99. unsafe { MMArch::virt_2_phys(vaddr).unwrap().data() as u64 }
  100. };
  101. let vmcs_region_physical_address = {
  102. let vaddr = VirtAddr::new(vmcs_region.as_ref() as *const _ as _);
  103. unsafe { MMArch::virt_2_phys(vaddr).unwrap().data() as u64 }
  104. };
  105. let msr_bitmap_physical_address = {
  106. let vaddr = VirtAddr::new(msr_bitmap.as_ref() as *const _ as _);
  107. unsafe { MMArch::virt_2_phys(vaddr).unwrap().data() as u64 }
  108. };
  109. let mut instance = Self {
  110. // Allocate a naturally aligned 4-KByte VMXON region of memory to enable VMX operation (Intel Manual: 25.11.5 VMXON Region)
  111. vmxon_region,
  112. vmxon_region_physical_address,
  113. // Allocate a naturally aligned 4-KByte VMCS region of memory
  114. vmcs_region,
  115. vmcs_region_physical_address,
  116. msr_bitmap,
  117. msr_bitmap_physical_address,
  118. };
  119. // printk_color!(GREEN, BLACK, "[+] init_region\n");
  120. instance.init_region()?;
  121. Ok(instance)
  122. }
  123. pub fn init_region(&mut self) -> Result<(), SystemError> {
  124. // Get the Virtual Machine Control Structure revision identifier (VMCS revision ID)
  125. // (Intel Manual: 25.11.5 VMXON Region)
  126. let revision_id = unsafe { (msr::rdmsr(msr::IA32_VMX_BASIC) as u32) & 0x7FFF_FFFF };
  127. debug!("[+] VMXON Region Virtual Address: {:p}", self.vmxon_region);
  128. debug!(
  129. "[+] VMXON Region Physical Addresss: 0x{:x}",
  130. self.vmxon_region_physical_address
  131. );
  132. debug!("[+] VMCS Region Virtual Address: {:p}", self.vmcs_region);
  133. debug!(
  134. "[+] VMCS Region Physical Address1: 0x{:x}",
  135. self.vmcs_region_physical_address
  136. );
  137. self.vmxon_region.revision_id = revision_id;
  138. self.vmcs_region.revision_id = revision_id;
  139. return Ok(());
  140. }
  141. }
  142. impl VmxVcpu {
  143. pub fn new(vcpu_id: u32, parent_vm: Vm) -> Result<Self, SystemError> {
  144. debug!("Creating processor {}", vcpu_id);
  145. let instance = Self {
  146. vcpu_id,
  147. vcpu_ctx: VcpuContextFrame {
  148. regs: [0; NR_VCPU_REGS],
  149. rip: 0,
  150. rflags: 0,
  151. },
  152. vcpu_state: VcpuState::Inv,
  153. mmu: KvmMmu::default(),
  154. data: VcpuData::alloc()?,
  155. parent_vm,
  156. };
  157. Ok(instance)
  158. }
  159. pub fn vmx_set_cr0(cr0: X86_CR0) -> Result<(), SystemError> {
  160. let mut hw_cr0 = cr0 & !(X86_CR0::CR0_NW | X86_CR0::CR0_CD);
  161. hw_cr0 |= X86_CR0::CR0_WP | X86_CR0::CR0_NE;
  162. vmx_vmwrite(VmcsFields::GUEST_CR0 as u32, cr0.bits() as u64)?;
  163. Ok(())
  164. }
  165. pub fn vmcs_init_guest(&self) -> Result<(), SystemError> {
  166. // https://www.sandpile.org/x86/initial.htm
  167. // segment field initialization
  168. seg_setup(Sreg::CS as usize)?;
  169. vmx_vmwrite(VmcsFields::GUEST_CS_SELECTOR as u32, 0xf000)?;
  170. vmx_vmwrite(VmcsFields::GUEST_CS_BASE as u32, 0xffff0000)?;
  171. seg_setup(Sreg::DS as usize)?;
  172. seg_setup(Sreg::ES as usize)?;
  173. seg_setup(Sreg::FS as usize)?;
  174. seg_setup(Sreg::GS as usize)?;
  175. seg_setup(Sreg::SS as usize)?;
  176. vmx_vmwrite(VmcsFields::GUEST_TR_SELECTOR as u32, 0)?;
  177. vmx_vmwrite(VmcsFields::GUEST_TR_BASE as u32, 0)?;
  178. vmx_vmwrite(VmcsFields::GUEST_TR_LIMIT as u32, 0xffff)?;
  179. vmx_vmwrite(VmcsFields::GUEST_TR_ACCESS_RIGHTS as u32, 0x008b)?;
  180. vmx_vmwrite(VmcsFields::GUEST_LDTR_SELECTOR as u32, 0)?;
  181. vmx_vmwrite(VmcsFields::GUEST_LDTR_BASE as u32, 0)?;
  182. vmx_vmwrite(VmcsFields::GUEST_LDTR_LIMIT as u32, 0xffff)?;
  183. vmx_vmwrite(VmcsFields::GUEST_LDTR_ACCESS_RIGHTS as u32, 0x00082)?;
  184. vmx_vmwrite(VmcsFields::GUEST_RFLAGS as u32, 2)?;
  185. vmx_vmwrite(VmcsFields::GUEST_GDTR_BASE as u32, 0)?;
  186. vmx_vmwrite(VmcsFields::GUEST_GDTR_LIMIT as u32, 0x0000_FFFF_u64)?;
  187. vmx_vmwrite(VmcsFields::GUEST_IDTR_BASE as u32, 0)?;
  188. vmx_vmwrite(VmcsFields::GUEST_IDTR_LIMIT as u32, 0x0000_FFFF_u64)?;
  189. vmx_vmwrite(VmcsFields::GUEST_ACTIVITY_STATE as u32, 0)?; // State = Active
  190. vmx_vmwrite(VmcsFields::GUEST_INTERRUPTIBILITY_STATE as u32, 0)?;
  191. vmx_vmwrite(VmcsFields::GUEST_PENDING_DBG_EXCEPTIONS as u32, 0)?;
  192. vmx_vmwrite(VmcsFields::CTRL_VM_ENTRY_INTR_INFO_FIELD as u32, 0)?;
  193. let cr0 = X86_CR0::CR0_NW | X86_CR0::CR0_CD | X86_CR0::CR0_ET;
  194. Self::vmx_set_cr0(cr0)?;
  195. vmx_vmwrite(VmcsFields::GUEST_CR0 as u32, cr0.bits() as u64)?;
  196. vmx_vmwrite(
  197. VmcsFields::GUEST_SYSENTER_CS as u32,
  198. vmx_vmread(VmcsFields::HOST_SYSENTER_CS as u32).unwrap(),
  199. )?;
  200. vmx_vmwrite(VmcsFields::GUEST_VMX_PREEMPT_TIMER_VALUE as u32, 0)?;
  201. vmx_vmwrite(VmcsFields::GUEST_INTR_STATUS as u32, 0)?;
  202. vmx_vmwrite(VmcsFields::GUEST_PML_INDEX as u32, 0)?;
  203. vmx_vmwrite(VmcsFields::GUEST_VMCS_LINK_PTR as u32, u64::MAX)?;
  204. vmx_vmwrite(VmcsFields::GUEST_DEBUGCTL as u32, unsafe {
  205. msr::rdmsr(msr::IA32_DEBUGCTL)
  206. })?;
  207. vmx_vmwrite(
  208. VmcsFields::GUEST_SYSENTER_ESP as u32,
  209. vmx_vmread(VmcsFields::HOST_SYSENTER_ESP as u32).unwrap(),
  210. )?;
  211. vmx_vmwrite(
  212. VmcsFields::GUEST_SYSENTER_EIP as u32,
  213. vmx_vmread(VmcsFields::HOST_SYSENTER_EIP as u32).unwrap(),
  214. )?;
  215. // Self::vmx_set_cr0();
  216. vmx_vmwrite(VmcsFields::GUEST_CR3 as u32, 0)?;
  217. vmx_vmwrite(
  218. VmcsFields::GUEST_CR4 as u32,
  219. 1, // enable vme
  220. )?;
  221. vmx_vmwrite(VmcsFields::GUEST_DR7 as u32, 0x0000_0000_0000_0400)?;
  222. vmx_vmwrite(
  223. VmcsFields::GUEST_RSP as u32,
  224. self.vcpu_ctx.regs[VcpuRegIndex::Rsp as usize] as u64,
  225. )?;
  226. vmx_vmwrite(VmcsFields::GUEST_RIP as u32, self.vcpu_ctx.rip as u64)?;
  227. debug!("vmcs init guest rip: {:#x}", self.vcpu_ctx.rip as u64);
  228. debug!(
  229. "vmcs init guest rsp: {:#x}",
  230. self.vcpu_ctx.regs[VcpuRegIndex::Rsp as usize] as u64
  231. );
  232. // vmx_vmwrite(VmcsFields::GUEST_RFLAGS as u32, x86::bits64::rflags::read().bits())?;
  233. Ok(())
  234. }
  235. #[allow(deprecated)]
  236. pub fn vmcs_init_host(&self) -> Result<(), SystemError> {
  237. vmx_vmwrite(VmcsFields::HOST_CR0 as u32, unsafe {
  238. controlregs::cr0().bits().try_into().unwrap()
  239. })?;
  240. vmx_vmwrite(VmcsFields::HOST_CR3 as u32, unsafe { controlregs::cr3() })?;
  241. vmx_vmwrite(VmcsFields::HOST_CR4 as u32, unsafe {
  242. controlregs::cr4().bits().try_into().unwrap()
  243. })?;
  244. vmx_vmwrite(
  245. VmcsFields::HOST_ES_SELECTOR as u32,
  246. (segmentation::es().bits() & (!0x07)).into(),
  247. )?;
  248. vmx_vmwrite(
  249. VmcsFields::HOST_CS_SELECTOR as u32,
  250. (segmentation::cs().bits() & (!0x07)).into(),
  251. )?;
  252. vmx_vmwrite(
  253. VmcsFields::HOST_SS_SELECTOR as u32,
  254. (segmentation::ss().bits() & (!0x07)).into(),
  255. )?;
  256. vmx_vmwrite(
  257. VmcsFields::HOST_DS_SELECTOR as u32,
  258. (segmentation::ds().bits() & (!0x07)).into(),
  259. )?;
  260. vmx_vmwrite(
  261. VmcsFields::HOST_FS_SELECTOR as u32,
  262. (segmentation::fs().bits() & (!0x07)).into(),
  263. )?;
  264. vmx_vmwrite(
  265. VmcsFields::HOST_GS_SELECTOR as u32,
  266. (segmentation::gs().bits() & (!0x07)).into(),
  267. )?;
  268. vmx_vmwrite(VmcsFields::HOST_TR_SELECTOR as u32, unsafe {
  269. (x86::task::tr().bits() & (!0x07)).into()
  270. })?;
  271. vmx_vmwrite(VmcsFields::HOST_FS_BASE as u32, unsafe {
  272. msr::rdmsr(msr::IA32_FS_BASE)
  273. })?;
  274. vmx_vmwrite(VmcsFields::HOST_GS_BASE as u32, unsafe {
  275. msr::rdmsr(msr::IA32_GS_BASE)
  276. })?;
  277. let mut pseudo_descriptpr: x86::dtables::DescriptorTablePointer<u64> = Default::default();
  278. unsafe {
  279. x86::dtables::sgdt(&mut pseudo_descriptpr);
  280. };
  281. vmx_vmwrite(
  282. VmcsFields::HOST_TR_BASE as u32,
  283. get_segment_base(pseudo_descriptpr.base, pseudo_descriptpr.limit, unsafe {
  284. x86::task::tr().bits()
  285. }),
  286. )?;
  287. vmx_vmwrite(
  288. VmcsFields::HOST_GDTR_BASE as u32,
  289. pseudo_descriptpr.base as usize as u64,
  290. )?;
  291. vmx_vmwrite(VmcsFields::HOST_IDTR_BASE as u32, unsafe {
  292. let mut pseudo_descriptpr: x86::dtables::DescriptorTablePointer<u64> =
  293. Default::default();
  294. x86::dtables::sidt(&mut pseudo_descriptpr);
  295. pseudo_descriptpr.base as usize as u64
  296. })?;
  297. // fast entry into the kernel
  298. vmx_vmwrite(VmcsFields::HOST_SYSENTER_ESP as u32, unsafe {
  299. msr::rdmsr(msr::IA32_SYSENTER_ESP)
  300. })?;
  301. vmx_vmwrite(VmcsFields::HOST_SYSENTER_EIP as u32, unsafe {
  302. msr::rdmsr(msr::IA32_SYSENTER_EIP)
  303. })?;
  304. vmx_vmwrite(VmcsFields::HOST_SYSENTER_CS as u32, unsafe {
  305. msr::rdmsr(msr::IA32_SYSENTER_CS)
  306. })?;
  307. // vmx_vmwrite(VmcsFields::HOST_RIP as u32, vmx_return as *const () as u64)?;
  308. // debug!("vmcs init host rip: {:#x}", vmx_return as *const () as u64);
  309. Ok(())
  310. }
  311. // Intel SDM Volume 3C Chapter 25.3 “Organization of VMCS Data”
  312. pub fn vmcs_init(&self) -> Result<(), SystemError> {
  313. vmx_vmwrite(VmcsFields::CTRL_PAGE_FAULT_ERR_CODE_MASK as u32, 0)?;
  314. vmx_vmwrite(VmcsFields::CTRL_PAGE_FAULT_ERR_CODE_MATCH as u32, 0)?;
  315. vmx_vmwrite(VmcsFields::CTRL_CR3_TARGET_COUNT as u32, 0)?;
  316. vmx_vmwrite(
  317. VmcsFields::CTRL_PIN_BASED_VM_EXEC_CTRLS as u32,
  318. adjust_vmx_pinbased_controls() as u64,
  319. )?;
  320. vmx_vmwrite(
  321. VmcsFields::CTRL_MSR_BITMAP_ADDR as u32,
  322. self.data.msr_bitmap_physical_address,
  323. )?;
  324. vmx_vmwrite(VmcsFields::CTRL_CR0_READ_SHADOW as u32, unsafe {
  325. controlregs::cr0().bits().try_into().unwrap()
  326. })?;
  327. vmx_vmwrite(VmcsFields::CTRL_CR4_READ_SHADOW as u32, unsafe {
  328. controlregs::cr4().bits().try_into().unwrap()
  329. })?;
  330. vmx_vmwrite(
  331. VmcsFields::CTRL_VM_ENTRY_CTRLS as u32,
  332. adjust_vmx_entry_controls() as u64,
  333. )?;
  334. vmx_vmwrite(
  335. VmcsFields::CTRL_PRIMARY_VM_EXIT_CTRLS as u32,
  336. adjust_vmx_exit_controls() as u64,
  337. )?;
  338. vmx_vmwrite(
  339. VmcsFields::CTRL_PRIMARY_PROCESSOR_VM_EXEC_CTRLS as u32,
  340. adjust_vmx_primary_process_exec_controls() as u64,
  341. )?;
  342. vmx_vmwrite(
  343. VmcsFields::CTRL_SECONDARY_PROCESSOR_VM_EXEC_CTRLS as u32,
  344. adjust_vmx_secondary_process_exec_controls() as u64,
  345. )?;
  346. self.vmcs_init_host()?;
  347. self.vmcs_init_guest()?;
  348. Ok(())
  349. }
  350. fn kvm_mmu_load(&mut self) -> Result<(), SystemError> {
  351. debug!("kvm_mmu_load!");
  352. // 申请并创建新的页表
  353. let mapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
  354. PageMapper::create(PageTableKind::EPT, LockedFrameAllocator)
  355. .ok_or(SystemError::ENOMEM)?
  356. };
  357. let ept_root_hpa = mapper.table().phys();
  358. let set_eptp_fn = self.mmu.set_eptp.unwrap();
  359. set_eptp_fn(ept_root_hpa.data() as u64)?;
  360. self.mmu.root_hpa = ept_root_hpa.data() as u64;
  361. debug!("ept_root_hpa:{:x}!", ept_root_hpa.data() as u64);
  362. return Ok(());
  363. }
  364. pub fn set_regs(&mut self, regs: VcpuContextFrame) -> Result<(), SystemError> {
  365. self.vcpu_ctx = regs;
  366. Ok(())
  367. }
  368. }
  369. impl Vcpu for VmxVcpu {
  370. /// Virtualize the CPU
  371. fn virtualize_cpu(&mut self) -> Result<(), SystemError> {
  372. match has_intel_vmx_support() {
  373. Ok(_) => {
  374. debug!("[+] CPU supports Intel VMX");
  375. }
  376. Err(e) => {
  377. debug!("[-] CPU does not support Intel VMX: {:?}", e);
  378. return Err(SystemError::ENOSYS);
  379. }
  380. };
  381. match enable_vmx_operation() {
  382. Ok(_) => {
  383. debug!("[+] Enabling Virtual Machine Extensions (VMX)");
  384. }
  385. Err(_) => {
  386. debug!("[-] VMX operation is not supported on this processor.");
  387. return Err(SystemError::ENOSYS);
  388. }
  389. }
  390. vmxon(self.data.vmxon_region_physical_address)?;
  391. debug!("[+] VMXON successful!");
  392. vmx_vmclear(self.data.vmcs_region_physical_address)?;
  393. vmx_vmptrld(self.data.vmcs_region_physical_address)?;
  394. debug!("[+] VMPTRLD successful!");
  395. self.vmcs_init().expect("vncs_init fail");
  396. debug!("[+] VMCS init!");
  397. // debug!("vmcs init host rip: {:#x}", vmx_return as *const () as u64);
  398. // debug!("vmcs init host rsp: {:#x}", x86::bits64::registers::rsp());
  399. // vmx_vmwrite(VmcsFields::HOST_RSP as u32, x86::bits64::registers::rsp())?;
  400. // vmx_vmwrite(VmcsFields::HOST_RIP as u32, vmx_return as *const () as u64)?;
  401. // vmx_vmwrite(VmcsFields::HOST_RSP as u32, x86::bits64::registers::rsp())?;
  402. self.kvm_mmu_load()?;
  403. Ok(())
  404. }
  405. fn devirtualize_cpu(&self) -> Result<(), SystemError> {
  406. vmxoff()?;
  407. Ok(())
  408. }
  409. /// Gets the index of the current logical/virtual processor
  410. fn id(&self) -> u32 {
  411. self.vcpu_id
  412. }
  413. }
  414. pub fn get_segment_base(gdt_base: *const u64, gdt_size: u16, segment_selector: u16) -> u64 {
  415. let table = segment_selector & 0x0004; // get table indicator in selector
  416. let index = (segment_selector >> 3) as usize; // get index in selector
  417. if table == 0 && index == 0 {
  418. return 0;
  419. }
  420. let descriptor_table = unsafe { slice::from_raw_parts(gdt_base, gdt_size.into()) };
  421. let descriptor = descriptor_table[index];
  422. let base_high = (descriptor & 0xFF00_0000_0000_0000) >> 32;
  423. let base_mid = (descriptor & 0x0000_00FF_0000_0000) >> 16;
  424. let base_low = (descriptor & 0x0000_0000_FFFF_0000) >> 16;
  425. let segment_base = (base_high | base_mid | base_low) & 0xFFFFFFFF;
  426. let virtaddr = unsafe { MMArch::phys_2_virt(PhysAddr::new(segment_base as usize)).unwrap() };
  427. return virtaddr.data() as u64;
  428. }
  429. // FIXME: may have bug
  430. // pub fn read_segment_access_rights(segement_selector: u16) -> u32{
  431. // let table = segement_selector & 0x0004; // get table indicator in selector
  432. // let index = segement_selector & 0xFFF8; // get index in selector
  433. // let mut flag: u16;
  434. // if table==0 && index==0 {
  435. // return 0;
  436. // }
  437. // unsafe{
  438. // asm!(
  439. // "lar {0:r}, rcx",
  440. // "mov {1:r}, {0:r}",
  441. // in(reg) segement_selector,
  442. // out(reg) flag,
  443. // );
  444. // }
  445. // return (flag >> 8) as u32;
  446. // }
  447. pub fn adjust_vmx_controls(ctl_min: u32, ctl_opt: u32, msr: u32, result: &mut u32) {
  448. let vmx_msr_low: u32 = unsafe { (msr::rdmsr(msr) & 0x0000_0000_FFFF_FFFF) as u32 };
  449. let vmx_msr_high: u32 = unsafe { (msr::rdmsr(msr) << 32) as u32 };
  450. let mut ctl: u32 = ctl_min | ctl_opt;
  451. ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
  452. ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
  453. *result = ctl;
  454. }
  455. pub fn adjust_vmx_entry_controls() -> u32 {
  456. let mut entry_controls: u32 = 0;
  457. adjust_vmx_controls(
  458. VmxEntryCtrl::LOAD_DBG_CTRLS.bits(),
  459. VmxEntryCtrl::IA32E_MODE_GUEST.bits(),
  460. msr::IA32_VMX_ENTRY_CTLS, //Capability Reporting Register of VM-entry Controls (R/O)
  461. &mut entry_controls,
  462. );
  463. return entry_controls;
  464. // msr::IA32_VMX_TRUE_ENTRY_CTLS//Capability Reporting Register of VM-entry Flex Controls (R/O) See Table 35-2
  465. }
  466. pub fn adjust_vmx_exit_controls() -> u32 {
  467. let mut exit_controls: u32 = 0;
  468. adjust_vmx_controls(
  469. VmxPrimaryExitCtrl::SAVE_DBG_CTRLS.bits(),
  470. VmxPrimaryExitCtrl::HOST_ADDR_SPACE_SIZE.bits(),
  471. msr::IA32_VMX_EXIT_CTLS,
  472. &mut exit_controls,
  473. );
  474. return exit_controls;
  475. }
  476. pub fn adjust_vmx_pinbased_controls() -> u32 {
  477. let mut controls: u32 = 16;
  478. adjust_vmx_controls(0, 0, msr::IA32_VMX_TRUE_PINBASED_CTLS, &mut controls);
  479. // debug!("adjust_vmx_pinbased_controls: {:x}", controls);
  480. return controls;
  481. }
  482. pub fn adjust_vmx_primary_process_exec_controls() -> u32 {
  483. let mut controls: u32 = 0;
  484. adjust_vmx_controls(
  485. 0,
  486. VmxPrimaryProcessBasedExecuteCtrl::USE_MSR_BITMAPS.bits()
  487. | VmxPrimaryProcessBasedExecuteCtrl::ACTIVATE_SECONDARY_CONTROLS.bits(),
  488. msr::IA32_VMX_PROCBASED_CTLS,
  489. &mut controls,
  490. );
  491. return controls;
  492. }
  493. pub fn adjust_vmx_secondary_process_exec_controls() -> u32 {
  494. let mut controls: u32 = 0;
  495. adjust_vmx_controls(
  496. 0,
  497. VmxSecondaryProcessBasedExecuteCtrl::ENABLE_RDTSCP.bits()
  498. | VmxSecondaryProcessBasedExecuteCtrl::ENABLE_XSAVES_XRSTORS.bits()
  499. | VmxSecondaryProcessBasedExecuteCtrl::ENABLE_INVPCID.bits()
  500. | VmxSecondaryProcessBasedExecuteCtrl::ENABLE_EPT.bits()
  501. | VmxSecondaryProcessBasedExecuteCtrl::UNRESTRICTED_GUEST.bits(),
  502. msr::IA32_VMX_PROCBASED_CTLS2,
  503. &mut controls,
  504. );
  505. return controls;
  506. }
  507. /// Check to see if CPU is Intel (“GenuineIntel”).
  508. /// Check processor supports for Virtual Machine Extension (VMX) technology
  509. // CPUID.1:ECX.VMX[bit 5] = 1 (Intel Manual: 24.6 Discovering Support for VMX)
  510. pub fn has_intel_vmx_support() -> Result<(), SystemError> {
  511. let cpuid = CpuId::new();
  512. if let Some(vi) = cpuid.get_vendor_info() {
  513. if vi.as_str() != "GenuineIntel" {
  514. return Err(SystemError::ENOSYS);
  515. }
  516. }
  517. if let Some(fi) = cpuid.get_feature_info() {
  518. if !fi.has_vmx() {
  519. return Err(SystemError::ENOSYS);
  520. }
  521. }
  522. Ok(())
  523. }
  524. /// Enables Virtual Machine Extensions
  525. // - CR4.VMXE[bit 13] = 1 (Intel Manual: 24.7 Enabling and Entering VMX Operation)
  526. pub fn enable_vmx_operation() -> Result<(), SystemError> {
  527. let mut cr4 = unsafe { controlregs::cr4() };
  528. cr4.set(controlregs::Cr4::CR4_ENABLE_VMX, true);
  529. unsafe { controlregs::cr4_write(cr4) };
  530. set_lock_bit()?;
  531. debug!("[+] Lock bit set via IA32_FEATURE_CONTROL");
  532. set_cr0_bits();
  533. debug!("[+] Mandatory bits in CR0 set/cleared");
  534. set_cr4_bits();
  535. debug!("[+] Mandatory bits in CR4 set/cleared");
  536. Ok(())
  537. }
  538. /// Check if we need to set bits in IA32_FEATURE_CONTROL
  539. // (Intel Manual: 24.7 Enabling and Entering VMX Operation)
  540. fn set_lock_bit() -> Result<(), SystemError> {
  541. const VMX_LOCK_BIT: u64 = 1 << 0;
  542. const VMXON_OUTSIDE_SMX: u64 = 1 << 2;
  543. let ia32_feature_control = unsafe { msr::rdmsr(msr::IA32_FEATURE_CONTROL) };
  544. if (ia32_feature_control & VMX_LOCK_BIT) == 0 {
  545. unsafe {
  546. msr::wrmsr(
  547. msr::IA32_FEATURE_CONTROL,
  548. VMXON_OUTSIDE_SMX | VMX_LOCK_BIT | ia32_feature_control,
  549. )
  550. };
  551. } else if (ia32_feature_control & VMXON_OUTSIDE_SMX) == 0 {
  552. return Err(SystemError::EPERM);
  553. }
  554. Ok(())
  555. }
  556. /// Set the mandatory bits in CR0 and clear bits that are mandatory zero
  557. /// (Intel Manual: 24.8 Restrictions on VMX Operation)
  558. fn set_cr0_bits() {
  559. let ia32_vmx_cr0_fixed0 = unsafe { msr::rdmsr(msr::IA32_VMX_CR0_FIXED0) };
  560. let ia32_vmx_cr0_fixed1 = unsafe { msr::rdmsr(msr::IA32_VMX_CR0_FIXED1) };
  561. let mut cr0 = unsafe { controlregs::cr0() };
  562. cr0 |= controlregs::Cr0::from_bits_truncate(ia32_vmx_cr0_fixed0 as usize);
  563. cr0 &= controlregs::Cr0::from_bits_truncate(ia32_vmx_cr0_fixed1 as usize);
  564. unsafe { controlregs::cr0_write(cr0) };
  565. }
  566. /// Set the mandatory bits in CR4 and clear bits that are mandatory zero
  567. /// (Intel Manual: 24.8 Restrictions on VMX Operation)
  568. fn set_cr4_bits() {
  569. let ia32_vmx_cr4_fixed0 = unsafe { msr::rdmsr(msr::IA32_VMX_CR4_FIXED0) };
  570. let ia32_vmx_cr4_fixed1 = unsafe { msr::rdmsr(msr::IA32_VMX_CR4_FIXED1) };
  571. let mut cr4 = unsafe { controlregs::cr4() };
  572. cr4 |= controlregs::Cr4::from_bits_truncate(ia32_vmx_cr4_fixed0 as usize);
  573. cr4 &= controlregs::Cr4::from_bits_truncate(ia32_vmx_cr4_fixed1 as usize);
  574. unsafe { controlregs::cr4_write(cr4) };
  575. }