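//! VMCS (Virtual Machine Control Structure) management for VMX.
//!
//! This module defines the per-CPU VMCS pointers, the `LoadedVmcs` bookkeeping that caches
//! host state and control-field values to avoid redundant VMWRITEs, the MSR bitmap wrapper,
//! and helpers for decoding VM-exit interruption information.
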
use core::intrinsics::unlikely;

use alloc::{boxed::Box, collections::LinkedList, sync::Arc};
use bitmap::{traits::BitMapOps, AllocBitmap};
use x86::{
    controlregs::Cr4,
    vmx::vmcs::{
        control::{self, PrimaryControls},
        host,
    },
};
use x86_64::{registers::control::Cr3Flags, structures::paging::PhysFrame};

use crate::{
    arch::{
        vm::asm::{IntrInfo, IntrType, VmxAsm},
        MMArch,
    },
    libs::spinlock::{SpinLock, SpinLockGuard},
    mm::{percpu::PerCpuVar, MemoryManagementArch, PhysAddr, VirtAddr},
    smp::cpu::ProcessorId,
};

use super::vmx_info;

pub mod feat;

/// Per-CPU pointer to the VMCS that is currently active on this processor.
pub static mut PERCPU_VMCS: Option<PerCpuVar<Option<Arc<LockedVMControlStructure>>>> = None;
/// Per-CPU list of loaded VMCS descriptors on this processor.
pub static mut PERCPU_LOADED_VMCS_LIST: Option<PerCpuVar<LinkedList<Arc<LockedLoadedVmcs>>>> = None;
/// Per-CPU VMX area.
pub static mut VMXAREA: Option<PerCpuVar<Box<VMControlStructure>>> = None;

pub fn current_vmcs() -> &'static Option<Arc<LockedVMControlStructure>> {
    unsafe { PERCPU_VMCS.as_ref().unwrap().get() }
}

pub fn current_vmcs_mut() -> &'static mut Option<Arc<LockedVMControlStructure>> {
    unsafe { PERCPU_VMCS.as_ref().unwrap().get_mut() }
}

pub fn current_loaded_vmcs_list_mut() -> &'static mut LinkedList<Arc<LockedLoadedVmcs>> {
    unsafe { PERCPU_LOADED_VMCS_LIST.as_ref().unwrap().get_mut() }
}

#[allow(dead_code)]
pub fn current_loaded_vmcs_list() -> &'static LinkedList<Arc<LockedLoadedVmcs>> {
    unsafe { PERCPU_LOADED_VMCS_LIST.as_ref().unwrap().get() }
}

pub fn vmx_area() -> &'static PerCpuVar<Box<VMControlStructure>> {
    unsafe { VMXAREA.as_ref().unwrap() }
}
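
/// A 4 KiB VMCS region as laid out by the Intel SDM: a 31-bit revision identifier plus a
/// shadow-VMCS indicator bit in the first word, a VMX-abort indicator in the second word,
/// and implementation-specific data in the rest of the page.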
#[repr(C, align(4096))]
#[derive(Debug, Clone)]
pub struct VMControlStructure {
    pub header: u32,
    pub abort: u32,
    pub data:
        [u8; MMArch::PAGE_SIZE - core::mem::size_of::<u32>() - core::mem::size_of::<u32>()],
}

impl VMControlStructure {
    pub fn new() -> Box<Self> {
        let mut vmcs: Box<VMControlStructure> = unsafe {
            Box::try_new_zeroed()
                .expect("alloc vmcs failed")
                .assume_init()
        };

        vmcs.set_revision_id(vmx_info().vmcs_config.revision_id);
        vmcs
    }

    pub fn revision_id(&self) -> u32 {
        self.header & 0x7FFF_FFFF
    }

    #[allow(dead_code)]
    pub fn is_shadow_vmcs(&self) -> bool {
        // Bit 31 of the first VMCS word marks a shadow VMCS.
        self.header & 0x8000_0000 != 0
    }

    pub fn set_shadow_vmcs(&mut self, shadow: bool) {
        self.header |= (shadow as u32) << 31;
    }

    pub fn set_revision_id(&mut self, id: u32) {
        self.header = self.header & 0x8000_0000 | (id & 0x7FFF_FFFF);
    }
}

#[derive(Debug)]
pub struct LockedVMControlStructure {
    /// Physical address of the inner VMCS.
    phys_addr: PhysAddr,
    inner: SpinLock<Box<VMControlStructure>>,
}

impl LockedVMControlStructure {
    #[inline(never)]
    pub fn new(shadow: bool) -> Arc<Self> {
        let mut vmcs = VMControlStructure::new();

        let phys_addr = unsafe {
            MMArch::virt_2_phys(VirtAddr::new(vmcs.as_ref() as *const _ as usize)).unwrap()
        };

        vmcs.set_shadow_vmcs(shadow);

        Arc::new(Self {
            phys_addr,
            inner: SpinLock::new(vmcs),
        })
    }

    pub fn lock(&self) -> SpinLockGuard<'_, Box<VMControlStructure>> {
        self.inner.lock()
    }

    pub fn phys_addr(&self) -> PhysAddr {
        self.phys_addr
    }
}
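
/// Cached copy of the host-state fields already written to the current VMCS, used by
/// `set_host_fsgs` to skip VMWRITEs whose values have not changed.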
#[derive(Debug)]
pub struct VmcsHostState {
    pub cr3: (PhysFrame, Cr3Flags),
    pub cr4: Cr4,
    pub gs_base: usize,
    pub fs_base: usize,
    pub rsp: usize,
    pub fs_sel: u16,
    pub gs_sel: u16,
    pub ldt_sel: u16,
    pub ds_sel: u16,
    pub es_sel: u16,
}

impl VmcsHostState {
    pub fn set_host_fsgs(&mut self, fs_sel: u16, gs_sel: u16, fs_base: usize, gs_base: usize) {
        if unlikely(self.fs_sel != fs_sel) {
            // Host selector fields must have TI = 0 and RPL = 0; otherwise write 0.
            if (fs_sel & 7) == 0 {
                VmxAsm::vmx_vmwrite(host::FS_SELECTOR, fs_sel as u64);
            } else {
                VmxAsm::vmx_vmwrite(host::FS_SELECTOR, 0);
            }

            self.fs_sel = fs_sel;
        }

        if unlikely(self.gs_sel != gs_sel) {
            if (gs_sel & 7) == 0 {
                VmxAsm::vmx_vmwrite(host::GS_SELECTOR, gs_sel as u64);
            } else {
                VmxAsm::vmx_vmwrite(host::GS_SELECTOR, 0);
            }

            self.gs_sel = gs_sel;
        }

        if unlikely(fs_base != self.fs_base) {
            VmxAsm::vmx_vmwrite(host::FS_BASE, fs_base as u64);
            self.fs_base = fs_base;
        }

        if unlikely(self.gs_base != gs_base) {
            VmxAsm::vmx_vmwrite(host::GS_BASE, gs_base as u64);
            self.gs_base = gs_base;
        }
    }
}

impl Default for VmcsHostState {
    fn default() -> Self {
        Self {
            cr3: (
                PhysFrame::containing_address(x86_64::PhysAddr::new(0)),
                Cr3Flags::empty(),
            ),
            cr4: Cr4::empty(),
            gs_base: 0,
            fs_base: 0,
            rsp: 0,
            fs_sel: 0,
            gs_sel: 0,
            ldt_sel: 0,
            ds_sel: 0,
            es_sel: 0,
        }
    }
}
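
/// Cached ("shadow") copies of the last values written to the VMCS control fields, so
/// `LoadedVmcs::controls_set` can avoid redundant VMWRITEs.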
#[derive(Debug, Default)]
pub struct VmcsControlsShadow {
    vm_entry: u32,
    vm_exit: u32,
    pin: u32,
    exec: u32,
    secondary_exec: u32,
    tertiary_exec: u64,
}

#[derive(Debug)]
#[allow(dead_code)]
pub struct LoadedVmcs {
    pub vmcs: Arc<LockedVMControlStructure>,
    pub shadow_vmcs: Option<Arc<LockedVMControlStructure>>,
    pub cpu: ProcessorId,
    /// Whether VMLAUNCH has already been executed for this VMCS
    pub launched: bool,
    /// Whether NMIs are known to be unmasked
    nmi_known_unmasked: bool,
    /// Whether the hypervisor timer is soft-disabled
    hv_timer_soft_disabled: bool,
    /// For CPUs without virtual-NMI support: whether NMIs are soft-blocked
    pub soft_vnmi_blocked: bool,
    /// Time of VM entry
    entry_time: u64,
    /// How long virtual NMIs have been blocked
    vnmi_blocked_time: u64,
    /// MSR bitmap
    pub msr_bitmap: VmxMsrBitmap,
    /// Cached host state written to this VMCS
    pub host_state: VmcsHostState,
    /// Shadow copies of the VMCS control fields
    controls_shadow: VmcsControlsShadow,
}

impl LoadedVmcs {
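    /// Write one VMCS control field, caching the value in `controls_shadow` and skipping
    /// the VMWRITE when the cached value already matches.
    ///
    /// A minimal usage sketch (the `locked_loaded_vmcs` binding is an assumption for
    /// illustration, not part of this module):
    ///
    /// ```ignore
    /// let mut loaded_vmcs = locked_loaded_vmcs.lock();
    /// // Enable MSR bitmaps in the primary processor-based execution controls.
    /// loaded_vmcs.controls_setbit(
    ///     ControlsType::Exec,
    ///     PrimaryControls::USE_MSR_BITMAPS.bits() as u64,
    /// );
    /// ```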
    pub fn controls_set(&mut self, ctl_type: ControlsType, value: u64) {
        match ctl_type {
            ControlsType::VmEntry => {
                if self.controls_shadow.vm_entry != value as u32 {
                    VmxAsm::vmx_vmwrite(control::VMENTRY_CONTROLS, value);
                    self.controls_shadow.vm_entry = value as u32;
                }
            }
            ControlsType::VmExit => {
                if self.controls_shadow.vm_exit != value as u32 {
                    VmxAsm::vmx_vmwrite(control::VMEXIT_CONTROLS, value);
                    self.controls_shadow.vm_exit = value as u32;
                }
            }
            ControlsType::Pin => {
                if self.controls_shadow.pin != value as u32 {
                    VmxAsm::vmx_vmwrite(control::PINBASED_EXEC_CONTROLS, value);
                    self.controls_shadow.pin = value as u32;
                }
            }
            ControlsType::Exec => {
                if self.controls_shadow.exec != value as u32 {
                    VmxAsm::vmx_vmwrite(control::PRIMARY_PROCBASED_EXEC_CONTROLS, value);
                    self.controls_shadow.exec = value as u32;
                }
            }
            ControlsType::SecondaryExec => {
                if self.controls_shadow.secondary_exec != value as u32 {
                    VmxAsm::vmx_vmwrite(control::SECONDARY_PROCBASED_EXEC_CONTROLS, value);
                    self.controls_shadow.secondary_exec = value as u32;
                }
            }
            ControlsType::TertiaryExec => {
                if self.controls_shadow.tertiary_exec != value {
                    // 0x2034 is the VMCS field encoding of the tertiary processor-based
                    // execution controls, which the `x86` crate does not define.
                    VmxAsm::vmx_vmwrite(0x2034, value);
                    self.controls_shadow.tertiary_exec = value;
                }
            }
        }
    }

    pub fn controls_get(&self, ctl_type: ControlsType) -> u64 {
        match ctl_type {
            ControlsType::VmEntry => self.controls_shadow.vm_entry as u64,
            ControlsType::VmExit => self.controls_shadow.vm_exit as u64,
            ControlsType::Pin => self.controls_shadow.pin as u64,
            ControlsType::Exec => self.controls_shadow.exec as u64,
            ControlsType::SecondaryExec => self.controls_shadow.secondary_exec as u64,
            ControlsType::TertiaryExec => self.controls_shadow.tertiary_exec,
        }
    }

    pub fn controls_setbit(&mut self, ctl_type: ControlsType, value: u64) {
        let val = self.controls_get(ctl_type) | value;
        self.controls_set(ctl_type, val)
    }

    pub fn controls_clearbit(&mut self, ctl_type: ControlsType, value: u64) {
        let val = self.controls_get(ctl_type) & (!value);
        self.controls_set(ctl_type, val)
    }

    pub fn msr_write_intercepted(&mut self, msr: u32) -> bool {
        // If MSR bitmaps are not in use, every MSR access is intercepted.
        if unsafe {
            !PrimaryControls::from_bits_unchecked(self.controls_get(ControlsType::Exec) as u32)
                .contains(PrimaryControls::USE_MSR_BITMAPS)
        } {
            return true;
        }

        return self
            .msr_bitmap
            .ctl(msr, VmxMsrBitmapAction::Test, VmxMsrBitmapAccess::Write);
    }
}

#[derive(Debug)]
pub struct LockedLoadedVmcs {
    inner: SpinLock<LoadedVmcs>,
}

#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub enum ControlsType {
    VmEntry,
    VmExit,
    Pin,
    Exec,
    SecondaryExec,
    TertiaryExec,
}

impl LockedLoadedVmcs {
    pub fn new() -> Arc<Self> {
        let bitmap = if vmx_info().has_msr_bitmap() {
            VmxMsrBitmap::new(true, MMArch::PAGE_SIZE * u8::BITS as usize)
        } else {
            VmxMsrBitmap::new(true, 0)
        };
        let vmcs = LockedVMControlStructure::new(false);

        VmxAsm::vmclear(vmcs.phys_addr);

        Arc::new(Self {
            inner: SpinLock::new(LoadedVmcs {
                vmcs,
                shadow_vmcs: None,
                cpu: ProcessorId::INVALID,
                launched: false,
                hv_timer_soft_disabled: false,
                msr_bitmap: bitmap,
                host_state: VmcsHostState::default(),
                controls_shadow: VmcsControlsShadow::default(),
                nmi_known_unmasked: false,
                soft_vnmi_blocked: false,
                entry_time: 0,
                vnmi_blocked_time: 0,
            }),
        })
    }

    pub fn lock(&self) -> SpinLockGuard<'_, LoadedVmcs> {
        self.inner.lock()
    }
}
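
/// MSR bitmap backing a VMCS. Per the Intel SDM the 4 KiB page is split into four 1 KiB
/// regions: read bitmap for low MSRs (0x0..=0x1fff), read bitmap for high MSRs
/// (0xc0000000..=0xc0001fff), then the corresponding write bitmaps; a set bit causes the
/// matching RDMSR/WRMSR to trigger a VM exit when MSR bitmaps are enabled.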
#[derive(Debug)]
pub struct VmxMsrBitmap {
    data: AllocBitmap,
    phys_addr: usize,
}

pub enum VmxMsrBitmapAction {
    Test,
    Set,
    Clear,
}

pub enum VmxMsrBitmapAccess {
    Write,
    Read,
}

impl VmxMsrBitmapAccess {
    pub const fn base(&self) -> usize {
        match self {
            // The write bitmaps start 0x800 bytes into the page; the offset here is in bits.
            VmxMsrBitmapAccess::Write => 0x800 * core::mem::size_of::<usize>(),
            VmxMsrBitmapAccess::Read => 0,
        }
    }
}

impl VmxMsrBitmap {
    pub fn new(init_val: bool, size: usize) -> Self {
        let mut data = AllocBitmap::new(size);
        data.set_all(init_val);

        let addr = data.data() as *const [usize] as *const usize as usize;
        Self {
            data,
            phys_addr: unsafe { MMArch::virt_2_phys(VirtAddr::new(addr)).unwrap().data() },
        }
    }

    pub fn phys_addr(&self) -> usize {
        self.phys_addr
    }

    pub fn ctl(
        &mut self,
        msr: u32,
        action: VmxMsrBitmapAction,
        access: VmxMsrBitmapAccess,
    ) -> bool {
        if msr <= 0x1fff {
            return self.bit_op(msr as usize, access.base(), action);
        } else if (0xc0000000..=0xc0001fff).contains(&msr) {
            // FIXME: the offset calculation here is suspect and needs a follow-up review.
            // https://code.dragonos.org.cn/xref/linux-6.6.21/arch/x86/kvm/vmx/vmx.h#450
            return self.bit_op(msr as usize & 0x1fff, access.base() + 0x400, action);
        } else {
            return true;
        }
    }

    fn bit_op(&mut self, msr: usize, base: usize, action: VmxMsrBitmapAction) -> bool {
        match action {
            VmxMsrBitmapAction::Test => {
                let ret = self.data.get(msr + base);
                ret.unwrap_or(false)
            }
            VmxMsrBitmapAction::Set => {
                self.data.set(msr + base, true);
                true
            }
            VmxMsrBitmapAction::Clear => {
                self.data.set(msr + base, false);
                true
            }
        }
    }
}

/// Helpers for decoding VM-exit interruption information.
pub struct VmcsIntrHelper;

impl VmcsIntrHelper {
    pub fn is_nmi(intr_info: &IntrInfo) -> bool {
        return Self::is_intr_type(intr_info, IntrType::INTR_TYPE_NMI_INTR);
    }

    pub fn is_intr_type(intr_info: &IntrInfo, intr_type: IntrType) -> bool {
        return (*intr_info
            & (IntrInfo::INTR_INFO_VALID_MASK | IntrInfo::INTR_INFO_INTR_TYPE_MASK))
            .bits()
            == IntrInfo::INTR_INFO_VALID_MASK.bits() | intr_type.bits();
    }

    pub fn is_external_intr(intr_info: &IntrInfo) -> bool {
        return Self::is_intr_type(intr_info, IntrType::INTR_TYPE_EXT_INTR);
    }
}