mod.rs

use core::{
    fmt::Debug,
    sync::atomic::{AtomicUsize, Ordering},
};

use alloc::{
    boxed::Box,
    sync::{Arc, Weak},
    vec::Vec,
};

use hashbrown::HashMap;
use log::debug;
use mem::LockedKvmMemSlot;
use system_error::SystemError;

use crate::{
    arch::{
        vm::{kvm_host::vcpu::VirtCpuRequest, vmx::KvmVmx, x86_kvm_manager},
        CurrentKvmManager, KvmArch, VirtCpuArch,
    },
    filesystem::vfs::file::{File, FileMode},
    libs::spinlock::{SpinLock, SpinLockGuard},
    mm::ucontext::AddressSpace,
    process::ProcessManager,
    smp::cpu::ProcessorId,
    virt::vm::{
        kvm_dev::KvmVcpuDev,
        kvm_host::vcpu::{LockedVirtCpu, VirtCpu},
    },
};

use self::{
    mem::{GfnToHvaCache, KvmMemSlotSet, LockedVmMemSlotSet, PfnCacheUsage},
    vcpu::{GuestDebug, VcpuMode},
};

pub mod mem;
pub mod vcpu;
const KVM_ADDRESS_SPACE_NUM: usize = 1;

pub const KVM_USERSAPCE_IRQ_SOURCE_ID: usize = 0;
pub const KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID: usize = 1;
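
/// A virtual machine whose state is protected by a spinlock; all access to the
/// inner [`Vm`] goes through [`LockedVm::lock`].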
#[derive(Debug)]
pub struct LockedVm {
    inner: SpinLock<Vm>,
}
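
/// Global count of hardware-enable requests (one per created VM); the transition
/// from 0 to 1 triggers the actual hardware enable.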
static KVM_USAGE_COUNT: AtomicUsize = AtomicUsize::new(0);

impl LockedVm {
    pub fn lock(&self) -> SpinLockGuard<Vm> {
        self.inner.lock()
    }
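
    /// Create a new VM of the given `vm_type`.
    ///
    /// For each address space this builds a pair of memslot sets (active and
    /// inactive), clones the current process's address space for the guest, and
    /// enables the virtualization hardware on first use.
    ///
    /// Usage sketch (hypothetical caller, not part of this module):
    ///
    /// ```ignore
    /// let vm = LockedVm::create(0)?;           // 0: default vm_type
    /// let vcpu_fd = vm.lock().create_vcpu(0)?; // fd referring to the new vCPU
    /// ```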
    pub fn create(vm_type: usize) -> Result<Arc<Self>, SystemError> {
        let mut memslots_set = vec![];
        let mut memslots = vec![];
        for i in 0..KVM_ADDRESS_SPACE_NUM {
            let mut tmp = vec![];
            for j in 0..2 {
                let mut slots = KvmMemSlotSet::default();
                slots.last_use = None;
                slots.node_idx = j;
                slots.generation = i as u64;
                tmp.push(LockedVmMemSlotSet::new(slots));
            }
            memslots_set.push(tmp);
            memslots.push(memslots_set[i][0].clone());
        }

        let kvm = Vm {
            mm: ProcessManager::current_pcb()
                .basic()
                .user_vm()
                .unwrap()
                .write()
                .try_clone()?,
            max_vcpus: CurrentKvmManager::KVM_MAX_VCPUS,
            memslots_set,
            memslots,
            arch: KvmArch::init(vm_type)?,
            created_vcpus: 0,
            lock_vm_ref: Weak::new(),
            nr_memslot_pages: 0,
            online_vcpus: 0,
            dirty_ring_size: 0,
            dirty_ring_with_bitmap: false,
            vcpus: HashMap::new(),
            #[cfg(target_arch = "x86_64")]
            kvm_vmx: KvmVmx::default(),
            nr_memslots_dirty_logging: 0,
            mmu_invalidate_seq: 0,
        };

        let ret = Arc::new(Self {
            inner: SpinLock::new(kvm),
        });
        Self::hardware_enable_all()?;
        ret.lock().lock_vm_ref = Arc::downgrade(&ret);
        return Ok(ret);
    }
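
    /// Account a new user of the virtualization hardware and enable it on first use.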
    fn hardware_enable_all() -> Result<(), SystemError> {
        // Use the value returned by `fetch_add` so that the increment and the
        // "first user" check form a single atomic step.
        if KVM_USAGE_COUNT.fetch_add(1, Ordering::SeqCst) == 0 {
            // If this is the first VM being brought up, the hardware must be
            // initialized on every CPU.
            // FIXME!!!!
            // This should run the init flow on each CPU; currently only the
            // current CPU is initialized.
            x86_kvm_manager().arch_hardware_enable()?;
        }
        Ok(())
    }
}
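
/// Per-VM state: the guest address space, memory slots, vCPU table, dirty-ring
/// configuration, and architecture-specific state.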
#[derive(Debug)]
#[allow(dead_code)]
pub struct Vm {
    lock_vm_ref: Weak<LockedVm>,
    mm: Arc<AddressSpace>,
    max_vcpus: usize,
    created_vcpus: usize,
    online_vcpus: usize,
    /// Table of vCPUs belonging to this VM.
    vcpus: HashMap<usize, Arc<LockedVirtCpu>>,
    // name: String,
    /// Active and inactive memslot sets per address space. Conceptually
    /// `[[Arc<LockedVmMemSlotSet>; 2]; KVM_ADDRESS_SPACE_NUM]`; kept as a `Vec` for now.
    memslots_set: Vec<Vec<Arc<LockedVmMemSlotSet>>>,
    /// The currently active memslot set per address space. Conceptually
    /// `[Arc<LockedVmMemSlotSet>; KVM_ADDRESS_SPACE_NUM]`; kept as a `Vec` for now.
    pub memslots: Vec<Arc<LockedVmMemSlotSet>>,
    /// Number of pages covered by the memory slots.
    nr_memslot_pages: usize,
    pub arch: KvmArch,
    pub dirty_ring_size: u32,
    pub nr_memslots_dirty_logging: u32,
    dirty_ring_with_bitmap: bool,
    #[cfg(target_arch = "x86_64")]
    pub kvm_vmx: KvmVmx,
    /// MMU invalidation sequence number.
    pub mmu_invalidate_seq: u64,
}

impl Vm {
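    /// Create the vCPU with the given `id`, register it in the vCPU table, wrap it
    /// in a [`KvmVcpuDev`] inode, and return the file descriptor allocated for it.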
    #[inline(never)]
    pub fn create_vcpu(&mut self, id: usize) -> Result<usize, SystemError> {
        if id >= self.max_vcpus {
            return Err(SystemError::EINVAL);
        }

        if self.created_vcpus >= self.max_vcpus {
            return Err(SystemError::EINVAL);
        }

        self.created_vcpus += 1;

        let vcpu = self._create_vcpu(id)?;
        if self.dirty_ring_size != 0 {
            todo!()
        }

        vcpu.lock().vcpu_id = self.online_vcpus;
        self.vcpus.insert(self.online_vcpus, vcpu.clone());
        self.online_vcpus += 1;

        let vcpu_inode = KvmVcpuDev::new(vcpu);
        let file = File::new(vcpu_inode, FileMode::from_bits_truncate(0x777))?;
        let fd = ProcessManager::current_pcb()
            .fd_table()
            .write()
            .alloc_fd(file, None)?;
        Ok(fd as usize)
    }

    /// ### Create a vCPU and initialize part of its state.
    #[inline(never)]
    pub fn _create_vcpu(&mut self, id: usize) -> Result<Arc<LockedVirtCpu>, SystemError> {
        let mut vcpu = self.new_vcpu(id);
        vcpu.init_arch(self, id)?;
        Ok(Arc::new(LockedVirtCpu::new(vcpu)))
    }
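
    /// Build a [`VirtCpu`] in its default power-on state, holding a weak reference
    /// back to this VM.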
    #[inline(never)]
    pub fn new_vcpu(&self, id: usize) -> VirtCpu {
        return VirtCpu {
            cpu: ProcessorId::INVALID,
            kvm: Some(self.lock_vm_ref.clone()),
            vcpu_id: id,
            pid: None,
            preempted: false,
            ready: false,
            last_used_slot: None,
            stats_id: format!("kvm-{}/vcpu-{}", ProcessManager::current_pid().data(), id),
            pv_time: GfnToHvaCache::init(self.lock_vm_ref.clone(), PfnCacheUsage::HOST_USES_PFN),
            arch: VirtCpuArch::new(),
            private: None,
            request: VirtCpuRequest::empty(),
            guest_debug: GuestDebug::empty(),
            run: unsafe { Some(Box::new_zeroed().assume_init()) },
            vcpu_idx: 0,
            mode: VcpuMode::OutsideGuestMode,
            stat: Default::default(),
        };
    }

    #[cfg(target_arch = "x86_64")]
    pub fn kvm_vmx_mut(&mut self) -> &mut KvmVmx {
        &mut self.kvm_vmx
    }

    #[cfg(target_arch = "x86_64")]
    pub fn kvm_vmx(&self) -> &KvmVmx {
        &self.kvm_vmx
    }
}

/// ## Multiprocessor state (some states are not valid on every architecture).
///
/// These variants mirror the `KVM_MP_STATE_*` values of the Linux KVM UAPI.
#[derive(Debug, Clone, Copy, PartialEq)]
#[allow(dead_code)]
pub enum MutilProcessorState {
    Runnable,
    Uninitialized,
    InitReceived,
    Halted,
    SipiReceived,
    Stopped,
    CheckStop,
    Operating,
    Load,
    ApResetHold,
    Suspended,
}

/// Returns the memslot containing `gfn`, or `None` if no match is found.
///
/// When `approx` is set to `true`, a memslot is returned even if the address
/// falls into a hole; in that case one of the memslots bordering the hole is
/// returned.
///
/// Simplified for now: this was meant to be a binary search but currently does
/// a linear scan.
pub fn search_memslots(
    slot_set: Arc<LockedVmMemSlotSet>,
    gfn: u64, /*_approx:bool*/
) -> Option<Arc<LockedKvmMemSlot>> {
    let slots = slot_set.lock();
    let node = &slots.gfn_tree;
    for (_gfn_num, slot) in node.iter() {
        let slot_guard = slot.read();
        debug!(
            "gfn: {gfn}, slot base_gfn: {}, slot npages: {}",
            slot_guard.base_gfn, slot_guard.npages
        );
        if gfn >= slot_guard.base_gfn && gfn < slot_guard.base_gfn + slot_guard.npages as u64 {
            return Some(slot.clone());
        }
    }
    return None;
}