mem.rs 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714
  1. use alloc::{
  2. sync::{Arc, Weak},
  3. vec::Vec,
  4. };
  5. use bitmap::AllocBitmap;
  6. use hashbrown::HashMap;
  7. use log::debug;
  8. use system_error::SystemError;
  9. use crate::{
  10. arch::{vm::mmu::kvm_mmu::PAGE_SIZE, MMArch},
  11. libs::{
  12. rbtree::RBTree,
  13. rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard},
  14. spinlock::{SpinLock, SpinLockGuard},
  15. },
  16. mm::{kernel_mapper::KernelMapper, page::EntryFlags, MemoryManagementArch, VirtAddr},
  17. virt::{
  18. kvm::host_mem::PAGE_SHIFT,
  19. vm::{kvm_host::KVM_ADDRESS_SPACE_NUM, user_api::KvmUserspaceMemoryRegion},
  20. },
  21. };
  22. use super::{LockedVm, Vm};
/// Total number of slot ids in the u16 id space.
pub const KVM_USER_MEM_SLOTS: u16 = u16::MAX;
/// Slot ids reserved for KVM-internal purposes.
pub const KVM_INTERNAL_MEM_SLOTS: u16 = 3;
/// Number of slot ids available to userspace (user ids minus internal ids).
pub const KVM_MEM_SLOTS_NUM: u16 = KVM_USER_MEM_SLOTS - KVM_INTERNAL_MEM_SLOTS;
/// Maximum number of pages a single memory slot may cover.
pub const KVM_MEM_MAX_NR_PAGES: usize = (1 << 31) - 1;
// pub const APIC_ACCESS_PAGE_PRIVATE_MEMSLOT: u16 = KVM_MEM_SLOTS_NUM + 1;
// For an ordinary page frame number (PFN) the top 12 bits are zero, so bits
// 62..=52 can be masked in to denote an erroneous PFN, and bit 63 to denote a
// "no slot" PFN.  (Kept as plain `//` comments: written as `///` they would
// attach to the next item, which these notes do not describe.)
// const KVM_PFN_ERR_MASK: u64 = 0x7ff << 52; //0x7FF0000000000000
// const KVM_PFN_ERR_NOSLOT_MASK: u64 = 0xfff << 52; //0xFFF0000000000000
// const KVM_PFN_NOSLOT: u64 = 1 << 63; //0x8000000000000000
// const KVM_PFN_ERR_FAULT: u64 = KVM_PFN_ERR_MASK;
// const KVM_PFN_ERR_HWPOISON: u64 = KVM_PFN_ERR_MASK + 1;
// const KVM_PFN_ERR_RO_FAULT: u64 = KVM_PFN_ERR_MASK + 2;
// const KVM_PFN_ERR_SIGPENDING: u64 = KVM_PFN_ERR_MASK + 3;
/// A small cache of pre-allocated objects for the KVM MMU.
///
/// Field names mirror Linux's `struct kvm_mmu_memory_cache`; the allocation
/// machinery is not implemented yet (see the stub in the `impl` below).
#[derive(Debug, Default)]
#[allow(dead_code)]
pub struct KvmMmuMemoryCache {
    // GFP-style allocation flags (unused so far).
    gfp_zero: u32,
    gfp_custom: u32,
    // Total capacity of the cache.
    capacity: usize,
    // Number of objects currently held.
    nobjs: usize,
    // Backing storage; `None` until the cache is topped up.
    objects: Option<Vec<u8>>,
}
impl KvmMmuMemoryCache {
    /// Ensure the cache holds at least `_min` pre-allocated objects.
    ///
    /// Currently a stub that always returns `Ok(())`; the Linux-derived
    /// allocation logic is kept below as commented-out reference code.
    ///
    /// NOTE(review): "totup" is most likely a typo for "topup"
    /// (cf. Linux `kvm_mmu_topup_memory_cache`); renaming would change the
    /// public API, so it is only flagged here.
    #[allow(dead_code)]
    pub fn kvm_mmu_totup_memory_cache(
        &mut self,
        _capacity: usize,
        _min: usize,
    ) -> Result<(), SystemError> {
        // let gfp = if self.gfp_custom != 0 {
        //     self.gfp_custom
        // } else {
        //     todo!();
        // };
        // if self.nobjs >= min {
        //     return Ok(());
        // }
        // if unlikely(self.objects.is_none()) {
        //     if self.capacity == 0 {
        //         return Err(SystemError::EIO);
        //     }
        //     // self.objects = Some(Box::new)
        // }
        Ok(())
    }
}
/// A closed host-virtual-address interval `[start, last]`, used as the key
/// of the per-slot-set HVA tree.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default)]
pub struct AddrRange {
    /// First address of the range (inclusive).
    pub start: VirtAddr,
    /// Last address of the range (inclusive).
    pub last: VirtAddr,
}
/// One generation of the memory-slot lookup structures for a single guest
/// address space.
#[derive(Debug, Default)]
pub struct KvmMemSlotSet {
    /// Most recently used memory slot.
    pub last_use: Option<Arc<LockedKvmMemSlot>>,
    /// Maps host virtual address (HVA) ranges to memory slots.
    hva_tree: RBTree<AddrRange, Arc<LockedKvmMemSlot>>,
    /// Maps guest frame numbers (GFN) to memory slots.
    pub gfn_tree: RBTree<u64, Arc<LockedKvmMemSlot>>,
    /// Maps slot ids to the corresponding memory slot.
    slots: HashMap<u16, Arc<LockedKvmMemSlot>>,
    /// Index (0 or 1) of this set within the active/inactive pair
    /// (see `Vm::get_inactive_memslot_set`, which toggles with `^ 1`).
    pub node_idx: usize,
    /// Generation counter of this slot set.
    pub generation: u64,
}
  89. impl KvmMemSlotSet {
  90. pub fn get_slot(&self, id: u16) -> Option<Arc<LockedKvmMemSlot>> {
  91. self.slots.get(&id).cloned()
  92. }
  93. }
/// A [`KvmMemSlot`] protected by a read-write lock, shared via `Arc`.
#[derive(Debug)]
pub struct LockedKvmMemSlot {
    inner: RwLock<KvmMemSlot>,
}
impl LockedKvmMemSlot {
    /// Allocate a default-initialized slot wrapped in a read-write lock.
    pub fn new() -> Arc<Self> {
        Arc::new(Self {
            inner: RwLock::new(KvmMemSlot::default()),
        })
    }

    /// Acquire the slot's read lock.
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<KvmMemSlot> {
        self.inner.read()
    }

    /// Acquire the slot's write lock.
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<KvmMemSlot> {
        self.inner.write()
    }

    /// Copy `other`'s contents into `self`, field by field.
    ///
    /// Takes `self`'s write lock and then `other`'s read lock, so the two
    /// must be distinct slots or the caller deadlocks on its own lock.
    ///
    /// NOTE(review): `hva_node_key` is not copied — confirm that callers
    /// (e.g. `Vm::invalidate_memslot`) never rely on the copy carrying the
    /// HVA-tree keys.
    #[inline]
    pub fn copy_from(&self, other: &Arc<LockedKvmMemSlot>) {
        let mut guard = self.write();
        let other = other.read();
        guard.base_gfn = other.base_gfn;
        guard.npages = other.npages;
        guard.dirty_bitmap = other.dirty_bitmap.clone();
        guard.arch = other.arch;
        guard.userspace_addr = other.userspace_addr;
        guard.flags = other.flags;
        guard.id = other.id;
        guard.as_id = other.as_id;
    }
}
/// Describes one guest memory slot: a contiguous GFN range backed by host
/// userspace memory.
#[derive(Debug, Default)]
pub struct KvmMemSlot {
    /// First guest frame number covered by this slot.
    pub base_gfn: u64,
    /// Number of pages in the slot.
    pub npages: usize,
    /// Dirty-page bitmap (present when dirty logging is enabled).
    dirty_bitmap: Option<AllocBitmap>,
    /// Architecture-specific data (currently a unit placeholder).
    arch: (),
    /// Host userspace base address backing this slot.
    userspace_addr: VirtAddr,
    /// Region flags (dirty logging, read-only, invalid marker).
    flags: UserMemRegionFlag,
    /// Slot id.
    id: u16,
    /// Address-space id this slot belongs to.
    as_id: u16,
    /// Interval keys for the two HVA trees (one per active/inactive set).
    hva_node_key: [AddrRange; 2],
}
  142. #[allow(dead_code)]
  143. impl KvmMemSlot {
  144. pub fn check_aligned_addr(&self, align: usize) -> bool {
  145. self.userspace_addr.data() % align == 0
  146. }
  147. pub fn get_flags(&self) -> UserMemRegionFlag {
  148. self.flags
  149. }
  150. pub fn get_id(&self) -> u16 {
  151. self.id
  152. }
  153. // 检查内存槽是否可见
  154. pub fn is_visible(&self) -> bool {
  155. self.id < KVM_USER_MEM_SLOTS
  156. && (self.flags.bits() & UserMemRegionFlag::KVM_MEMSLOT_INVALID.bits()) == 0
  157. }
  158. }
/// A [`KvmMemSlotSet`] protected by a spinlock, shared via `Arc`.
#[derive(Debug)]
pub struct LockedVmMemSlotSet {
    inner: SpinLock<KvmMemSlotSet>,
}
  163. impl LockedVmMemSlotSet {
  164. pub fn new(slots: KvmMemSlotSet) -> Arc<Self> {
  165. Arc::new(Self {
  166. inner: SpinLock::new(slots),
  167. })
  168. }
  169. pub fn lock(&self) -> SpinLockGuard<KvmMemSlotSet> {
  170. self.inner.lock()
  171. }
  172. }
/// Cached translation state from a guest physical address to host addresses.
#[derive(Debug, Default)]
#[allow(dead_code)]
pub struct GfnToHvaCache {
    /// Memslot generation this entry was derived from.
    generation: u64,
    /// Guest physical address.
    gpa: u64,
    /// Userspace host virtual address.
    uhva: Option<u64>,
    /// Kernel host virtual address.
    khva: u64,
    /// Memslot backing the translation.
    memslot: Option<Arc<LockedKvmMemSlot>>,
    /// Host page frame number.
    pfn: Option<u64>,
    /// How this cache entry's PFN is used (guest/host).
    usage: PfnCacheUsage,
    /// Whether the entry is active.
    active: bool,
    /// Whether the entry is valid.
    valid: bool,
    /// Owning VM (weak to avoid a reference cycle).
    vm: Option<Weak<LockedVm>>,
}
  195. impl GfnToHvaCache {
  196. pub fn init(vm: Weak<LockedVm>, usage: PfnCacheUsage) -> Self {
  197. // check_stack_usage();
  198. // let mut ret: Box<GfnToHvaCache> = unsafe { Box::new_zeroed().assume_init() };
  199. // ret.usage = usage;
  200. // ret.vm = Some(vm);
  201. // *ret
  202. Self {
  203. usage,
  204. vm: Some(vm),
  205. ..Default::default()
  206. }
  207. }
  208. }
bitflags! {
    /// How a cached PFN is consumed: by the guest, the host, or both.
    #[derive(Default)]
    pub struct PfnCacheUsage: u8 {
        const GUEST_USES_PFN = 1 << 0;
        const HOST_USES_PFN = 1 << 1;
        const GUEST_AND_HOST_USES_PFN = Self::GUEST_USES_PFN.bits | Self::HOST_USES_PFN.bits;
    }

    /// Flags of a userspace memory region (modelled on Linux's KVM_MEM_* flags).
    pub struct UserMemRegionFlag: u32 {
        /// Enable dirty-page logging for the region.
        const LOG_DIRTY_PAGES = 1 << 0;
        /// Map the region read-only.
        const READONLY = 1 << 1;
        /// Marks the slot as invalid (e.g. while being deleted or moved).
        const KVM_MEMSLOT_INVALID = 1 << 16;
    }
}
  225. impl Default for UserMemRegionFlag {
  226. fn default() -> Self {
  227. Self::empty()
  228. }
  229. }
/// The kind of change a `KVM_SET_USER_MEMORY_REGION`-style request applies
/// to a memory slot.
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum KvmMemoryChangeMode {
    /// A new slot is created.
    Create,
    /// An existing slot is removed.
    Delete,
    /// An existing slot changes its base GFN.
    Move,
    /// Only the slot's flags change.
    FlagsOnly,
}
  237. impl Vm {
  238. #[inline(never)]
  239. pub fn set_memory_region(&mut self, mem: KvmUserspaceMemoryRegion) -> Result<(), SystemError> {
  240. if mem.slot >= u16::MAX as u32 {
  241. return Err(SystemError::EINVAL);
  242. }
  243. let as_id = mem.slot >> 16;
  244. let id = mem.slot as u16;
  245. // 检查内存对齐以及32位检测(虽然现在没什么用<)
  246. if (mem.memory_size as usize & MMArch::PAGE_SIZE != 0)
  247. || mem.memory_size != mem.memory_size as usize as u64
  248. {
  249. return Err(SystemError::EINVAL);
  250. }
  251. if !mem.guest_phys_addr.check_aligned(MMArch::PAGE_SIZE) {
  252. return Err(SystemError::EINVAL);
  253. }
  254. if !mem.userspace_addr.check_aligned(MMArch::PAGE_SIZE) {
  255. // 这里应该还需要判断从userspace_addr->userspace_addr+memory_size这段区间都是合法的
  256. return Err(SystemError::EINVAL);
  257. }
  258. if as_id >= KVM_ADDRESS_SPACE_NUM as u32 || id >= KVM_MEM_SLOTS_NUM {
  259. return Err(SystemError::EINVAL);
  260. }
  261. if (mem.memory_size >> MMArch::PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES as u64 {
  262. return Err(SystemError::EINVAL);
  263. }
  264. let slots = self.memslot_set(as_id as usize).clone();
  265. let slots_guard = slots.lock();
  266. let old = slots_guard.get_slot(id);
  267. if mem.memory_size == 0 {
  268. if let Some(old) = &old {
  269. let old_npages = old.read().npages;
  270. if old_npages == 0 {
  271. return Err(SystemError::EINVAL);
  272. }
  273. if self.nr_memslot_pages < old_npages {
  274. return Err(SystemError::EIO);
  275. }
  276. drop(slots_guard);
  277. return self.set_memslot(Some(old), None, KvmMemoryChangeMode::Delete);
  278. } else {
  279. return Err(SystemError::EINVAL);
  280. }
  281. }
  282. let base_gfn = (mem.guest_phys_addr.data() >> MMArch::PAGE_SHIFT) as u64;
  283. let npages = mem.memory_size >> MMArch::PAGE_SHIFT;
  284. let change;
  285. if let Some(old) = &old {
  286. let old_guard = old.read();
  287. if old_guard.npages == 0 {
  288. change = KvmMemoryChangeMode::Create;
  289. // 避免溢出
  290. if let Some(new_pages) = self.nr_memslot_pages.checked_add(npages as usize) {
  291. if new_pages < self.nr_memslot_pages {
  292. return Err(SystemError::EINVAL);
  293. }
  294. } else {
  295. return Err(SystemError::EINVAL);
  296. }
  297. } else {
  298. if mem.userspace_addr != old_guard.userspace_addr
  299. || npages != old_guard.npages as u64
  300. || (mem.flags ^ old_guard.flags).contains(UserMemRegionFlag::READONLY)
  301. {
  302. return Err(SystemError::EINVAL);
  303. }
  304. if base_gfn != old_guard.base_gfn {
  305. change = KvmMemoryChangeMode::Move;
  306. } else if mem.flags != old_guard.flags {
  307. change = KvmMemoryChangeMode::FlagsOnly;
  308. } else {
  309. return Ok(());
  310. }
  311. }
  312. } else {
  313. change = KvmMemoryChangeMode::Create;
  314. // 避免溢出
  315. if let Some(new_pages) = self.nr_memslot_pages.checked_add(npages as usize) {
  316. if new_pages < self.nr_memslot_pages {
  317. return Err(SystemError::EINVAL);
  318. }
  319. } else {
  320. return Err(SystemError::EINVAL);
  321. }
  322. };
  323. if (change == KvmMemoryChangeMode::Create || change == KvmMemoryChangeMode::Move)
  324. && slots_guard.gfn_tree.contains_key(&base_gfn)
  325. {
  326. return Err(SystemError::EEXIST);
  327. }
  328. let new = LockedKvmMemSlot::new();
  329. let mut new_guard = new.write();
  330. new_guard.as_id = as_id as u16;
  331. new_guard.id = id;
  332. new_guard.base_gfn = base_gfn;
  333. new_guard.npages = npages as usize;
  334. new_guard.flags = mem.flags;
  335. new_guard.userspace_addr = mem.userspace_addr;
  336. drop(new_guard);
  337. drop(slots_guard);
  338. return self.set_memslot(old.as_ref(), Some(&new), change);
  339. }
  340. #[allow(clippy::modulo_one)]
  341. #[inline]
  342. /// 获取活动内存插槽
  343. fn memslot_set(&self, id: usize) -> &Arc<LockedVmMemSlotSet> {
  344. // 避免越界
  345. let id = id % KVM_ADDRESS_SPACE_NUM;
  346. &self.memslots[id]
  347. }
  348. #[inline(never)]
  349. fn set_memslot(
  350. &mut self,
  351. old: Option<&Arc<LockedKvmMemSlot>>,
  352. new: Option<&Arc<LockedKvmMemSlot>>,
  353. change: KvmMemoryChangeMode,
  354. ) -> Result<(), SystemError> {
  355. let invalid_slot = LockedKvmMemSlot::new();
  356. if change == KvmMemoryChangeMode::Delete || change == KvmMemoryChangeMode::Move {
  357. self.invalidate_memslot(old.unwrap(), &invalid_slot)
  358. }
  359. match self.prepare_memory_region(old, new, change) {
  360. Ok(_) => {}
  361. Err(e) => {
  362. if change == KvmMemoryChangeMode::Delete || change == KvmMemoryChangeMode::Move {
  363. self.active_memslot(Some(&invalid_slot), old)
  364. }
  365. return Err(e);
  366. }
  367. }
  368. match change {
  369. KvmMemoryChangeMode::Create => self.create_memslot(new),
  370. KvmMemoryChangeMode::Delete => self.delete_memslot(old, &invalid_slot),
  371. KvmMemoryChangeMode::Move => self.move_memslot(old, new, &invalid_slot),
  372. KvmMemoryChangeMode::FlagsOnly => self.update_flags_memslot(old, new),
  373. }
  374. // TODO:kvm_commit_memory_region(kvm, old, new, change);
  375. Ok(())
  376. }
  377. fn create_memslot(&mut self, new: Option<&Arc<LockedKvmMemSlot>>) {
  378. self.replace_memslot(None, new);
  379. self.active_memslot(None, new);
  380. }
  381. fn delete_memslot(
  382. &mut self,
  383. old: Option<&Arc<LockedKvmMemSlot>>,
  384. invalid_slot: &Arc<LockedKvmMemSlot>,
  385. ) {
  386. self.replace_memslot(old, None);
  387. self.active_memslot(Some(invalid_slot), None);
  388. }
  389. fn move_memslot(
  390. &mut self,
  391. old: Option<&Arc<LockedKvmMemSlot>>,
  392. new: Option<&Arc<LockedKvmMemSlot>>,
  393. invalid_slot: &Arc<LockedKvmMemSlot>,
  394. ) {
  395. self.replace_memslot(old, new);
  396. self.active_memslot(Some(invalid_slot), new);
  397. }
  398. fn update_flags_memslot(
  399. &mut self,
  400. old: Option<&Arc<LockedKvmMemSlot>>,
  401. new: Option<&Arc<LockedKvmMemSlot>>,
  402. ) {
  403. self.replace_memslot(old, new);
  404. self.active_memslot(old, new);
  405. }
  406. fn prepare_memory_region(
  407. &self,
  408. old: Option<&Arc<LockedKvmMemSlot>>,
  409. new: Option<&Arc<LockedKvmMemSlot>>,
  410. change: KvmMemoryChangeMode,
  411. ) -> Result<(), SystemError> {
  412. if change != KvmMemoryChangeMode::Delete {
  413. let new = new.unwrap();
  414. let mut new_guard = new.write();
  415. if !new_guard.flags.contains(UserMemRegionFlag::LOG_DIRTY_PAGES) {
  416. new_guard.dirty_bitmap = None;
  417. } else if old.is_some() {
  418. let old_guard = old.unwrap().read();
  419. if old_guard.dirty_bitmap.is_some() {
  420. new_guard.dirty_bitmap = old_guard.dirty_bitmap.clone();
  421. } else {
  422. new_guard.dirty_bitmap = Some(AllocBitmap::new(new_guard.npages * 2));
  423. }
  424. }
  425. }
  426. return self.arch_prepare_memory_region(old, new, change);
  427. }
  428. fn invalidate_memslot(
  429. &mut self,
  430. old: &Arc<LockedKvmMemSlot>,
  431. invalid_slot: &Arc<LockedKvmMemSlot>,
  432. ) {
  433. invalid_slot.copy_from(old);
  434. let mut old_guard = old.write();
  435. let mut invalid_slot_guard = invalid_slot.write();
  436. invalid_slot_guard
  437. .flags
  438. .insert(UserMemRegionFlag::KVM_MEMSLOT_INVALID);
  439. self.swap_active_memslots(old_guard.as_id as usize);
  440. old_guard.arch = invalid_slot_guard.arch;
  441. }
  442. #[inline(never)]
  443. fn active_memslot(
  444. &mut self,
  445. old: Option<&Arc<LockedKvmMemSlot>>,
  446. new: Option<&Arc<LockedKvmMemSlot>>,
  447. ) {
  448. let as_id = if let Some(slot) = old.or(new) {
  449. slot.read().as_id
  450. } else {
  451. 0
  452. };
  453. self.swap_active_memslots(as_id as usize);
  454. self.replace_memslot(old, new);
  455. }
  456. #[inline(never)]
  457. fn replace_memslot(
  458. &self,
  459. old: Option<&Arc<LockedKvmMemSlot>>,
  460. new: Option<&Arc<LockedKvmMemSlot>>,
  461. ) {
  462. let as_id = if let Some(slot) = old.or(new) {
  463. slot.read().as_id
  464. } else {
  465. 0
  466. };
  467. let slot_set = self.get_inactive_memslot_set(as_id as usize);
  468. let mut slots_guard = slot_set.lock();
  469. let idx = slots_guard.node_idx;
  470. if let Some(old) = old {
  471. slots_guard.hva_tree.remove(&old.read().hva_node_key[idx]);
  472. if let Some(last) = &slots_guard.last_use {
  473. if Arc::ptr_eq(last, old) {
  474. slots_guard.last_use = new.cloned();
  475. }
  476. }
  477. if new.is_none() {
  478. slots_guard.gfn_tree.remove(&old.read().base_gfn);
  479. return;
  480. }
  481. }
  482. let new = new.unwrap();
  483. let mut new_guard = new.write();
  484. new_guard.hva_node_key[idx].start = new_guard.userspace_addr;
  485. new_guard.hva_node_key[idx].last =
  486. new_guard.userspace_addr + VirtAddr::new((new_guard.npages << MMArch::PAGE_SHIFT) - 1);
  487. slots_guard
  488. .hva_tree
  489. .insert(new_guard.hva_node_key[idx], new.clone());
  490. if let Some(old) = old {
  491. slots_guard.gfn_tree.remove(&old.read().base_gfn);
  492. }
  493. slots_guard.gfn_tree.insert(new_guard.base_gfn, new.clone());
  494. }
  495. fn get_inactive_memslot_set(&self, as_id: usize) -> Arc<LockedVmMemSlotSet> {
  496. let active = self.memslot_set(as_id);
  497. let inactive_idx = active.lock().node_idx ^ 1;
  498. return self.memslots_set[as_id][inactive_idx].clone();
  499. }
  500. fn swap_active_memslots(&mut self, as_id: usize) {
  501. self.memslots[as_id] = self.get_inactive_memslot_set(as_id);
  502. }
  503. }
  504. /// 将给定的客户机帧号(GFN)转换为用户空间虚拟地址(HVA),并根据内存槽的状态和标志进行相应的检查。
  505. ///
  506. /// # 参数
  507. /// - `slot`: 可选的 `KvmMemSlot`,表示内存槽。
  508. /// - `gfn`: 客户机帧号(GFN),表示要转换的帧号。
  509. /// - `nr_pages`: 可选的可变引用,用于存储计算出的页数。
  510. /// - `write`: 布尔值,表示是否为写操作。
  511. ///
  512. /// # 返回
  513. /// 如果成功,返回转换后的用户空间虚拟地址(HVA);如果失败,返回相应的错误。
  514. ///
  515. /// # 错误
  516. /// 如果内存槽为空或无效,或者尝试对只读内存槽进行写操作,则返回 `SystemError::KVM_HVA_ERR_BAD`。
  517. pub fn __gfn_to_hva_many(
  518. slot: &Option<&KvmMemSlot>,
  519. gfn: u64,
  520. nr_pages: Option<&mut u64>,
  521. write: bool,
  522. ) -> Result<u64, SystemError> {
  523. debug!("__gfn_to_hva_many");
  524. // 检查内存槽是否为空
  525. if slot.is_none() {
  526. return Err(SystemError::KVM_HVA_ERR_BAD);
  527. }
  528. let slot = slot.as_ref().unwrap();
  529. // 检查内存槽是否无效或尝试对只读内存槽进行写操作
  530. if slot.flags.bits() & UserMemRegionFlag::KVM_MEMSLOT_INVALID.bits() != 0
  531. || (slot.flags.bits() & UserMemRegionFlag::READONLY.bits() != 0) && write
  532. {
  533. return Err(SystemError::KVM_HVA_ERR_BAD);
  534. }
  535. // 如果 `nr_pages` 不为空,计算并更新页数
  536. if let Some(nr_pages) = nr_pages {
  537. *nr_pages = slot.npages as u64 - (gfn - slot.base_gfn);
  538. }
  539. // 调用辅助函数将 GFN 转换为 HVA
  540. return Ok(__gfn_to_hva_memslot(slot, gfn));
  541. }
  542. /// 将给定的全局帧号(GFN)转换为用户空间虚拟地址(HVA)。
  543. ///
  544. /// # 参数
  545. /// - `slot`: `KvmMemSlot`,表示内存槽。
  546. /// - `gfn`: 全局帧号(GFN),表示要转换的帧号。
  547. ///
  548. /// # 返回
  549. /// 转换后的用户空间虚拟地址(HVA)。
  550. fn __gfn_to_hva_memslot(slot: &KvmMemSlot, gfn: u64) -> u64 {
  551. return slot.userspace_addr.data() as u64 + (gfn - slot.base_gfn) * PAGE_SIZE;
  552. }
/// Translate a guest frame number (GFN) to a host page frame number (PFN),
/// checking the memslot's state and flags along the way.
///
/// # Parameters
/// - `slot`: memslot to translate through; `None` fails with `KVM_HVA_ERR_BAD`.
/// - `gfn`: guest frame number to translate.
/// - `atomic_or_async`: `(atomic, is_async)` — whether the lookup must not
///   sleep, plus an out-flag for asynchronous handling; the two are mutually
///   exclusive (asserted in `hva_to_pfn`).
/// - `interruptible`: whether the operation may be interrupted.
/// - `write`: whether the access is a write.
/// - `writable`: out-parameter; cleared when the slot is read-only.
/// - `hva`: out-parameter; receives the intermediate host virtual address.
///
/// # Returns
/// The translated PFN on success, a `SystemError` otherwise.
pub fn __gfn_to_pfn_memslot(
    slot: Option<&KvmMemSlot>,
    gfn: u64,
    atomic_or_async: (bool, &mut bool),
    interruptible: bool,
    write: bool,
    writable: &mut bool,
    hva: &mut u64,
) -> Result<u64, SystemError> {
    let addr = __gfn_to_hva_many(&slot, gfn, None, write)?;
    *hva = addr;
    // TODO: check whether `addr` is an error address.
    // `slot` is necessarily `Some` here: a `None` slot already returned
    // KVM_HVA_ERR_BAD from `__gfn_to_hva_many` above.
    // A read-only slot can never be written through, so report that.
    if slot.unwrap().flags.bits() & UserMemRegionFlag::READONLY.bits() != 0 {
        *writable = false;
    }
    let pfn = hva_to_pfn(addr, atomic_or_async, interruptible, write, writable)?;
    return Ok(pfn);
}
/// Translate a host virtual address to a page frame number (PFN).
///
/// The address is looked up in the kernel mapper; when it is not mapped yet,
/// a new MMIO-flagged mapping is created for it and that mapping's PFN is
/// returned.
///
/// # Parameters
/// - `addr`: host virtual address to translate.
/// - `atomic_or_async`: `(atomic, is_async)`; asserted mutually exclusive.
/// - `_interruptible`, `_write_fault`, `_writable`: currently unused.
///
/// # Returns
/// The PFN on success.
///
/// # Panics
/// When `atomic_or_async` requests both atomic and async operation.
// Correctness still to be verified (original author's remark).
pub fn hva_to_pfn(
    addr: u64,
    atomic_or_async: (bool, &mut bool),
    _interruptible: bool,
    _write_fault: bool,
    _writable: &mut bool,
) -> Result<u64, SystemError> {
    // We can run atomically or asynchronously, but not both at once.
    assert!(
        !(atomic_or_async.0 && *atomic_or_async.1),
        "Cannot be both atomic and async"
    );
    debug!("hva_to_pfn");
    // let hpa = MMArch::virt_2_phys(VirtAddr::new(addr)).unwrap().data() as u64;
    let hva = VirtAddr::new(addr as usize);
    let mut mapper = KernelMapper::lock();
    let mapper = mapper.as_mut().unwrap();
    // Fast path: the address is already mapped.
    if let Some((hpa, _)) = mapper.translate(hva) {
        return Ok(hpa.data() as u64 >> PAGE_SHIFT);
    }
    debug!("hva_to_pfn NOT FOUND,try map a new pfn");
    // NOTE(review): the result of `map` (a page-table flusher) is discarded,
    // so the TLB is not explicitly flushed for the new mapping — confirm
    // this is intended.
    unsafe {
        mapper.map(hva, EntryFlags::mmio_flags());
    }
    // The mapping was just created, so translation must now succeed.
    let (hpa, _) = mapper.translate(hva).unwrap();
    return Ok(hpa.data() as u64 >> PAGE_SHIFT);
}