// memblock.rs — boot-time physical memory region (memblock) manager
  1. use core::intrinsics::unlikely;
  2. use system_error::SystemError;
  3. use crate::libs::{
  4. align::{page_align_down, page_align_up},
  5. spinlock::{SpinLock, SpinLockGuard},
  6. };
  7. use super::{PhysAddr, PhysMemoryArea};
  8. pub const INITIAL_MEMORY_REGIONS_NUM: usize = 128;
  9. /// 初始内存区域
  10. static MEM_BLOCK_MANAGER: MemBlockManager = MemBlockManager::new();
  11. #[inline(always)]
  12. pub fn mem_block_manager() -> &'static MemBlockManager {
  13. &MEM_BLOCK_MANAGER
  14. }
  15. /// 内存区域管理器
  16. #[derive(Debug)]
  17. pub struct MemBlockManager {
  18. inner: SpinLock<InnerMemBlockManager>,
  19. }
  20. #[derive(Debug)]
  21. pub struct InnerMemBlockManager {
  22. /// 初始内存区域
  23. ///
  24. /// 用于记录内核启动时的内存布局, 这些区域保持升序、不重叠
  25. initial_memory_regions: [PhysMemoryArea; INITIAL_MEMORY_REGIONS_NUM],
  26. initial_memory_regions_num: usize,
  27. }
  28. impl MemBlockManager {
  29. #[allow(dead_code)]
  30. pub const MIN_MEMBLOCK_ADDR: PhysAddr = PhysAddr::new(0);
  31. #[allow(dead_code)]
  32. pub const MAX_MEMBLOCK_ADDR: PhysAddr = PhysAddr::new(usize::MAX);
  33. const fn new() -> Self {
  34. Self {
  35. inner: SpinLock::new(InnerMemBlockManager {
  36. initial_memory_regions: [PhysMemoryArea::DEFAULT; INITIAL_MEMORY_REGIONS_NUM],
  37. initial_memory_regions_num: 0,
  38. }),
  39. }
  40. }
  41. /// 添加内存区域
  42. ///
  43. /// 如果添加的区域与已有区域有重叠,会将重叠的区域合并
  44. #[allow(dead_code)]
  45. pub fn add_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
  46. let r = self.add_range(base, size, MemoryAreaAttr::empty());
  47. return r;
  48. }
  49. /// 添加内存区域
  50. ///
  51. /// 如果添加的区域与已有区域有重叠,会将重叠的区域合并
  52. fn add_range(
  53. &self,
  54. base: PhysAddr,
  55. size: usize,
  56. flags: MemoryAreaAttr,
  57. ) -> Result<(), SystemError> {
  58. if size == 0 {
  59. return Ok(());
  60. }
  61. let mut inner = self.inner.lock();
  62. if inner.initial_memory_regions_num >= INITIAL_MEMORY_REGIONS_NUM {
  63. panic!("Too many memory regions!");
  64. }
  65. let block = PhysMemoryArea::new(base, size, MemoryAreaAttr::empty());
  66. // 特判第一个区域
  67. if inner.initial_memory_regions_num == 0 {
  68. inner.initial_memory_regions[0] = block;
  69. inner.initial_memory_regions_num += 1;
  70. return Ok(());
  71. }
  72. // 先计算需要添加的区域数量
  73. let blocks_to_add = self
  74. .do_add_block(&mut inner, block, false, flags)
  75. .expect("Failed to count blocks to add!");
  76. if inner.initial_memory_regions_num + blocks_to_add > INITIAL_MEMORY_REGIONS_NUM {
  77. kerror!("Too many memory regions!");
  78. return Err(SystemError::ENOMEM);
  79. }
  80. // 然后添加区域
  81. self.do_add_block(&mut inner, block, true, flags)
  82. .expect("Failed to add block!");
  83. return Ok(());
  84. }
  85. fn do_add_block(
  86. &self,
  87. inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
  88. block: PhysMemoryArea,
  89. insert: bool,
  90. flags: MemoryAreaAttr,
  91. ) -> Result<usize, SystemError> {
  92. let mut base = block.base;
  93. let end = block.base + block.size;
  94. let mut i = 0;
  95. let mut start_index = -1;
  96. let mut end_index = -1;
  97. let mut num_to_add = 0;
  98. while i < inner.initial_memory_regions_num {
  99. let range_base = inner.initial_memory_regions[i].base;
  100. let range_end =
  101. inner.initial_memory_regions[i].base + inner.initial_memory_regions[i].size;
  102. if range_base >= end {
  103. break;
  104. }
  105. if range_end <= base {
  106. i += 1;
  107. continue;
  108. }
  109. // 有重叠
  110. if range_base > base {
  111. num_to_add += 1;
  112. if insert {
  113. if start_index == -1 {
  114. start_index = i as isize;
  115. }
  116. end_index = (i + 1) as isize;
  117. self.do_insert_area(inner, i, base, range_base - base, flags);
  118. i += 1;
  119. }
  120. }
  121. i += 1;
  122. base = core::cmp::min(range_end, end);
  123. }
  124. if base < end {
  125. num_to_add += 1;
  126. if insert {
  127. if start_index == -1 {
  128. start_index = i as isize;
  129. }
  130. end_index = (i + 1) as isize;
  131. self.do_insert_area(inner, i, base, end - base, flags);
  132. }
  133. }
  134. if num_to_add == 0 {
  135. return Ok(0);
  136. }
  137. if insert {
  138. self.do_merge_blocks(inner, start_index, end_index);
  139. }
  140. return Ok(num_to_add);
  141. }
  142. fn do_insert_area(
  143. &self,
  144. inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
  145. index: usize,
  146. base: PhysAddr,
  147. size: usize,
  148. flags: MemoryAreaAttr,
  149. ) {
  150. let copy_elements = inner.initial_memory_regions_num - index;
  151. inner
  152. .initial_memory_regions
  153. .copy_within(index..index + copy_elements, index + 1);
  154. inner.initial_memory_regions[index] = PhysMemoryArea::new(base, size, flags);
  155. inner.initial_memory_regions_num += 1;
  156. }
  157. fn do_merge_blocks(
  158. &self,
  159. inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
  160. start_index: isize,
  161. mut end_index: isize,
  162. ) {
  163. let mut i = 0;
  164. if start_index > 0 {
  165. i = start_index - 1;
  166. }
  167. end_index = core::cmp::min(end_index, inner.initial_memory_regions_num as isize - 1);
  168. while i < end_index {
  169. {
  170. let next_base = inner.initial_memory_regions[(i + 1) as usize].base;
  171. let next_size = inner.initial_memory_regions[(i + 1) as usize].size;
  172. let next_flags = inner.initial_memory_regions[(i + 1) as usize].flags;
  173. let this = &mut inner.initial_memory_regions[i as usize];
  174. if this.base + this.size != next_base || this.flags != next_flags {
  175. if unlikely(this.base + this.size > next_base) {
  176. kBUG!("this->base + this->size > next->base");
  177. }
  178. i += 1;
  179. continue;
  180. }
  181. this.size += next_size;
  182. }
  183. // 移动后面的区域
  184. let copy_elements = inner.initial_memory_regions_num - (i + 2) as usize;
  185. inner.initial_memory_regions.copy_within(
  186. (i + 2) as usize..(i as usize + 2 + copy_elements),
  187. (i + 1) as usize,
  188. );
  189. inner.initial_memory_regions_num -= 1;
  190. end_index -= 1;
  191. }
  192. }
  193. /// 移除内存区域
  194. ///
  195. /// 如果移除的区域与已有区域有重叠,会将重叠的区域分割
  196. #[allow(dead_code)]
  197. pub fn remove_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
  198. if size == 0 {
  199. return Ok(());
  200. }
  201. let mut inner = self.inner.lock();
  202. if inner.initial_memory_regions_num == 0 {
  203. return Ok(());
  204. }
  205. let (start_index, end_index) = self
  206. .isolate_range(&mut inner, base, size)
  207. .expect("Failed to isolate range!");
  208. for i in (start_index..end_index).rev() {
  209. self.do_remove_region(&mut inner, i);
  210. }
  211. return Ok(());
  212. }
  213. fn do_remove_region(&self, inner: &mut SpinLockGuard<'_, InnerMemBlockManager>, index: usize) {
  214. let copy_elements = inner.initial_memory_regions_num - index - 1;
  215. inner
  216. .initial_memory_regions
  217. .copy_within(index + 1..index + 1 + copy_elements, index);
  218. inner.initial_memory_regions_num -= 1;
  219. if inner.initial_memory_regions_num == 0 {
  220. inner.initial_memory_regions[0].base = PhysAddr::new(0);
  221. inner.initial_memory_regions[0].size = 0;
  222. }
  223. }
  224. /// 在一个内存块管理器中找到一个物理地址范围内的
  225. /// 空闲块,并隔离出所需的内存大小
  226. ///
  227. /// ## 返回值
  228. ///
  229. /// - Ok((start_index, end_index)) 表示成功找到了一个连续的内存区域来满足所需的 size。这里:
  230. /// - start_index 是指定的起始内存区域的索引。
  231. /// - end_index 是指定的结束内存区域的索引,它实际上不包含在返回的连续区域中,但它标志着下一个可能的不连续区域的开始。
  232. /// - Err(SystemError) 则表示没有找到足够的空间来满足请求的 size,可能是因为内存区域不足或存在其他系统错误
  233. fn isolate_range(
  234. &self,
  235. inner: &mut SpinLockGuard<'_, InnerMemBlockManager>,
  236. base: PhysAddr,
  237. size: usize,
  238. ) -> Result<(usize, usize), SystemError> {
  239. let end = base + size;
  240. let mut idx = 0;
  241. let mut start_index = 0;
  242. let mut end_index = 0;
  243. if size == 0 {
  244. return Ok((0, 0));
  245. }
  246. while idx < inner.initial_memory_regions_num {
  247. let range_base = inner.initial_memory_regions[idx].base;
  248. let range_end = range_base + inner.initial_memory_regions[idx].size;
  249. if range_base >= end {
  250. break;
  251. }
  252. if range_end <= base {
  253. idx = idx.checked_add(1).unwrap_or(0);
  254. continue;
  255. }
  256. if range_base < base {
  257. // regions[idx] intersects from below
  258. inner.initial_memory_regions[idx].base = base;
  259. inner.initial_memory_regions[idx].size -= base - range_base;
  260. self.do_insert_area(
  261. inner,
  262. idx,
  263. range_base,
  264. base - range_base,
  265. inner.initial_memory_regions[idx].flags,
  266. );
  267. } else if range_end > end {
  268. // regions[idx] intersects from above
  269. inner.initial_memory_regions[idx].base = end;
  270. inner.initial_memory_regions[idx].size -= end - range_base;
  271. self.do_insert_area(
  272. inner,
  273. idx,
  274. range_base,
  275. end - range_base,
  276. inner.initial_memory_regions[idx].flags,
  277. );
  278. if idx == 0 {
  279. idx = usize::MAX;
  280. } else {
  281. idx -= 1;
  282. }
  283. } else {
  284. // regions[idx] is inside the range, record it
  285. if end_index == 0 {
  286. start_index = idx;
  287. }
  288. end_index = idx + 1;
  289. }
  290. idx = idx.checked_add(1).unwrap_or(0);
  291. }
  292. return Ok((start_index, end_index));
  293. }
  294. /// mark_nomap - 用`MemoryAreaAttr::NOMAP`标志标记内存区域
  295. ///
  296. /// ## 参数
  297. ///
  298. /// - base: 区域的物理基地址
  299. /// - size: 区域的大小
  300. ///
  301. /// 使用`MemoryAreaAttr::NOMAP`标志标记的内存区域将不会被添加到物理内存的直接映射中。这些区域仍然会被内存映射所覆盖。内存映射中代表NOMAP内存帧的struct page将被PageReserved()。
  302. /// 注意:如果被标记为`MemoryAreaAttr::NOMAP`的内存是从memblock分配的,调用者必须忽略该内存
  303. pub fn mark_nomap(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
  304. return self.set_or_clear_flags(base, size, true, MemoryAreaAttr::NOMAP);
  305. }
  306. fn set_or_clear_flags(
  307. &self,
  308. mut base: PhysAddr,
  309. mut size: usize,
  310. set: bool,
  311. flags: MemoryAreaAttr,
  312. ) -> Result<(), SystemError> {
  313. let rsvd_base = PhysAddr::new(page_align_down(base.data()));
  314. size = page_align_up((size as usize) + base.data() - rsvd_base.data());
  315. base = rsvd_base;
  316. let mut inner = self.inner.lock();
  317. let (start_index, end_index) = self.isolate_range(&mut inner, base, size)?;
  318. for i in start_index..end_index {
  319. if set {
  320. inner.initial_memory_regions[i].flags |= flags;
  321. } else {
  322. inner.initial_memory_regions[i].flags &= !flags;
  323. }
  324. }
  325. let num = inner.initial_memory_regions_num as isize;
  326. self.do_merge_blocks(&mut inner, 0, num);
  327. return Ok(());
  328. }
  329. /// 标记内存区域为保留区域
  330. pub fn reserve_block(&self, base: PhysAddr, size: usize) -> Result<(), SystemError> {
  331. return self.set_or_clear_flags(base, size, true, MemoryAreaAttr::RESERVED);
  332. }
  333. /// 判断[base, base+size)与已有区域是否有重叠
  334. pub fn is_overlapped(&self, base: PhysAddr, size: usize) -> bool {
  335. let inner = self.inner.lock();
  336. return self.do_is_overlapped(base, size, false, &inner);
  337. }
  338. /// 判断[base, base+size)与已有Reserved区域是否有重叠
  339. pub fn is_overlapped_with_reserved(&self, base: PhysAddr, size: usize) -> bool {
  340. let inner = self.inner.lock();
  341. return self.do_is_overlapped(base, size, true, &inner);
  342. }
  343. fn do_is_overlapped(
  344. &self,
  345. base: PhysAddr,
  346. size: usize,
  347. require_reserved: bool,
  348. inner: &SpinLockGuard<'_, InnerMemBlockManager>,
  349. ) -> bool {
  350. let mut res = false;
  351. for i in 0..inner.initial_memory_regions_num {
  352. if require_reserved
  353. && !inner.initial_memory_regions[i]
  354. .flags
  355. .contains(MemoryAreaAttr::RESERVED)
  356. {
  357. // 忽略非保留区域
  358. continue;
  359. }
  360. let range_base = inner.initial_memory_regions[i].base;
  361. let range_end = range_base + inner.initial_memory_regions[i].size;
  362. if (base >= range_base && base < range_end)
  363. || (base + size > range_base && base + size <= range_end)
  364. || (base <= range_base && base + size >= range_end)
  365. {
  366. res = true;
  367. break;
  368. }
  369. }
  370. return res;
  371. }
  372. /// 生成迭代器
  373. pub fn to_iter(&self) -> MemBlockIter {
  374. let inner = self.inner.lock();
  375. return MemBlockIter {
  376. inner,
  377. index: 0,
  378. usable_only: false,
  379. };
  380. }
  381. /// 生成迭代器,迭代所有可用的物理内存区域
  382. pub fn to_iter_available(&self) -> MemBlockIter {
  383. let inner = self.inner.lock();
  384. return MemBlockIter {
  385. inner,
  386. index: 0,
  387. usable_only: true,
  388. };
  389. }
  390. /// 获取初始内存区域数量
  391. pub fn total_initial_memory_regions(&self) -> usize {
  392. let inner = self.inner.lock();
  393. return inner.initial_memory_regions_num;
  394. }
  395. /// 根据索引获取初始内存区域
  396. pub fn get_initial_memory_region(&self, index: usize) -> Option<PhysMemoryArea> {
  397. let inner = self.inner.lock();
  398. return inner.initial_memory_regions.get(index).copied();
  399. }
  400. }
  401. pub struct MemBlockIter<'a> {
  402. inner: SpinLockGuard<'a, InnerMemBlockManager>,
  403. index: usize,
  404. usable_only: bool,
  405. }
  406. #[allow(dead_code)]
  407. impl<'a> MemBlockIter<'a> {
  408. /// 获取内存区域数量
  409. pub fn total_num(&self) -> usize {
  410. self.inner.initial_memory_regions_num
  411. }
  412. /// 获取指定索引的内存区域
  413. pub fn get_area(&self, index: usize) -> &PhysMemoryArea {
  414. &self.inner.initial_memory_regions[index]
  415. }
  416. /// 获取当前索引
  417. pub fn current_index(&self) -> usize {
  418. self.index
  419. }
  420. }
  421. impl<'a> Iterator for MemBlockIter<'a> {
  422. type Item = PhysMemoryArea;
  423. fn next(&mut self) -> Option<Self::Item> {
  424. while self.index < self.inner.initial_memory_regions_num {
  425. if self.usable_only {
  426. if self.inner.initial_memory_regions[self.index]
  427. .flags
  428. .is_empty()
  429. == false
  430. {
  431. self.index += 1;
  432. if self.index >= self.inner.initial_memory_regions_num {
  433. return None;
  434. }
  435. continue;
  436. }
  437. }
  438. break;
  439. }
  440. if self.index >= self.inner.initial_memory_regions_num {
  441. return None;
  442. }
  443. let ret = self.inner.initial_memory_regions[self.index];
  444. self.index += 1;
  445. return Some(ret);
  446. }
  447. }
  448. bitflags! {
  449. /// 内存区域属性
  450. pub struct MemoryAreaAttr: u32 {
  451. /// No special request
  452. const NONE = 0x0;
  453. /// Hotpluggable region
  454. const HOTPLUG = (1 << 0);
  455. /// Mirrored region
  456. const MIRROR = (1 << 1);
  457. /// do not add to kenrel direct mapping
  458. const NOMAP = (1 << 2);
  459. /// Always detected via a driver
  460. const DRIVER_MANAGED = (1 << 3);
  461. /// Memory is reserved
  462. const RESERVED = (1 << 4);
  463. }
  464. }