//! Page cache: per-inode in-memory caching of file pages.
  1. use core::{
  2. cmp::min,
  3. sync::atomic::{AtomicUsize, Ordering},
  4. };
  5. use alloc::{
  6. sync::{Arc, Weak},
  7. vec::Vec,
  8. };
  9. use hashbrown::HashMap;
  10. use system_error::SystemError;
  11. use super::vfs::IndexNode;
  12. use crate::libs::spinlock::SpinLockGuard;
  13. use crate::mm::page::FileMapInfo;
  14. use crate::{arch::mm::LockedFrameAllocator, libs::lazy_init::Lazy};
  15. use crate::{
  16. arch::MMArch,
  17. libs::spinlock::SpinLock,
  18. mm::{
  19. page::{page_manager_lock_irqsave, page_reclaimer_lock_irqsave, Page, PageFlags},
  20. MemoryManagementArch,
  21. },
  22. };
  23. use crate::{libs::align::page_align_up, mm::page::PageType};
/// Monotonic counter handing out a unique id to every `PageCache` instance.
static PAGE_CACHE_ID: AtomicUsize = AtomicUsize::new(0);
/// Page cache: maps page indices of one file to in-memory [`Page`]s.
#[derive(Debug)]
pub struct PageCache {
    /// Unique id, allocated from `PAGE_CACHE_ID`.
    id: usize,
    /// Spinlock-protected interior holding the actual page map.
    inner: SpinLock<InnerPageCache>,
    /// Back-reference to the owning inode; may be bound lazily via `set_inode`.
    inode: Lazy<Weak<dyn IndexNode>>,
}
/// Lock-protected interior of a [`PageCache`].
#[derive(Debug)]
pub struct InnerPageCache {
    /// Same id as the owning `PageCache`; kept for debugging.
    #[allow(unused)]
    id: usize,
    /// Cached pages keyed by page index (byte offset >> `PAGE_SHIFT`).
    pages: HashMap<usize, Arc<Page>>,
    /// Weak back-reference to the owner, needed to build `FileMapInfo`s.
    page_cache_ref: Weak<PageCache>,
}
impl InnerPageCache {
    /// Creates an empty interior bound to its owning cache via a weak reference.
    pub fn new(page_cache_ref: Weak<PageCache>, id: usize) -> InnerPageCache {
        Self {
            id,
            pages: HashMap::new(),
            page_cache_ref,
        }
    }

    /// Inserts `page` at page index `offset` (stores a cloned `Arc`).
    pub fn add_page(&mut self, offset: usize, page: &Arc<Page>) {
        self.pages.insert(offset, page.clone());
    }

    /// Returns the cached page at page index `offset`, if present.
    pub fn get_page(&self, offset: usize) -> Option<Arc<Page>> {
        self.pages.get(&offset).cloned()
    }

    /// Removes and returns the cached page at page index `offset`.
    pub fn remove_page(&mut self, offset: usize) -> Option<Arc<Page>> {
        self.pages.remove(&offset)
    }

    /// Allocates fresh pages starting at `start_page_index`, fills them from
    /// `buf` (whose length must be a whole number of pages), and inserts them
    /// into the cache.
    fn create_pages(&mut self, start_page_index: usize, buf: &[u8]) -> Result<(), SystemError> {
        assert!(buf.len() % MMArch::PAGE_SIZE == 0);
        let page_num = buf.len() / MMArch::PAGE_SIZE;
        let len = buf.len();
        if len == 0 {
            return Ok(());
        }
        let mut page_manager_guard = page_manager_lock_irqsave();
        for i in 0..page_num {
            let buf_offset = i * MMArch::PAGE_SIZE;
            let page_index = start_page_index + i;
            // Each new page is tagged as file-backed and put on the LRU so the
            // reclaimer can find it later.
            let page = page_manager_guard.create_one_page(
                PageType::File(FileMapInfo {
                    page_cache: self
                        .page_cache_ref
                        .upgrade()
                        .expect("failed to get self_arc of pagecache"),
                    index: page_index,
                }),
                PageFlags::PG_LRU,
                &mut LockedFrameAllocator,
            )?;
            let mut page_guard = page.write_irqsave();
            unsafe {
                page_guard.copy_from_slice(&buf[buf_offset..buf_offset + MMArch::PAGE_SIZE]);
            }
            self.add_page(page_index, &page);
        }
        Ok(())
    }

    /// Read data from the page cache, faulting missing pages in from the inode.
    ///
    /// ## Parameters
    ///
    /// - `offset` byte offset into the file
    /// - `buf` destination buffer
    ///
    /// ## Returns
    ///
    /// - `Ok(usize)` number of bytes successfully read
    /// - `Err(SystemError)` error code on failure
    pub fn read(&mut self, offset: usize, buf: &mut [u8]) -> Result<usize, SystemError> {
        let inode: Arc<dyn IndexNode> = self
            .page_cache_ref
            .upgrade()
            .unwrap()
            .inode
            .upgrade()
            .unwrap();
        let file_size = inode.metadata().unwrap().size;
        // Clamp the request so we never read past end-of-file.
        let len = if offset < file_size as usize {
            core::cmp::min(file_size as usize, offset + buf.len()) - offset
        } else {
            0
        };
        if len == 0 {
            return Ok(0);
        }
        // Runs of consecutive missing pages, recorded as (first_index, count).
        let mut not_exist = Vec::new();
        let start_page_index = offset >> MMArch::PAGE_SHIFT;
        let page_num = (page_align_up(offset + len) >> MMArch::PAGE_SHIFT) - start_page_index;
        let mut buf_offset = 0;
        let mut ret = 0;
        for i in 0..page_num {
            let page_index = start_page_index + i;
            // The first page may start at a non-zero intra-page offset.
            let page_offset = if i == 0 {
                offset % MMArch::PAGE_SIZE
            } else {
                0
            };
            // The first and the last page may be partial.
            let sub_len = if i == 0 {
                min(len, MMArch::PAGE_SIZE - page_offset)
            } else if i == page_num - 1 {
                (offset + len - 1) % MMArch::PAGE_SIZE + 1
            } else {
                MMArch::PAGE_SIZE
            };
            if let Some(page) = self.get_page(page_index) {
                let sub_buf = &mut buf[buf_offset..(buf_offset + sub_len)];
                unsafe {
                    sub_buf.copy_from_slice(
                        &page.read_irqsave().as_slice()[page_offset..page_offset + sub_len],
                    );
                }
                ret += sub_len;
            } else if let Some((index, count)) = not_exist.last_mut() {
                // Extend the current run when this page is contiguous with it,
                // otherwise open a new run.
                if *index + *count == page_index {
                    *count += 1;
                } else {
                    not_exist.push((page_index, 1));
                }
            } else {
                not_exist.push((page_index, 1));
            }
            buf_offset += sub_len;
        }
        // Fault each missing run in with a single backing read per run.
        for (page_index, count) in not_exist {
            // TODO: a staging buffer is used here to avoid multiple disk reads;
            // once async IO lands, write straight into the pages to cut the
            // extra memory use and copying.
            let mut page_buf = vec![0u8; MMArch::PAGE_SIZE * count];
            inode.read_sync(page_index * MMArch::PAGE_SIZE, page_buf.as_mut())?;
            self.create_pages(page_index, page_buf.as_mut())?;
            // File offset of the content that actually has to be copied out.
            let copy_offset = core::cmp::max(page_index * MMArch::PAGE_SIZE, offset);
            // Length of that content.
            let copy_len = core::cmp::min((page_index + count) * MMArch::PAGE_SIZE, offset + len)
                - copy_offset;
            let page_buf_offset = if page_index * MMArch::PAGE_SIZE < copy_offset {
                copy_offset - page_index * MMArch::PAGE_SIZE
            } else {
                0
            };
            let buf_offset = copy_offset.saturating_sub(offset);
            buf[buf_offset..buf_offset + copy_len]
                .copy_from_slice(&page_buf[page_buf_offset..page_buf_offset + copy_len]);
            ret += copy_len;
        }
        Ok(ret)
    }

    /// Write data into the page cache, allocating zeroed pages on demand and
    /// marking every touched page dirty.
    ///
    /// ## Parameters
    ///
    /// - `offset` byte offset into the file
    /// - `buf` source buffer
    ///
    /// ## Returns
    ///
    /// - `Ok(usize)` number of bytes successfully written
    /// - `Err(SystemError)` error code on failure
    pub fn write(&mut self, offset: usize, buf: &[u8]) -> Result<usize, SystemError> {
        let len = buf.len();
        if len == 0 {
            return Ok(0);
        }
        let start_page_index = offset >> MMArch::PAGE_SHIFT;
        let page_num = (page_align_up(offset + len) >> MMArch::PAGE_SHIFT) - start_page_index;
        let mut buf_offset = 0;
        let mut ret = 0;
        for i in 0..page_num {
            let page_index = start_page_index + i;
            // The first page may start at a non-zero intra-page offset.
            let page_offset = if i == 0 {
                offset % MMArch::PAGE_SIZE
            } else {
                0
            };
            // The first and the last page may be partial.
            let sub_len = if i == 0 {
                min(len, MMArch::PAGE_SIZE - page_offset)
            } else if i == page_num - 1 {
                (offset + len - 1) % MMArch::PAGE_SIZE + 1
            } else {
                MMArch::PAGE_SIZE
            };
            let mut page = self.get_page(page_index);
            if page.is_none() {
                // Not cached yet: back the write with a fresh zeroed page.
                // NOTE(review): partial writes are not pre-filled from disk here
                // — presumably callers handle read-modify-write; confirm.
                let page_buf = vec![0u8; MMArch::PAGE_SIZE];
                self.create_pages(page_index, &page_buf)?;
                page = self.get_page(page_index);
            }
            if let Some(page) = page {
                let sub_buf = &buf[buf_offset..(buf_offset + sub_len)];
                let mut page_guard = page.write_irqsave();
                unsafe {
                    page_guard.as_slice_mut()[page_offset..page_offset + sub_len]
                        .copy_from_slice(sub_buf);
                }
                page_guard.add_flags(PageFlags::PG_DIRTY);
                ret += sub_len;
            } else {
                return Err(SystemError::EIO);
            };
            buf_offset += sub_len;
        }
        Ok(ret)
    }

    /// Shrinks the cache to cover `len` bytes: every page wholly past the new
    /// end is dropped (and removed from the reclaimer), and the new last page
    /// is truncated to its remaining length.
    pub fn resize(&mut self, len: usize) -> Result<(), SystemError> {
        let page_num = page_align_up(len) / MMArch::PAGE_SIZE;
        let mut reclaimer = page_reclaimer_lock_irqsave();
        for (_i, page) in self.pages.drain_filter(|index, _page| *index >= page_num) {
            let _ = reclaimer.remove_page(&page.phys_address());
        }
        if page_num > 0 {
            let last_page_index = page_num - 1;
            let last_len = len - last_page_index * MMArch::PAGE_SIZE;
            if let Some(page) = self.get_page(last_page_index) {
                unsafe {
                    page.write_irqsave().truncate(last_len);
                };
            } else {
                // The page holding the new end of file is not cached.
                return Err(SystemError::EIO);
            }
        }
        Ok(())
    }

    /// Number of pages currently held by this cache.
    pub fn pages_count(&self) -> usize {
        return self.pages.len();
    }

    /// Synchronize the page cache with the storage device: write back every
    /// page flagged `PG_DIRTY`.
    pub fn sync(&mut self) -> Result<(), SystemError> {
        for page in self.pages.values() {
            let mut guard = page.write_irqsave();
            if guard.flags().contains(PageFlags::PG_DIRTY) {
                crate::mm::page::PageReclaimer::page_writeback(&mut guard, false);
            }
        }
        Ok(())
    }
}
  274. impl Drop for InnerPageCache {
  275. fn drop(&mut self) {
  276. // log::debug!("page cache drop");
  277. let mut page_manager = page_manager_lock_irqsave();
  278. for page in self.pages.values() {
  279. page_manager.remove_page(&page.phys_address());
  280. }
  281. }
  282. }
  283. impl PageCache {
  284. pub fn new(inode: Option<Weak<dyn IndexNode>>) -> Arc<PageCache> {
  285. let id = PAGE_CACHE_ID.fetch_add(1, Ordering::SeqCst);
  286. Arc::new_cyclic(|weak| Self {
  287. id,
  288. inner: SpinLock::new(InnerPageCache::new(weak.clone(), id)),
  289. inode: {
  290. let v: Lazy<Weak<dyn IndexNode>> = Lazy::new();
  291. if let Some(inode) = inode {
  292. v.init(inode);
  293. }
  294. v
  295. },
  296. })
  297. }
  298. /// # 获取页缓存的ID
  299. #[inline]
  300. #[allow(unused)]
  301. pub fn id(&self) -> usize {
  302. self.id
  303. }
  304. pub fn inode(&self) -> Option<Weak<dyn IndexNode>> {
  305. self.inode.try_get().cloned()
  306. }
  307. pub fn set_inode(&self, inode: Weak<dyn IndexNode>) -> Result<(), SystemError> {
  308. if self.inode.initialized() {
  309. return Err(SystemError::EINVAL);
  310. }
  311. self.inode.init(inode);
  312. Ok(())
  313. }
  314. pub fn lock_irqsave(&self) -> SpinLockGuard<InnerPageCache> {
  315. if self.inner.is_locked() {
  316. log::error!("page cache already locked");
  317. }
  318. self.inner.lock_irqsave()
  319. }
  320. pub fn is_locked(&self) -> bool {
  321. self.inner.is_locked()
  322. }
  323. }