//! virtio_impl.rs — `virtio_drivers` HAL implementation (DMA memory management for virtio devices).
  1. use crate::arch::mm::kernel_page_flags;
  2. use crate::arch::MMArch;
  3. use crate::mm::kernel_mapper::KernelMapper;
  4. use crate::mm::page::{page_manager_lock_irqsave, PageFlags};
  5. use crate::mm::{
  6. allocator::page_frame::{
  7. allocate_page_frames, deallocate_page_frames, PageFrameCount, PhysPageFrame,
  8. },
  9. MemoryManagementArch, PhysAddr, VirtAddr,
  10. };
  11. use core::ptr::NonNull;
  12. use virtio_drivers::{BufferDirection, Hal, PAGE_SIZE};
/// Hardware abstraction layer handed to the `virtio_drivers` crate.
///
/// A zero-sized marker type: all behavior lives in the `unsafe impl Hal`
/// block below, which provides DMA allocation/deallocation and
/// physical<->virtual address translation for virtio devices.
pub struct HalImpl;
  14. unsafe impl Hal for HalImpl {
  15. /// @brief 申请用于DMA的内存页
  16. /// @param pages 页数(4k一页)
  17. /// @return PhysAddr 获得的内存页的初始物理地址
  18. fn dma_alloc(
  19. pages: usize,
  20. _direction: BufferDirection,
  21. ) -> (virtio_drivers::PhysAddr, NonNull<u8>) {
  22. let page_num = PageFrameCount::new(
  23. ((pages * PAGE_SIZE + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE).next_power_of_two(),
  24. );
  25. unsafe {
  26. let (paddr, count) =
  27. allocate_page_frames(page_num).expect("VirtIO Impl: alloc page failed");
  28. let virt = MMArch::phys_2_virt(paddr).unwrap();
  29. // 清空这块区域,防止出现脏数据
  30. core::ptr::write_bytes(virt.data() as *mut u8, 0, count.data() * MMArch::PAGE_SIZE);
  31. let dma_flags: PageFlags<MMArch> = PageFlags::mmio_flags();
  32. let mut kernel_mapper = KernelMapper::lock();
  33. let kernel_mapper = kernel_mapper.as_mut().unwrap();
  34. let flusher = kernel_mapper
  35. .remap(virt, dma_flags)
  36. .expect("VirtIO Impl: remap failed");
  37. flusher.flush();
  38. return (
  39. paddr.data(),
  40. NonNull::new(MMArch::phys_2_virt(paddr).unwrap().data() as _).unwrap(),
  41. );
  42. }
  43. }
  44. /// @brief 释放用于DMA的内存页
  45. /// @param paddr 起始物理地址 pages 页数(4k一页)
  46. /// @return i32 0表示成功
  47. unsafe fn dma_dealloc(
  48. paddr: virtio_drivers::PhysAddr,
  49. vaddr: NonNull<u8>,
  50. pages: usize,
  51. ) -> i32 {
  52. let page_count = PageFrameCount::new(
  53. ((pages * PAGE_SIZE + MMArch::PAGE_SIZE - 1) / MMArch::PAGE_SIZE).next_power_of_two(),
  54. );
  55. // 恢复页面属性
  56. let vaddr = VirtAddr::new(vaddr.as_ptr() as usize);
  57. let mut kernel_mapper = KernelMapper::lock();
  58. let kernel_mapper = kernel_mapper.as_mut().unwrap();
  59. let flusher = kernel_mapper
  60. .remap(vaddr, kernel_page_flags(vaddr))
  61. .expect("VirtIO Impl: remap failed");
  62. flusher.flush();
  63. unsafe {
  64. deallocate_page_frames(
  65. PhysPageFrame::new(PhysAddr::new(paddr)),
  66. page_count,
  67. &mut page_manager_lock_irqsave(),
  68. );
  69. }
  70. return 0;
  71. }
  72. /// @brief mmio物理地址转换为虚拟地址,不需要使用
  73. /// @param paddr 起始物理地址
  74. /// @return NonNull<u8> 虚拟地址的指针
  75. unsafe fn mmio_phys_to_virt(paddr: virtio_drivers::PhysAddr, _size: usize) -> NonNull<u8> {
  76. NonNull::new((MMArch::phys_2_virt(PhysAddr::new(paddr))).unwrap().data() as _).unwrap()
  77. }
  78. /// @brief 与真实物理设备共享
  79. /// @param buffer 要共享的buffer _direction:设备到driver或driver到设备
  80. /// @return buffer在内存中的物理地址
  81. unsafe fn share(
  82. buffer: NonNull<[u8]>,
  83. _direction: BufferDirection,
  84. ) -> virtio_drivers::PhysAddr {
  85. let vaddr = VirtAddr::new(buffer.as_ptr() as *mut u8 as usize);
  86. //debug!("virt:{:x}", vaddr);
  87. // Nothing to do, as the host already has access to all memory.
  88. return MMArch::virt_2_phys(vaddr).unwrap().data();
  89. }
  90. /// @brief 停止共享(让主机可以访问全部内存的话什么都不用做)
  91. unsafe fn unshare(
  92. _paddr: virtio_drivers::PhysAddr,
  93. _buffer: NonNull<[u8]>,
  94. _direction: BufferDirection,
  95. ) {
  96. // Nothing to do, as the host already has access to all memory and we didn't copy the buffer
  97. // anywhere else.
  98. }
  99. }