//! VirtIO HAL implementation: glue between the `virtio_drivers` crate and
//! this kernel's physical-frame allocator and kernel page mapper.
use crate::arch::mm::kernel_page_flags;
use crate::arch::MMArch;
use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{page_manager_lock_irqsave, EntryFlags};
use crate::mm::{
    allocator::page_frame::{
        allocate_page_frames, deallocate_page_frames, PageFrameCount, PhysPageFrame,
    },
    MemoryManagementArch, PhysAddr, VirtAddr,
};
use core::ptr::NonNull;
use virtio_drivers::{BufferDirection, Hal, PAGE_SIZE};
/// Zero-sized type that implements the `virtio_drivers` [`Hal`] interface
/// for this kernel (DMA page allocation, address translation, buffer sharing).
pub struct HalImpl;
  14. unsafe impl Hal for HalImpl {
  15. /// @brief 申请用于DMA的内存页
  16. /// @param pages 页数(4k一页)
  17. /// @return PhysAddr 获得的内存页的初始物理地址
  18. fn dma_alloc(
  19. pages: usize,
  20. _direction: BufferDirection,
  21. ) -> (virtio_drivers::PhysAddr, NonNull<u8>) {
  22. let page_num = PageFrameCount::new(
  23. (pages * PAGE_SIZE)
  24. .div_ceil(MMArch::PAGE_SIZE)
  25. .next_power_of_two(),
  26. );
  27. unsafe {
  28. let (paddr, count) =
  29. allocate_page_frames(page_num).expect("VirtIO Impl: alloc page failed");
  30. let virt = MMArch::phys_2_virt(paddr).unwrap();
  31. // 清空这块区域,防止出现脏数据
  32. core::ptr::write_bytes(virt.data() as *mut u8, 0, count.data() * MMArch::PAGE_SIZE);
  33. let dma_flags: EntryFlags<MMArch> = EntryFlags::mmio_flags();
  34. let mut kernel_mapper = KernelMapper::lock();
  35. let kernel_mapper = kernel_mapper.as_mut().unwrap();
  36. let flusher = kernel_mapper
  37. .remap(virt, dma_flags)
  38. .expect("VirtIO Impl: remap failed");
  39. flusher.flush();
  40. return (
  41. paddr.data(),
  42. NonNull::new(MMArch::phys_2_virt(paddr).unwrap().data() as _).unwrap(),
  43. );
  44. }
  45. }
  46. /// @brief 释放用于DMA的内存页
  47. /// @param paddr 起始物理地址 pages 页数(4k一页)
  48. /// @return i32 0表示成功
  49. unsafe fn dma_dealloc(
  50. paddr: virtio_drivers::PhysAddr,
  51. vaddr: NonNull<u8>,
  52. pages: usize,
  53. ) -> i32 {
  54. let page_count = PageFrameCount::new(
  55. (pages * PAGE_SIZE)
  56. .div_ceil(MMArch::PAGE_SIZE)
  57. .next_power_of_two(),
  58. );
  59. // 恢复页面属性
  60. let vaddr = VirtAddr::new(vaddr.as_ptr() as usize);
  61. let mut kernel_mapper = KernelMapper::lock();
  62. let kernel_mapper = kernel_mapper.as_mut().unwrap();
  63. let flusher = kernel_mapper
  64. .remap(vaddr, kernel_page_flags(vaddr))
  65. .expect("VirtIO Impl: remap failed");
  66. flusher.flush();
  67. unsafe {
  68. deallocate_page_frames(
  69. PhysPageFrame::new(PhysAddr::new(paddr)),
  70. page_count,
  71. &mut page_manager_lock_irqsave(),
  72. );
  73. }
  74. return 0;
  75. }
  76. /// @brief mmio物理地址转换为虚拟地址,不需要使用
  77. /// @param paddr 起始物理地址
  78. /// @return NonNull<u8> 虚拟地址的指针
  79. unsafe fn mmio_phys_to_virt(paddr: virtio_drivers::PhysAddr, _size: usize) -> NonNull<u8> {
  80. NonNull::new((MMArch::phys_2_virt(PhysAddr::new(paddr))).unwrap().data() as _).unwrap()
  81. }
  82. /// @brief 与真实物理设备共享
  83. /// @param buffer 要共享的buffer _direction:设备到driver或driver到设备
  84. /// @return buffer在内存中的物理地址
  85. unsafe fn share(
  86. buffer: NonNull<[u8]>,
  87. _direction: BufferDirection,
  88. ) -> virtio_drivers::PhysAddr {
  89. let vaddr = VirtAddr::new(buffer.as_ptr() as *mut u8 as usize);
  90. //debug!("virt:{:x}", vaddr);
  91. // Nothing to do, as the host already has access to all memory.
  92. return MMArch::virt_2_phys(vaddr).unwrap().data();
  93. }
  94. /// @brief 停止共享(让主机可以访问全部内存的话什么都不用做)
  95. unsafe fn unshare(
  96. _paddr: virtio_drivers::PhysAddr,
  97. _buffer: NonNull<[u8]>,
  98. _direction: BufferDirection,
  99. ) {
  100. // Nothing to do, as the host already has access to all memory and we didn't copy the buffer
  101. // anywhere else.
  102. }
  103. }