stub.c

  1. #include "dragonstub/printk.h"
  2. #include "efidef.h"
  3. #include <efi.h>
  4. #include <efilib.h>
  5. #include <elf.h>
  6. #include <dragonstub/dragonstub.h>
  7. #include <dragonstub/elfloader.h>
  8. #include <dragonstub/linux/math.h>
  9. #include <dragonstub/linux/align.h>
/*
 * This is the base address at which to start allocating virtual memory ranges
 * for UEFI Runtime Services.
 *
 * For ARM/ARM64:
 * This is in the low TTBR0 range so that we can use
 * any allocation we choose, and eliminate the risk of a conflict after kexec.
 * The value chosen is the largest non-zero power of 2 suitable for this purpose
 * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
 * be mapped efficiently.
 * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split,
 * map everything below 1 GB. (512 MB is a reasonable upper bound for the
 * entire footprint of the UEFI runtime services memory regions)
 *
 * For RISC-V:
 * There is no specific reason why this address (512MB) can't be used as the
 * EFI runtime virtual address for RISC-V. It also helps to use EFI runtime
 * services on both RV32/RV64. Keep the same runtime virtual address for RISC-V
 * as well to minimize the code churn.
 */
#define EFI_RT_VIRTUAL_BASE SZ_512M

/*
 * Some architectures map the EFI regions into the kernel's linear map using a
 * fixed offset.
 */
#ifndef EFI_RT_VIRTUAL_OFFSET
#define EFI_RT_VIRTUAL_OFFSET 0
#endif

extern void _image_end(void);

static u64 image_base = 0;
static u64 image_size = 0;
static u64 image_end = 0;

static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);
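
/*
 * Note: with the default EFI_RT_VIRTUAL_OFFSET of 0, flat_va_mapping is false
 * and efi_get_virtmap() below relocates EFI_MEMORY_RUNTIME regions into a
 * compact window starting at virtmap_base (EFI_RT_VIRTUAL_BASE). A port that
 * defines a non-zero offset gets a flat physical-plus-offset mapping instead.
 */

/// @brief Fetch the command line via the LOADED_IMAGE protocol and parse the
///        stub's options from it.
/// @param image the stub's LOADED_IMAGE protocol
/// @param cmdline_ptr out parameter receiving the converted command line
/// @return EFI_SUCCESS on success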
EFI_STATUS efi_handle_cmdline(EFI_LOADED_IMAGE *image, char **cmdline_ptr)
{
        int cmdline_size = 0;
        EFI_STATUS status;
        char *cmdline;

        /*
         * Get the command line from EFI, using the LOADED_IMAGE
         * protocol. We are going to copy the command line into the
         * device tree, so this can be allocated anywhere.
         */
        cmdline = efi_convert_cmdline(image, &cmdline_size);
        if (!cmdline) {
                efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
                return EFI_OUT_OF_RESOURCES;
        }

        // if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
        //     IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
        //     cmdline_size == 0) {
        //         status = efi_parse_options(CONFIG_CMDLINE);
        //         if (status != EFI_SUCCESS) {
        //                 efi_err("Failed to parse options\n");
        //                 goto fail_free_cmdline;
        //         }
        // }

        // if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
        if (cmdline_size > 0) {
                status = efi_parse_options(cmdline);
                if (status != EFI_SUCCESS) {
                        efi_err("Failed to parse options\n");
                        goto fail_free_cmdline;
                }
        }

        *cmdline_ptr = cmdline;
        return EFI_SUCCESS;

fail_free_cmdline:
        efi_bs_call(FreePool, cmdline);
        return status;
}
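
/// @brief Record the stub image's base address and size from the LOADED_IMAGE
///        protocol, and its end address from the _image_end linker symbol.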
static efi_status_t init_efi_program_info(efi_loaded_image_t *loaded_image)
{
        image_base = (u64)loaded_image->ImageBase;
        image_size = loaded_image->ImageSize;
        image_end = (u64)_image_end;

        efi_info("DragonStub loaded at 0x%p\n", image_base);
        efi_info("DragonStub + payload size: 0x%p\n", image_size);
        efi_info("DragonStub end addr: 0x%p\n", image_end);
        return EFI_SUCCESS;
}
/// @brief Constructor for payload_info
static struct payload_info payload_info_new(u64 payload_addr, u64 payload_size)
{
        struct payload_info info = { .payload_addr = payload_addr,
                                     .payload_size = payload_size,
                                     .loaded_paddr = 0,
                                     .loaded_size = 0,
                                     .kernel_entry = 0 };
        return info;
}
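
/// @brief Locate the payload ELF that was linked into the stub via the
///        _binary_payload_start/_binary_payload_end symbols and validate its
///        ELF header.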
static efi_status_t find_elf(struct payload_info *info)
{
        extern __weak void _binary_payload_start(void);
        extern __weak void _binary_payload_end(void);
        extern __weak void _binary_payload_size(void);

        u64 payload_start = (u64)_binary_payload_start;
        u64 payload_end = (u64)_binary_payload_end;
        u64 payload_size = payload_end - payload_start;

        efi_info("payload_addr: %p\n", payload_start);
        efi_info("payload_end: %p\n", payload_end);
        efi_info("payload_size: %p\n", payload_size);
        if (payload_start == 0 || payload_end <= payload_start + 4 ||
            payload_size == 0) {
                return EFI_NOT_FOUND;
        }

        efi_info("Checking payload's ELF header...\n");
        bool found = elf_check((void *)payload_start, payload_size);
        if (found) {
                info->payload_addr = payload_start;
                info->payload_size = payload_size;
                efi_info("Found payload ELF header\n");
                return EFI_SUCCESS;
        }

        return EFI_NOT_FOUND;
}
/// @brief Find the kernel payload to boot
/// @param handle efi_handle
/// @param loaded_image the stub's efi_loaded_image_t
/// @param ret_info payload information returned to the caller
/// @return EFI_SUCCESS if a payload was found
efi_status_t find_payload(efi_handle_t handle, efi_loaded_image_t *loaded_image,
                          struct payload_info *ret_info)
{
        efi_info("Try to find payload to boot\n");

        efi_status_t status = init_efi_program_info(loaded_image);
        if (status != EFI_SUCCESS) {
                efi_err("Failed to init efi program info\n");
                return status;
        }

        struct payload_info info = payload_info_new(0, 0);
        status = find_elf(&info);
        if (status != EFI_SUCCESS) {
                efi_err("Payload not found: Did you forget to add the payload by setting PAYLOAD_ELF at compile time?\n"
                        "Or is the payload not an ELF file?\n");
                return status;
        }

        *ret_info = info;
        return EFI_SUCCESS;
}
/*
 * efi_alloc_virtmap() - create a pool allocation for the virtmap
 *
 * Create an allocation that is of sufficient size to hold all the memory
 * descriptors that will be passed to SetVirtualAddressMap() to inform the
 * firmware about the virtual mapping that will be used under the OS to call
 * into the firmware.
 */
efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap,
                               unsigned long *desc_size, u32 *desc_ver)
{
        unsigned long size, mmap_key;
        efi_status_t status;

        /*
         * Use the size of the current memory map as an upper bound for the
         * size of the buffer we need to pass to SetVirtualAddressMap() to
         * cover all EFI_MEMORY_RUNTIME regions.
         */
        size = 0;
        status = efi_bs_call(GetMemoryMap, &size, NULL, &mmap_key, desc_size,
                             desc_ver);
        if (status != EFI_BUFFER_TOO_SMALL)
                return EFI_LOAD_ERROR;

        return efi_bs_call(AllocatePool, EfiLoaderData, size, (void **)virtmap);
}
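
/*
 * Note: efi_alloc_virtmap() relies on the standard UEFI size-probe idiom: a
 * GetMemoryMap() call with a zero-sized buffer returns EFI_BUFFER_TOO_SMALL
 * together with the required buffer size. The map key is ignored here because
 * the map is only being sized, not consumed for ExitBootServices().
 */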
/*
 * efi_get_virtmap() - create a virtual mapping for the EFI memory map
 *
 * This function populates the VirtualStart fields of all memory region
 * descriptors in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those
 * descriptors are also copied to @runtime_map, and their total count is
 * returned in @count.
 */
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
                     unsigned long desc_size, efi_memory_desc_t *runtime_map,
                     int *count)
{
        u64 efi_virt_base = virtmap_base;
        efi_memory_desc_t *in, *out = runtime_map;
        int l;

        *count = 0;
        for (l = 0; l < map_size; l += desc_size) {
                u64 paddr, size;

                in = (void *)memory_map + l;
                if (!(in->Attribute & EFI_MEMORY_RUNTIME))
                        continue;

                paddr = in->PhysicalStart;
                size = in->NumberOfPages * EFI_PAGE_SIZE;

                in->VirtualStart = in->PhysicalStart + EFI_RT_VIRTUAL_OFFSET;
                if (efi_novamap) {
                        continue;
                }

                /*
                 * Make the mapping compatible with 64k pages: this allows
                 * a 4k page size kernel to kexec a 64k page size kernel and
                 * vice versa.
                 */
                if (!flat_va_mapping) {
                        paddr = round_down(in->PhysicalStart, SZ_64K);
                        size += in->PhysicalStart - paddr;

                        /*
                         * Avoid wasting memory on PTEs by choosing a virtual
                         * base that is compatible with section mappings if this
                         * region has the appropriate size and physical
                         * alignment. (Sections are 2 MB on 4k granule kernels)
                         */
                        if (IS_ALIGNED(in->PhysicalStart, SZ_2M) &&
                            size >= SZ_2M)
                                efi_virt_base = round_up(efi_virt_base, SZ_2M);
                        else
                                efi_virt_base = round_up(efi_virt_base, SZ_64K);

                        in->VirtualStart += efi_virt_base - paddr;
                        efi_virt_base += size;
                }

                memcpy(out, in, desc_size);
                out = (void *)out + desc_size;
                ++*count;
        }
}
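
/*
 * Worked example of the 64k-compatible mapping above (EFI_RT_VIRTUAL_OFFSET
 * == 0, virtmap_base == 0x20000000): a runtime region with
 * PhysicalStart == 0x40258000 and 4 pages (0x4000 bytes) yields
 *   paddr         = round_down(0x40258000, SZ_64K)         = 0x40250000
 *   size          = 0x4000 + (0x40258000 - 0x40250000)     = 0xc000
 *   efi_virt_base = round_up(0x20000000, SZ_64K)            = 0x20000000
 *   VirtualStart  = 0x40258000 + (0x20000000 - 0x40250000)  = 0x20008000
 * so the region keeps its offset within a 64k frame, and efi_virt_base
 * advances to 0x2000c000 for the next runtime region.
 */

/*
 * Illustrative sketch (not part of the original stub): how efi_alloc_virtmap()
 * and efi_get_virtmap() are typically combined. The "example_" function is
 * hypothetical; DragonStub performs the real memory-map retrieval and the
 * SetVirtualAddressMap() call later in the boot path, outside this file.
 * Error-path FreePool calls are omitted for brevity.
 */
#if 0 /* example only */
static efi_status_t example_build_virtmap(efi_memory_desc_t **runtime_map,
                                          int *count)
{
        efi_memory_desc_t *memory_map;
        unsigned long desc_size, map_size = 0, mmap_key;
        u32 desc_ver;
        efi_status_t status;

        /* Buffer large enough to hold every runtime descriptor. */
        status = efi_alloc_virtmap(runtime_map, &desc_size, &desc_ver);
        if (status != EFI_SUCCESS)
                return status;

        /* Size-probe, allocate, then fetch the current memory map. */
        status = efi_bs_call(GetMemoryMap, &map_size, NULL, &mmap_key,
                             &desc_size, &desc_ver);
        if (status != EFI_BUFFER_TOO_SMALL)
                return EFI_LOAD_ERROR;
        map_size += 2 * desc_size; /* slack in case the pool alloc adds entries */
        status = efi_bs_call(AllocatePool, EfiLoaderData, map_size,
                             (void **)&memory_map);
        if (status != EFI_SUCCESS)
                return status;
        status = efi_bs_call(GetMemoryMap, &map_size, memory_map, &mmap_key,
                             &desc_size, &desc_ver);
        if (status != EFI_SUCCESS)
                return status;

        /* Assign virtual addresses to all EFI_MEMORY_RUNTIME regions. */
        efi_get_virtmap(memory_map, map_size, desc_size, *runtime_map, count);

        /* *runtime_map / *count would then go to SetVirtualAddressMap(). */
        return EFI_SUCCESS;
}
#endif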
/// @brief Install the Linux-compatible memory reservation (memreserve)
///        configuration table.
static void install_memreserve_table(void)
{
        struct linux_efi_memreserve *rsv;
        efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
        efi_status_t status;

        status = efi_bs_call(AllocatePool, EfiLoaderData, sizeof(*rsv),
                             (void **)&rsv);
        if (status != EFI_SUCCESS) {
                efi_err("Failed to allocate memreserve entry!\n");
                return;
        }

        rsv->next = 0;
        rsv->size = 0;
        rsv->count = 0;

        status = efi_bs_call(InstallConfigurationTable, &memreserve_table_guid,
                             rsv);
        if (status != EFI_SUCCESS)
                efi_err("Failed to install memreserve config table!\n");
}
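
/*
 * Illustrative sketch (not part of the original stub): once installed, the
 * empty memreserve list head can be looked up again by later boot stages
 * through the configuration table, using the same GUID:
 *
 *         struct linux_efi_memreserve *rsv =
 *                 get_efi_config_table(LINUX_EFI_MEMRESERVE_TABLE_GUID);
 *
 * get_efi_config_table() is the same helper used by
 * get_supported_rt_services() below.
 */

/// @brief Return the set of runtime services the firmware claims to support,
///        as advertised by the EFI_RT_PROPERTIES_TABLE (all services are
///        assumed supported if the table is absent).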
static u32 get_supported_rt_services(void)
{
        const efi_rt_properties_table_t *rt_prop_table;
        u32 supported = EFI_RT_SUPPORTED_ALL;

        rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID);
        if (rt_prop_table)
                supported &= rt_prop_table->runtime_services_supported;

        return supported;
}
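
/// @brief Common boot path: check platform features, decide whether
///        SetVirtualAddressMap() can be used, install the memreserve table,
///        and hand control to efi_boot_kernel().
/// @param handle the stub's EFI image handle
/// @param loaded_image the stub's loaded image protocol
/// @param payload_info the payload located by find_payload()
/// @param cmdline_ptr the kernel command line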
efi_status_t efi_stub_common(efi_handle_t handle,
                             efi_loaded_image_t *loaded_image,
                             struct payload_info *payload_info,
                             char *cmdline_ptr)
{
        struct screen_info *si;
        efi_status_t status;

        status = check_platform_features();
        if (status != EFI_SUCCESS)
                return status;

        // si = setup_graphics();

        // efi_retrieve_tpm2_eventlog();

        // /* Ask the firmware to clear memory on unclean shutdown */
        // efi_enable_reset_attack_mitigation();

        // efi_load_initrd(image, ULONG_MAX, efi_get_max_initrd_addr(image_addr),
        //                 NULL);

        // efi_random_get_seed();

        /* force efi_novamap if SetVirtualAddressMap() is unsupported */
        efi_novamap |= !(get_supported_rt_services() &
                         EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP);

        install_memreserve_table();
        efi_info("Memreserve table installed\n");

        efi_info("Booting DragonOS kernel...\n");
        status = efi_boot_kernel(handle, loaded_image, payload_info,
                                 cmdline_ptr);

        // free_screen_info(si);
        return status;
}
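
/*
 * Illustrative sketch (not part of the original stub): how the pieces in this
 * file fit together from a hypothetical architecture entry point. The
 * "example_" function is an assumption, as is the aliasing of EFI_LOADED_IMAGE
 * and efi_loaded_image_t to the same protocol structure; the real entry point
 * lives elsewhere in DragonStub.
 */
#if 0 /* example only */
static efi_status_t example_boot_flow(efi_handle_t handle,
                                      efi_loaded_image_t *loaded_image)
{
        struct payload_info payload;
        char *cmdline = NULL;
        efi_status_t status;

        /* Locate the embedded kernel ELF and record the image geometry. */
        status = find_payload(handle, loaded_image, &payload);
        if (status != EFI_SUCCESS)
                return status;

        /* Fetch and parse the command line from the LOADED_IMAGE protocol. */
        status = efi_handle_cmdline(loaded_image, &cmdline);
        if (status != EFI_SUCCESS)
                return status;

        /* Install config tables and boot the DragonOS kernel. */
        return efi_stub_common(handle, loaded_image, &payload, cmdline);
}
#endif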