
//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
#
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
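# The offsets used below assume the Registers_x86 saved-register layout this
# code was written against: 4-byte slots with eax=0, ebx=4, ecx=8, edx=12,
# edi=16, esi=20, ebp=24, esp=28, and eip=40.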
  movl 4(%esp), %eax
  # set up eax and ret on new stack location
  movl 28(%eax), %edx # edx holds new stack pointer
  subl $8,%edx
  movl %edx, 28(%eax)
  movl 0(%eax), %ebx
  movl %ebx, 0(%edx)
  movl 40(%eax), %ebx
  movl %ebx, 4(%edx)
  # we now have ret and eax pushed onto where new stack will be
  # restore all registers
  movl 4(%eax), %ebx
  movl 8(%eax), %ecx
  movl 12(%eax), %edx
  movl 16(%eax), %edi
  movl 20(%eax), %esi
  movl 24(%eax), %ebp
  movl 28(%eax), %esp
  # skip ss
  # skip eflags
  pop %eax  # eax was already pushed on new stack
  ret       # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
#
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
#
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif
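# The offsets used below assume the Registers_x86_64 saved-register layout this
# code was written against: 8-byte GPR slots starting at 0 (rax, rbx, rcx, rdx,
# rdi, rsi, rbp, rsp, then r8-r15), rip at 128, and (on Win64) 16-byte xmm
# slots starting at 176.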
  movq 56(%rdi), %rax # rax holds new stack pointer
  subq $16, %rax
  movq %rax, 56(%rdi)
  movq 32(%rdi), %rbx # store new rdi on new stack
  movq %rbx, 0(%rax)
  movq 128(%rdi), %rbx # store new rip on new stack
  movq %rbx, 8(%rax)
  # restore all registers
  movq 0(%rdi), %rax
  movq 8(%rdi), %rbx
  movq 16(%rdi), %rcx
  movq 24(%rdi), %rdx
  # restore rdi later
  movq 40(%rdi), %rsi
  movq 48(%rdi), %rbp
  # restore rsp later
  movq 64(%rdi), %r8
  movq 72(%rdi), %r9
  movq 80(%rdi), %r10
  movq 88(%rdi), %r11
  movq 96(%rdi), %r12
  movq 104(%rdi), %r13
  movq 112(%rdi), %r14
  movq 120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs
#if defined(_WIN64)
  movdqu 176(%rdi), %xmm0
  movdqu 192(%rdi), %xmm1
  movdqu 208(%rdi), %xmm2
  movdqu 224(%rdi), %xmm3
  movdqu 240(%rdi), %xmm4
  movdqu 256(%rdi), %xmm5
  movdqu 272(%rdi), %xmm6
  movdqu 288(%rdi), %xmm7
  movdqu 304(%rdi), %xmm8
  movdqu 320(%rdi), %xmm9
  movdqu 336(%rdi), %xmm10
  movdqu 352(%rdi), %xmm11
  movdqu 368(%rdi), %xmm12
  movdqu 384(%rdi), %xmm13
  movdqu 400(%rdi), %xmm14
  movdqu 416(%rdi), %xmm15
#endif
  movq 56(%rdi), %rsp # cut back rsp to new location
  pop %rdi            # rdi was saved here earlier
  ret                 # rip was saved here

#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld n, (8 * (n + 2))(3)
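// For example, PPC64_LR(6) expands to "ld 6, 64(3)": GPR slots are 8 bytes
// wide and start 16 bytes into the context, so register n lives at 8 * (n + 2).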
// restore integral registers
// skip r0 for now
// skip r1 for now
  PPC64_LR(2)
// skip r3 for now
// skip r4 for now
// skip r5 for now
  PPC64_LR(6)
  PPC64_LR(7)
  PPC64_LR(8)
  PPC64_LR(9)
  PPC64_LR(10)
  PPC64_LR(11)
  PPC64_LR(12)
  PPC64_LR(13)
  PPC64_LR(14)
  PPC64_LR(15)
  PPC64_LR(16)
  PPC64_LR(17)
  PPC64_LR(18)
  PPC64_LR(19)
  PPC64_LR(20)
  PPC64_LR(21)
  PPC64_LR(22)
  PPC64_LR(23)
  PPC64_LR(24)
  PPC64_LR(25)
  PPC64_LR(26)
  PPC64_LR(27)
  PPC64_LR(28)
  PPC64_LR(29)
  PPC64_LR(30)
  PPC64_LR(31)

#if defined(__VSX__)

// restore VS registers
// (note that this also restores floating point registers and V registers,
// because part of VS is mapped to these registers)

  addi 4, 3, PPC64_OFFS_FP

// load VS register
#define PPC64_LVS(n) \
  lxvd2x n, 0, 4 ;\
  addi 4, 4, 16
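// For example, PPC64_LVS(0) expands to "lxvd2x 0, 0, 4 ; addi 4, 4, 16",
// loading vs0 from the address in r4 and then advancing r4 by 16 bytes.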
// restore the first 32 VS regs (and also all floating point regs)
  PPC64_LVS(0)
  PPC64_LVS(1)
  PPC64_LVS(2)
  PPC64_LVS(3)
  PPC64_LVS(4)
  PPC64_LVS(5)
  PPC64_LVS(6)
  PPC64_LVS(7)
  PPC64_LVS(8)
  PPC64_LVS(9)
  PPC64_LVS(10)
  PPC64_LVS(11)
  PPC64_LVS(12)
  PPC64_LVS(13)
  PPC64_LVS(14)
  PPC64_LVS(15)
  PPC64_LVS(16)
  PPC64_LVS(17)
  PPC64_LVS(18)
  PPC64_LVS(19)
  PPC64_LVS(20)
  PPC64_LVS(21)
  PPC64_LVS(22)
  PPC64_LVS(23)
  PPC64_LVS(24)
  PPC64_LVS(25)
  PPC64_LVS(26)
  PPC64_LVS(27)
  PPC64_LVS(28)
  PPC64_LVS(29)
  PPC64_LVS(30)
  PPC64_LVS(31)

// use VRSAVE to conditionally restore the remaining VS regs,
// that are where the V regs are mapped
  ld 5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq Lnovec

// conditionally load VS
#define PPC64_CLVS_BOTTOM(n) \
  beq Ldone##n ;\
  addi 4, 3, PPC64_OFFS_FP + n * 16 ;\
  lxvd2x n, 0, 4 ;\
Ldone##n:
#define PPC64_CLVSl(n) \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n)) ;\
  PPC64_CLVS_BOTTOM(n)
#define PPC64_CLVSh(n) \
  andi. 0, 5, (1 PPC_LEFT_SHIFT(63-n)) ;\
  PPC64_CLVS_BOTTOM(n)
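// For example, PPC64_CLVSl(32) expands to "andis. 0, 5, (1 PPC_LEFT_SHIFT(15))"
// followed by PPC64_CLVS_BOTTOM(32): vs32 aliases v0, so the test checks the
// VRSAVE bit for v0 and the lxvd2x from PPC64_OFFS_FP + 32 * 16 is skipped
// when that bit is clear.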
  PPC64_CLVSl(32)
  PPC64_CLVSl(33)
  PPC64_CLVSl(34)
  PPC64_CLVSl(35)
  PPC64_CLVSl(36)
  PPC64_CLVSl(37)
  PPC64_CLVSl(38)
  PPC64_CLVSl(39)
  PPC64_CLVSl(40)
  PPC64_CLVSl(41)
  PPC64_CLVSl(42)
  PPC64_CLVSl(43)
  PPC64_CLVSl(44)
  PPC64_CLVSl(45)
  PPC64_CLVSl(46)
  PPC64_CLVSl(47)
  PPC64_CLVSh(48)
  PPC64_CLVSh(49)
  PPC64_CLVSh(50)
  PPC64_CLVSh(51)
  PPC64_CLVSh(52)
  PPC64_CLVSh(53)
  PPC64_CLVSh(54)
  PPC64_CLVSh(55)
  PPC64_CLVSh(56)
  PPC64_CLVSh(57)
  PPC64_CLVSh(58)
  PPC64_CLVSh(59)
  PPC64_CLVSh(60)
  PPC64_CLVSh(61)
  PPC64_CLVSh(62)
  PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd n, (PPC64_OFFS_FP + n * 16)(3)

// restore float registers
  PPC64_LF(0)
  PPC64_LF(1)
  PPC64_LF(2)
  PPC64_LF(3)
  PPC64_LF(4)
  PPC64_LF(5)
  PPC64_LF(6)
  PPC64_LF(7)
  PPC64_LF(8)
  PPC64_LF(9)
  PPC64_LF(10)
  PPC64_LF(11)
  PPC64_LF(12)
  PPC64_LF(13)
  PPC64_LF(14)
  PPC64_LF(15)
  PPC64_LF(16)
  PPC64_LF(17)
  PPC64_LF(18)
  PPC64_LF(19)
  PPC64_LF(20)
  PPC64_LF(21)
  PPC64_LF(22)
  PPC64_LF(23)
  PPC64_LF(24)
  PPC64_LF(25)
  PPC64_LF(26)
  PPC64_LF(27)
  PPC64_LF(28)
  PPC64_LF(29)
  PPC64_LF(30)
  PPC64_LF(31)

#if defined(__ALTIVEC__)

// restore vector registers if any are in use
  ld 5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq Lnovec

  subi 4, 1, 16
// r4 is now a 16-byte aligned pointer into the red zone
// the _vectorScalarRegisters may not be 16-byte aligned
// so copy via red zone temp buffer

#define PPC64_CLV_UNALIGNED_BOTTOM(n) \
  beq Ldone##n ;\
  ld 0, (PPC64_OFFS_V + n * 16)(3) ;\
  std 0, 0(4) ;\
  ld 0, (PPC64_OFFS_V + n * 16 + 8)(3) ;\
  std 0, 8(4) ;\
  lvx n, 0, 4 ;\
Ldone ## n:
#define PPC64_CLV_UNALIGNEDl(n) \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n)) ;\
  PPC64_CLV_UNALIGNED_BOTTOM(n)
#define PPC64_CLV_UNALIGNEDh(n) \
  andi. 0, 5, (1 PPC_LEFT_SHIFT(31-n)) ;\
  PPC64_CLV_UNALIGNED_BOTTOM(n)
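// For example, PPC64_CLV_UNALIGNEDl(0) tests the VRSAVE bit for v0 and, if it
// is set, copies the saved 16 bytes at PPC64_OFFS_V through the aligned
// red-zone buffer at r4 before reloading v0 with lvx (lvx ignores the low four
// address bits, so the source it reads from must be 16-byte aligned).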
  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

#endif
#endif

Lnovec:
  ld 0, PPC64_OFFS_CR(3)
  mtcr 0
  ld 0, PPC64_OFFS_SRR0(3)
  mtctr 0
  PPC64_LR(0)
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
  PPC64_LR(3)
  bctr

#elif defined(__ppc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// restore integral registers
// skip r0 for now
// skip r1 for now
  lwz 2, 16(3)
// skip r3 for now
// skip r4 for now
// skip r5 for now
  lwz 6, 32(3)
  lwz 7, 36(3)
  lwz 8, 40(3)
  lwz 9, 44(3)
  lwz 10, 48(3)
  lwz 11, 52(3)
  lwz 12, 56(3)
  lwz 13, 60(3)
  lwz 14, 64(3)
  lwz 15, 68(3)
  lwz 16, 72(3)
  lwz 17, 76(3)
  lwz 18, 80(3)
  lwz 19, 84(3)
  lwz 20, 88(3)
  lwz 21, 92(3)
  lwz 22, 96(3)
  lwz 23,100(3)
  lwz 24,104(3)
  lwz 25,108(3)
  lwz 26,112(3)
  lwz 27,116(3)
  lwz 28,120(3)
  lwz 29,124(3)
  lwz 30,128(3)
  lwz 31,132(3)

#ifndef __NO_FPRS__
// restore float registers
  lfd 0, 160(3)
  lfd 1, 168(3)
  lfd 2, 176(3)
  lfd 3, 184(3)
  lfd 4, 192(3)
  lfd 5, 200(3)
  lfd 6, 208(3)
  lfd 7, 216(3)
  lfd 8, 224(3)
  lfd 9, 232(3)
  lfd 10,240(3)
  lfd 11,248(3)
  lfd 12,256(3)
  lfd 13,264(3)
  lfd 14,272(3)
  lfd 15,280(3)
  lfd 16,288(3)
  lfd 17,296(3)
  lfd 18,304(3)
  lfd 19,312(3)
  lfd 20,320(3)
  lfd 21,328(3)
  lfd 22,336(3)
  lfd 23,344(3)
  lfd 24,352(3)
  lfd 25,360(3)
  lfd 26,368(3)
  lfd 27,376(3)
  lfd 28,384(3)
  lfd 29,392(3)
  lfd 30,400(3)
  lfd 31,408(3)
#endif

#if defined(__ALTIVEC__)
// restore vector registers if any are in use
  lwz 5, 156(3)         // test VRsave
  cmpwi 5, 0
  beq Lnovec

  subi 4, 1, 16
  rlwinm 4, 4, 0, 0, 27 // mask low 4-bits
// r4 is now a 16-byte aligned pointer into the red zone
// the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer

#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-_index)) SEPARATOR \
  beq Ldone ## _index SEPARATOR \
  lwz 0, 424+_index*16(3) SEPARATOR \
  stw 0, 0(4) SEPARATOR \
  lwz 0, 424+_index*16+4(3) SEPARATOR \
  stw 0, 4(4) SEPARATOR \
  lwz 0, 424+_index*16+8(3) SEPARATOR \
  stw 0, 8(4) SEPARATOR \
  lwz 0, 424+_index*16+12(3) SEPARATOR \
  stw 0, 12(4) SEPARATOR \
  lvx _index, 0, 4 SEPARATOR \
Ldone ## _index:
#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi. 0, 5, (1 PPC_LEFT_SHIFT(31-_index)) SEPARATOR \
  beq Ldone ## _index SEPARATOR \
  lwz 0, 424+_index*16(3) SEPARATOR \
  stw 0, 0(4) SEPARATOR \
  lwz 0, 424+_index*16+4(3) SEPARATOR \
  stw 0, 4(4) SEPARATOR \
  lwz 0, 424+_index*16+8(3) SEPARATOR \
  stw 0, 8(4) SEPARATOR \
  lwz 0, 424+_index*16+12(3) SEPARATOR \
  stw 0, 12(4) SEPARATOR \
  lvx _index, 0, 4 SEPARATOR \
Ldone ## _index:
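// For example, LOAD_VECTOR_UNALIGNEDl(0) tests the VRSAVE bit for v0 and, if
// it is set, copies the four words saved at 424(r3) into the aligned red-zone
// buffer at r4 and then reloads v0 from there with lvx.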
  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)
#endif

Lnovec:
  lwz 0, 136(3) // __cr
  mtcr 0
  lwz 0, 148(3) // __ctr
  mtctr 0
  lwz 0, 0(3)   // __ssr0
  mtctr 0
  lwz 0, 8(3)   // do r0 now
  lwz 5, 28(3)  // do r5 now
  lwz 4, 24(3)  // do r4 now
  lwz 1, 12(3)  // do sp now
  lwz 3, 20(3)  // do r3 last
  bctr

#elif defined(__aarch64__)

//
// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
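  // The offsets used below assume the Registers_arm64 layout this code was
  // written against: x0-x29 in consecutive 8-byte slots from offset 0, sp at
  // 0x0F8, pc at 0x100, and the low 64 bits of v0-v31 (d0-d31) from 0x110.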
  // skip restore of x0,x1 for now
  ldp x2, x3,  [x0, #0x010]
  ldp x4, x5,  [x0, #0x020]
  ldp x6, x7,  [x0, #0x030]
  ldp x8, x9,  [x0, #0x040]
  ldp x10,x11, [x0, #0x050]
  ldp x12,x13, [x0, #0x060]
  ldp x14,x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp x18,x19, [x0, #0x090]
  ldp x20,x21, [x0, #0x0A0]
  ldp x22,x23, [x0, #0x0B0]
  ldp x24,x25, [x0, #0x0C0]
  ldp x26,x27, [x0, #0x0D0]
  ldp x28,x29, [x0, #0x0E0]
  ldr x30,     [x0, #0x100] // restore pc into lr
  ldp d0, d1,  [x0, #0x110]
  ldp d2, d3,  [x0, #0x120]
  ldp d4, d5,  [x0, #0x130]
  ldp d6, d7,  [x0, #0x140]
  ldp d8, d9,  [x0, #0x150]
  ldp d10,d11, [x0, #0x160]
  ldp d12,d13, [x0, #0x170]
  ldp d14,d15, [x0, #0x180]
  ldp d16,d17, [x0, #0x190]
  ldp d18,d19, [x0, #0x1A0]
  ldp d20,d21, [x0, #0x1B0]
  ldp d22,d23, [x0, #0x1C0]
  ldp d24,d25, [x0, #0x1D0]
  ldp d26,d27, [x0, #0x1E0]
  ldp d28,d29, [x0, #0x1F0]
  ldr d30,     [x0, #0x200]
  ldr d31,     [x0, #0x208]
  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr x16,     [x0, #0x0F8]
  ldp x0, x1,  [x0, #0x000] // restore x0,x1
  mov sp,x16                // restore sp
  ret x30                   // jump to pc
#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3        @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60] @ restore pc into lr
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instruction using the corresponding coprocessor mnemonic.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8 @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8 @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8 @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8 @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8 @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8 @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4 @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4 @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif
#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#
  # restore integral registers
  l.lwz r0, 0(r3)
  l.lwz r1, 4(r3)
  l.lwz r2, 8(r3)
  # skip r3 for now
  l.lwz r4, 16(r3)
  l.lwz r5, 20(r3)
  l.lwz r6, 24(r3)
  l.lwz r7, 28(r3)
  l.lwz r8, 32(r3)
  # skip r9
  l.lwz r10, 40(r3)
  l.lwz r11, 44(r3)
  l.lwz r12, 48(r3)
  l.lwz r13, 52(r3)
  l.lwz r14, 56(r3)
  l.lwz r15, 60(r3)
  l.lwz r16, 64(r3)
  l.lwz r17, 68(r3)
  l.lwz r18, 72(r3)
  l.lwz r19, 76(r3)
  l.lwz r20, 80(r3)
  l.lwz r21, 84(r3)
  l.lwz r22, 88(r3)
  l.lwz r23, 92(r3)
  l.lwz r24, 96(r3)
  l.lwz r25,100(r3)
  l.lwz r26,104(r3)
  l.lwz r27,108(r3)
  l.lwz r28,112(r3)
  l.lwz r29,116(r3)
  l.lwz r30,120(r3)
  l.lwz r31,124(r3)
  # load new pc into ra (while r3 still points at the context)
  l.lwz r9, 128(r3)
  # at last, restore r3
  l.lwz r3, 12(r3)
  # jump to pc
  l.jr r9
   l.nop
#elif defined(__hexagon__)

# On entry:
#  thread_state pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
#
# void libunwind::Registers_hexagon::jumpto()
#
  r8 = memw(r0+#32)
  r9 = memw(r0+#36)
  r10 = memw(r0+#40)
  r11 = memw(r0+#44)
  r12 = memw(r0+#48)
  r13 = memw(r0+#52)
  r14 = memw(r0+#56)
  r15 = memw(r0+#60)
  r16 = memw(r0+#64)
  r17 = memw(r0+#68)
  r18 = memw(r0+#72)
  r19 = memw(r0+#76)
  r20 = memw(r0+#80)
  r21 = memw(r0+#84)
  r22 = memw(r0+#88)
  r23 = memw(r0+#92)
  r24 = memw(r0+#96)
  r25 = memw(r0+#100)
  r26 = memw(r0+#104)
  r27 = memw(r0+#108)
  r28 = memw(r0+#112)
  r29 = memw(r0+#116)
  r30 = memw(r0+#120)
  r31 = memw(r0+#132)
  r1 = memw(r0+#128)
  c4 = r1  // Predicate register
  r1 = memw(r0+#4)
  r0 = memw(r0)
  jumpr r31

#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
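  // The offsets used below assume the Registers_mips_o32 layout this code was
  // written against: 32 4-byte GPR slots from offset 0, pc at 4*32, hi at
  // 4*33, lo at 4*34, and 8-byte FPR slots starting at 4*36.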
#ifdef __mips_hard_float
#if __mips_fpr != 64
  ldc1 $f0, (4 * 36 + 8 * 0)($4)
  ldc1 $f2, (4 * 36 + 8 * 2)($4)
  ldc1 $f4, (4 * 36 + 8 * 4)($4)
  ldc1 $f6, (4 * 36 + 8 * 6)($4)
  ldc1 $f8, (4 * 36 + 8 * 8)($4)
  ldc1 $f10, (4 * 36 + 8 * 10)($4)
  ldc1 $f12, (4 * 36 + 8 * 12)($4)
  ldc1 $f14, (4 * 36 + 8 * 14)($4)
  ldc1 $f16, (4 * 36 + 8 * 16)($4)
  ldc1 $f18, (4 * 36 + 8 * 18)($4)
  ldc1 $f20, (4 * 36 + 8 * 20)($4)
  ldc1 $f22, (4 * 36 + 8 * 22)($4)
  ldc1 $f24, (4 * 36 + 8 * 24)($4)
  ldc1 $f26, (4 * 36 + 8 * 26)($4)
  ldc1 $f28, (4 * 36 + 8 * 28)($4)
  ldc1 $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1 $f0, (4 * 36 + 8 * 0)($4)
  ldc1 $f1, (4 * 36 + 8 * 1)($4)
  ldc1 $f2, (4 * 36 + 8 * 2)($4)
  ldc1 $f3, (4 * 36 + 8 * 3)($4)
  ldc1 $f4, (4 * 36 + 8 * 4)($4)
  ldc1 $f5, (4 * 36 + 8 * 5)($4)
  ldc1 $f6, (4 * 36 + 8 * 6)($4)
  ldc1 $f7, (4 * 36 + 8 * 7)($4)
  ldc1 $f8, (4 * 36 + 8 * 8)($4)
  ldc1 $f9, (4 * 36 + 8 * 9)($4)
  ldc1 $f10, (4 * 36 + 8 * 10)($4)
  ldc1 $f11, (4 * 36 + 8 * 11)($4)
  ldc1 $f12, (4 * 36 + 8 * 12)($4)
  ldc1 $f13, (4 * 36 + 8 * 13)($4)
  ldc1 $f14, (4 * 36 + 8 * 14)($4)
  ldc1 $f15, (4 * 36 + 8 * 15)($4)
  ldc1 $f16, (4 * 36 + 8 * 16)($4)
  ldc1 $f17, (4 * 36 + 8 * 17)($4)
  ldc1 $f18, (4 * 36 + 8 * 18)($4)
  ldc1 $f19, (4 * 36 + 8 * 19)($4)
  ldc1 $f20, (4 * 36 + 8 * 20)($4)
  ldc1 $f21, (4 * 36 + 8 * 21)($4)
  ldc1 $f22, (4 * 36 + 8 * 22)($4)
  ldc1 $f23, (4 * 36 + 8 * 23)($4)
  ldc1 $f24, (4 * 36 + 8 * 24)($4)
  ldc1 $f25, (4 * 36 + 8 * 25)($4)
  ldc1 $f26, (4 * 36 + 8 * 26)($4)
  ldc1 $f27, (4 * 36 + 8 * 27)($4)
  ldc1 $f28, (4 * 36 + 8 * 28)($4)
  ldc1 $f29, (4 * 36 + 8 * 29)($4)
  ldc1 $f30, (4 * 36 + 8 * 30)($4)
  ldc1 $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  // restore hi and lo
  lw $8, (4 * 33)($4)
  mthi $8
  lw $8, (4 * 34)($4)
  mtlo $8
  // r0 is zero
  lw $1, (4 * 1)($4)
  lw $2, (4 * 2)($4)
  lw $3, (4 * 3)($4)
  // skip a0 for now
  lw $5, (4 * 5)($4)
  lw $6, (4 * 6)($4)
  lw $7, (4 * 7)($4)
  lw $8, (4 * 8)($4)
  lw $9, (4 * 9)($4)
  lw $10, (4 * 10)($4)
  lw $11, (4 * 11)($4)
  lw $12, (4 * 12)($4)
  lw $13, (4 * 13)($4)
  lw $14, (4 * 14)($4)
  lw $15, (4 * 15)($4)
  lw $16, (4 * 16)($4)
  lw $17, (4 * 17)($4)
  lw $18, (4 * 18)($4)
  lw $19, (4 * 19)($4)
  lw $20, (4 * 20)($4)
  lw $21, (4 * 21)($4)
  lw $22, (4 * 22)($4)
  lw $23, (4 * 23)($4)
  lw $24, (4 * 24)($4)
  lw $25, (4 * 25)($4)
  lw $26, (4 * 26)($4)
  lw $27, (4 * 27)($4)
  lw $28, (4 * 28)($4)
  lw $29, (4 * 29)($4)
  lw $30, (4 * 30)($4)
  // load new pc into ra
  lw $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr $31
  lw $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
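  // The offsets used below assume the Registers_mips_newabi layout this code
  // was written against: 32 8-byte GPR slots from offset 0, pc at 8*32, hi at
  // 8*33, lo at 8*34, and 8-byte FPR slots starting at 8*35.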
#ifdef __mips_hard_float
  ldc1 $f0, (8 * 35)($4)
  ldc1 $f1, (8 * 36)($4)
  ldc1 $f2, (8 * 37)($4)
  ldc1 $f3, (8 * 38)($4)
  ldc1 $f4, (8 * 39)($4)
  ldc1 $f5, (8 * 40)($4)
  ldc1 $f6, (8 * 41)($4)
  ldc1 $f7, (8 * 42)($4)
  ldc1 $f8, (8 * 43)($4)
  ldc1 $f9, (8 * 44)($4)
  ldc1 $f10, (8 * 45)($4)
  ldc1 $f11, (8 * 46)($4)
  ldc1 $f12, (8 * 47)($4)
  ldc1 $f13, (8 * 48)($4)
  ldc1 $f14, (8 * 49)($4)
  ldc1 $f15, (8 * 50)($4)
  ldc1 $f16, (8 * 51)($4)
  ldc1 $f17, (8 * 52)($4)
  ldc1 $f18, (8 * 53)($4)
  ldc1 $f19, (8 * 54)($4)
  ldc1 $f20, (8 * 55)($4)
  ldc1 $f21, (8 * 56)($4)
  ldc1 $f22, (8 * 57)($4)
  ldc1 $f23, (8 * 58)($4)
  ldc1 $f24, (8 * 59)($4)
  ldc1 $f25, (8 * 60)($4)
  ldc1 $f26, (8 * 61)($4)
  ldc1 $f27, (8 * 62)($4)
  ldc1 $f28, (8 * 63)($4)
  ldc1 $f29, (8 * 64)($4)
  ldc1 $f30, (8 * 65)($4)
  ldc1 $f31, (8 * 66)($4)
#endif
  // restore hi and lo
  ld $8, (8 * 33)($4)
  mthi $8
  ld $8, (8 * 34)($4)
  mtlo $8
  // r0 is zero
  ld $1, (8 * 1)($4)
  ld $2, (8 * 2)($4)
  ld $3, (8 * 3)($4)
  // skip a0 for now
  ld $5, (8 * 5)($4)
  ld $6, (8 * 6)($4)
  ld $7, (8 * 7)($4)
  ld $8, (8 * 8)($4)
  ld $9, (8 * 9)($4)
  ld $10, (8 * 10)($4)
  ld $11, (8 * 11)($4)
  ld $12, (8 * 12)($4)
  ld $13, (8 * 13)($4)
  ld $14, (8 * 14)($4)
  ld $15, (8 * 15)($4)
  ld $16, (8 * 16)($4)
  ld $17, (8 * 17)($4)
  ld $18, (8 * 18)($4)
  ld $19, (8 * 19)($4)
  ld $20, (8 * 20)($4)
  ld $21, (8 * 21)($4)
  ld $22, (8 * 22)($4)
  ld $23, (8 * 23)($4)
  ld $24, (8 * 24)($4)
  ld $25, (8 * 25)($4)
  ld $26, (8 * 26)($4)
  ld $27, (8 * 27)($4)
  ld $28, (8 * 28)($4)
  ld $29, (8 * 29)($4)
  ld $30, (8 * 30)($4)
  // load new pc into ra
  ld $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr $31
  ld $4, (8 * 4)($4)
  .set pop
#elif defined(__sparc__)

//
// void libunwind::Registers_sparc::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
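  // "ta 3" (software trap 3) asks the kernel to flush the register windows to
  // the stack so the saved window registers below can be reloaded safely.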
  ta 3
  ldd [%o0 + 64], %l0
  ldd [%o0 + 72], %l2
  ldd [%o0 + 80], %l4
  ldd [%o0 + 88], %l6
  ldd [%o0 + 96], %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60], %o7
  jmp %o7
   nop

#elif defined(__riscv)

//
// void libunwind::Registers_riscv::jumpto()
//
// On entry:
//  thread_state pointer is in a0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
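  // ILOAD/FLOAD are assumed to be defined in assembly.h as the integer and
  // floating-point load instructions matching the target's XLEN/FLEN
  // (e.g. ld/lw and fld/flw), with RISCV_ISIZE/RISCV_FSIZE the slot sizes.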
# if defined(__riscv_flen)
  FLOAD f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
  FLOAD f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
  FLOAD f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
  FLOAD f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
  FLOAD f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
  FLOAD f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
  FLOAD f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
  FLOAD f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
  FLOAD f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
  FLOAD f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
  FLOAD f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
  FLOAD f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
  FLOAD f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
  FLOAD f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
  FLOAD f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
  FLOAD f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
  FLOAD f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
  FLOAD f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
  FLOAD f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
  FLOAD f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
  FLOAD f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
  FLOAD f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
  FLOAD f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
  FLOAD f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
  FLOAD f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
  FLOAD f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
  FLOAD f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
  FLOAD f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
  FLOAD f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
  FLOAD f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
  FLOAD f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
  FLOAD f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
# endif

  // x0 is zero
  ILOAD x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
  ILOAD x2, (RISCV_ISIZE * 2)(a0)
  ILOAD x3, (RISCV_ISIZE * 3)(a0)
  ILOAD x4, (RISCV_ISIZE * 4)(a0)
  ILOAD x5, (RISCV_ISIZE * 5)(a0)
  ILOAD x6, (RISCV_ISIZE * 6)(a0)
  ILOAD x7, (RISCV_ISIZE * 7)(a0)
  ILOAD x8, (RISCV_ISIZE * 8)(a0)
  ILOAD x9, (RISCV_ISIZE * 9)(a0)
  // skip a0 for now
  ILOAD x11, (RISCV_ISIZE * 11)(a0)
  ILOAD x12, (RISCV_ISIZE * 12)(a0)
  ILOAD x13, (RISCV_ISIZE * 13)(a0)
  ILOAD x14, (RISCV_ISIZE * 14)(a0)
  ILOAD x15, (RISCV_ISIZE * 15)(a0)
  ILOAD x16, (RISCV_ISIZE * 16)(a0)
  ILOAD x17, (RISCV_ISIZE * 17)(a0)
  ILOAD x18, (RISCV_ISIZE * 18)(a0)
  ILOAD x19, (RISCV_ISIZE * 19)(a0)
  ILOAD x20, (RISCV_ISIZE * 20)(a0)
  ILOAD x21, (RISCV_ISIZE * 21)(a0)
  ILOAD x22, (RISCV_ISIZE * 22)(a0)
  ILOAD x23, (RISCV_ISIZE * 23)(a0)
  ILOAD x24, (RISCV_ISIZE * 24)(a0)
  ILOAD x25, (RISCV_ISIZE * 25)(a0)
  ILOAD x26, (RISCV_ISIZE * 26)(a0)
  ILOAD x27, (RISCV_ISIZE * 27)(a0)
  ILOAD x28, (RISCV_ISIZE * 28)(a0)
  ILOAD x29, (RISCV_ISIZE * 29)(a0)
  ILOAD x30, (RISCV_ISIZE * 30)(a0)
  ILOAD x31, (RISCV_ISIZE * 31)(a0)
  ILOAD x10, (RISCV_ISIZE * 10)(a0) // restore a0
  ret                               // jump to ra

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE