arm.rs 7.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247
  1. use core::intrinsics;
  2. #[cfg(feature = "mem")]
  3. use mem::{memcpy, memmove, memset};
// NOTE: This function and the ones below are implemented using assembly because they use a custom
// calling convention which can't be implemented using a normal Rust function
/// AEABI unsigned 32-bit division + remainder.
///
/// Custom calling convention: on entry r0 = numerator, r1 = denominator;
/// on exit r0 = quotient, r1 = remainder (the test module below binds the
/// results from {r0}/{r1}).
#[naked]
#[cfg_attr(not(test), no_mangle)]
pub unsafe fn __aeabi_uidivmod() {
    // Reserve one stack word as the remainder out-parameter for
    // __udivmodsi4 (passed by pointer in r2), then load it into r1
    // before returning via `pop {pc}`.
    asm!("push {lr}
          sub sp, sp, #4
          mov r2, sp
          bl __udivmodsi4
          ldr r1, [sp]
          add sp, sp, #4
          pop {pc}");
    // Never reached: the asm above returns directly.
    intrinsics::unreachable();
}
/// AEABI unsigned 64-bit division + remainder.
///
/// Custom calling convention: numerator in r0:r1, denominator in r2:r3;
/// quotient is returned in r0:r1 and remainder in r2:r3 (the test module
/// below binds 64-bit values to {r0} and {r2}).
#[naked]
#[cfg_attr(not(test), no_mangle)]
pub unsafe fn __aeabi_uldivmod() {
    // Carve out a 16-byte stack area: [sp] holds the pointer argument for
    // __udivmoddi4's remainder out-parameter, which is written at
    // [sp, #8]..[sp, #16) and then loaded into r2:r3. Saving r4 alongside
    // lr also keeps sp 8-byte aligned across the call.
    asm!("push {r4, lr}
          sub sp, sp, #16
          add r4, sp, #8
          str r4, [sp]
          bl __udivmoddi4
          ldr r2, [sp, #8]
          ldr r3, [sp, #12]
          add sp, sp, #16
          pop {r4, pc}");
    // Never reached: the asm above returns directly.
    intrinsics::unreachable();
}
/// AEABI signed 32-bit division + remainder.
///
/// Custom calling convention: on entry r0 = numerator, r1 = denominator;
/// on exit r0 = quotient, r1 = remainder (see the test module below).
#[naked]
#[cfg_attr(not(test), no_mangle)]
pub unsafe fn __aeabi_idivmod() {
    // Save the original operands, let __divsi3 compute the quotient into
    // r0, then pop the saved operands (n -> r1, d -> r2) and derive the
    // remainder as n - (n / d) * d. r4 is pushed/popped only as padding.
    asm!("push {r0, r1, r4, lr}
          bl __divsi3
          pop {r1, r2}
          muls r2, r2, r0
          subs r1, r1, r2
          pop {r4, pc}");
    // Never reached: the asm above returns directly.
    intrinsics::unreachable();
}
/// AEABI signed 64-bit division + remainder.
///
/// Custom calling convention: numerator in r0:r1, denominator in r2:r3;
/// quotient returned in r0:r1 and remainder in r2:r3 (the test module
/// below binds 64-bit values to {r0} and {r2}).
#[naked]
#[cfg_attr(not(test), no_mangle)]
pub unsafe fn __aeabi_ldivmod() {
    // Same layout as __aeabi_uldivmod: [sp] carries the pointer to the
    // remainder slot at [sp, #8] for __divmoddi4, whose contents are then
    // loaded into r2:r3. Saving r4 keeps sp 8-byte aligned for the call.
    asm!("push {r4, lr}
          sub sp, sp, #16
          add r4, sp, #8
          str r4, [sp]
          bl __divmoddi4
          ldr r2, [sp, #8]
          ldr r3, [sp, #12]
          add sp, sp, #16
          pop {r4, pc}");
    // Never reached: the asm above returns directly.
    intrinsics::unreachable();
}
  57. // TODO: These aeabi_* functions should be defined as aliases
  58. #[cfg_attr(not(test), no_mangle)]
  59. pub extern "C" fn __aeabi_dadd(a: f64, b: f64) -> f64 {
  60. ::float::add::__adddf3(a, b)
  61. }
  62. #[cfg_attr(not(test), no_mangle)]
  63. pub extern "C" fn __aeabi_fadd(a: f32, b: f32) -> f32 {
  64. ::float::add::__addsf3(a, b)
  65. }
  66. #[cfg(not(all(feature = "c", target_arch = "arm", not(target_os = "ios"), not(thumbv6m))))]
  67. #[cfg_attr(not(test), no_mangle)]
  68. pub extern "C" fn __aeabi_idiv(a: i32, b: i32) -> i32 {
  69. ::int::sdiv::__divsi3(a, b)
  70. }
  71. #[cfg_attr(not(test), no_mangle)]
  72. pub extern "C" fn __aeabi_lasr(a: i64, b: u32) -> i64 {
  73. ::int::shift::__ashrdi3(a, b)
  74. }
  75. #[cfg_attr(not(test), no_mangle)]
  76. pub extern "C" fn __aeabi_llsl(a: u64, b: u32) -> u64 {
  77. ::int::shift::__ashldi3(a, b)
  78. }
  79. #[cfg_attr(not(test), no_mangle)]
  80. pub extern "C" fn __aeabi_llsr(a: u64, b: u32) -> u64 {
  81. ::int::shift::__lshrdi3(a, b)
  82. }
  83. #[cfg_attr(not(test), no_mangle)]
  84. pub extern "C" fn __aeabi_lmul(a: u64, b: u64) -> u64 {
  85. ::int::mul::__muldi3(a, b)
  86. }
  87. #[cfg(not(all(feature = "c", target_arch = "arm", not(target_os = "ios"), not(thumbv6m))))]
  88. #[cfg_attr(not(test), no_mangle)]
  89. pub extern "C" fn __aeabi_uidiv(a: u32, b: u32) -> u32 {
  90. ::int::udiv::__udivsi3(a, b)
  91. }
// When the "mem" feature is disabled, these symbols are expected to be
// supplied by some other object linked into the final binary (otherwise
// the crate's own `mem` module provides them — see the import at the top).
#[cfg(not(feature = "mem"))]
extern "C" {
    fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8;
    fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8;
    fn memset(dest: *mut u8, c: i32, n: usize) -> *mut u8;
}
  98. // FIXME: The `*4` and `*8` variants should be defined as aliases.
/// AEABI memcpy: forwards to `memcpy`, discarding its return value.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memcpy(dest: *mut u8, src: *const u8, n: usize) {
    memcpy(dest, src, n);
}
/// AEABI memcpy, `4`-suffixed (alignment-specialized) variant; this
/// implementation does not exploit the guarantee and simply forwards.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memcpy4(dest: *mut u8, src: *const u8, n: usize) {
    memcpy(dest, src, n);
}
/// AEABI memcpy, `8`-suffixed (alignment-specialized) variant; this
/// implementation does not exploit the guarantee and simply forwards.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memcpy8(dest: *mut u8, src: *const u8, n: usize) {
    memcpy(dest, src, n);
}
/// AEABI memmove: forwards to `memmove`, discarding its return value.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memmove(dest: *mut u8, src: *const u8, n: usize) {
    memmove(dest, src, n);
}
/// AEABI memmove, `4`-suffixed (alignment-specialized) variant; this
/// implementation does not exploit the guarantee and simply forwards.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memmove4(dest: *mut u8, src: *const u8, n: usize) {
    memmove(dest, src, n);
}
/// AEABI memmove, `8`-suffixed (alignment-specialized) variant; this
/// implementation does not exploit the guarantee and simply forwards.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memmove8(dest: *mut u8, src: *const u8, n: usize) {
    memmove(dest, src, n);
}
/// AEABI memset. Note the argument order: `(dest, n, c)`, i.e. the byte
/// count and fill value are swapped relative to C's `memset(dest, c, n)`.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memset(dest: *mut u8, n: usize, c: i32) {
    memset(dest, c, n);
}
/// AEABI memset, `4`-suffixed variant (same swapped `(dest, n, c)` argument
/// order); forwards to `memset` without exploiting any alignment guarantee.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memset4(dest: *mut u8, n: usize, c: i32) {
    memset(dest, c, n);
}
/// AEABI memset, `8`-suffixed variant (same swapped `(dest, n, c)` argument
/// order); forwards to `memset` without exploiting any alignment guarantee.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memset8(dest: *mut u8, n: usize, c: i32) {
    memset(dest, c, n);
}
/// AEABI memclr: zero-fill `n` bytes at `dest` via `memset`.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memclr(dest: *mut u8, n: usize) {
    memset(dest, 0, n);
}
/// AEABI memclr, `4`-suffixed variant: zero-fill `n` bytes at `dest`.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memclr4(dest: *mut u8, n: usize) {
    memset(dest, 0, n);
}
/// AEABI memclr, `8`-suffixed variant: zero-fill `n` bytes at `dest`.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memclr8(dest: *mut u8, n: usize) {
    memset(dest, 0, n);
}
#[cfg(test)]
mod tests {
    use quickcheck::TestResult;
    use qc::{U32, U64};

    // Each property calls the corresponding __aeabi_* routine directly via
    // a `bl` in inline asm — the routines use a custom calling convention
    // that cannot be expressed as a plain Rust call — and then compares the
    // quotient/remainder against Rust's own `/` and `%`.
    quickcheck!{
        fn uldivmod(n: U64, d: U64) -> TestResult {
            let (n, d) = (n.0, d.0);
            if d == 0 {
                // Division by zero is not a meaningful test case.
                TestResult::discard()
            } else {
                let q: u64;
                let r: u64;
                unsafe {
                    // The inline asm is a bit tricky here, LLVM will allocate
                    // both r0 and r1 when we specify a 64-bit value for {r0}.
                    asm!("bl __aeabi_uldivmod"
                         : "={r0}" (q), "={r2}" (r)
                         : "{r0}" (n), "{r2}" (d)
                         : "r12", "lr", "flags");
                }
                TestResult::from_bool(q == n / d && r == n % d)
            }
        }

        fn uidivmod(n: U32, d: U32) -> TestResult {
            let (n, d) = (n.0, d.0);
            if d == 0 {
                // Division by zero is not a meaningful test case.
                TestResult::discard()
            } else {
                let q: u32;
                let r: u32;
                unsafe {
                    asm!("bl __aeabi_uidivmod"
                         : "={r0}" (q), "={r1}" (r)
                         : "{r0}" (n), "{r1}" (d)
                         : "r2", "r3", "r12", "lr", "flags");
                }
                TestResult::from_bool(q == n / d && r == n % d)
            }
        }

        fn ldivmod(n: U64, d: U64) -> TestResult {
            let (n, d) = (n.0 as i64, d.0 as i64);
            if d == 0 {
                // Division by zero is not a meaningful test case.
                TestResult::discard()
            } else {
                let q: i64;
                let r: i64;
                unsafe {
                    // The inline asm is a bit tricky here, LLVM will allocate
                    // both r0 and r1 when we specify a 64-bit value for {r0}.
                    asm!("bl __aeabi_ldivmod"
                         : "={r0}" (q), "={r2}" (r)
                         : "{r0}" (n), "{r2}" (d)
                         : "r12", "lr", "flags");
                }
                TestResult::from_bool(q == n / d && r == n % d)
            }
        }

        fn idivmod(n: U32, d: U32) -> TestResult {
            let (n, d) = (n.0 as i32, d.0 as i32);
            // Also discard i32::MIN / -1, whose quotient overflows i32.
            if d == 0 || (n == i32::min_value() && d == -1) {
                TestResult::discard()
            } else {
                let q: i32;
                let r: i32;
                unsafe {
                    asm!("bl __aeabi_idivmod"
                         : "={r0}" (q), "={r1}" (r)
                         : "{r0}" (n), "{r1}" (d)
                         : "r2", "r3", "r12", "lr", "flags");
                }
                TestResult::from_bool(q == n / d && r == n % d)
            }
        }
    }
}