// mod.rs

#[allow(warnings)]
#[cfg(target_pointer_width = "16")]
type c_int = i16;
#[allow(warnings)]
#[cfg(not(target_pointer_width = "16"))]
type c_int = i32;

use core::intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div};
use core::mem;
use core::ops::{BitOr, Shl};

// memcpy/memmove/memset have optimized implementations on some architectures
#[cfg_attr(
    all(not(feature = "no-asm"), target_arch = "x86_64"),
    path = "x86_64.rs"
)]
mod impls;

#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
    impls::copy_forward(dest, src, n);
    dest
}

#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
    let delta = (dest as usize).wrapping_sub(src as usize);
    if delta >= n {
        // We can copy forwards because either dest is far enough ahead of src,
        // or src is ahead of dest (and delta overflowed).
        impls::copy_forward(dest, src, n);
    } else {
        impls::copy_backward(dest, src, n);
    }
    dest
}
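
// Worked example for the `wrapping_sub` overlap check in `memmove` above (addresses are
// illustrative): with dest = 0x2000, src = 0x1000 and n = 0x800, delta = 0x1000 >= n, so the
// non-overlapping regions are copied forward; with n = 0x1800 the regions overlap and
// delta < n forces a backward copy. When src is ahead of dest (e.g. dest = 0x1000,
// src = 0x2000), the subtraction wraps to a huge value, so a forward copy is always chosen,
// which is safe for that direction of overlap.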

#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memset(s: *mut u8, c: c_int, n: usize) -> *mut u8 {
    impls::set_bytes(s, c as u8, n);
    s
}

// Compare byte by byte; the sign of the result reflects the first pair of differing bytes.
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
    let mut i = 0;
    while i < n {
        let a = *s1.add(i);
        let b = *s2.add(i);
        if a != b {
            return a as i32 - b as i32;
        }
        i += 1;
    }
    0
}

// `bcmp` only has to distinguish "equal" (zero) from "not equal" (non-zero),
// so forwarding to `memcmp` is a valid implementation.
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn bcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
    memcmp(s1, s2, n)
}

// `bytes` must be a multiple of `mem::size_of::<T>()`
fn memcpy_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
    unsafe {
        let n = exact_div(bytes, mem::size_of::<T>());
        let mut i = 0;
        while i < n {
            atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
            i += 1;
        }
    }
}
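
// In the element-wise helpers above and below, `exact_div` is used instead of a plain `/`:
// the callers guarantee that `bytes` is an exact multiple of the element size (the intrinsic
// has undefined behavior otherwise), which lets the compiler drop all remainder handling.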

// `bytes` must be a multiple of `mem::size_of::<T>()`
fn memmove_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
    unsafe {
        let n = exact_div(bytes, mem::size_of::<T>());
        if src < dest as *const T {
            // copy from end
            let mut i = n;
            while i != 0 {
                i -= 1;
                atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
            }
        } else {
            // copy from beginning
            let mut i = 0;
            while i < n {
                atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
                i += 1;
            }
        }
    }
}

// `T` must be a primitive integer type, and `bytes` must be a multiple of `mem::size_of::<T>()`
fn memset_element_unordered_atomic<T>(s: *mut T, c: u8, bytes: usize)
where
    T: Copy + From<u8> + Shl<u32, Output = T> + BitOr<T, Output = T>,
{
    unsafe {
        let n = exact_div(bytes, mem::size_of::<T>());

        // Construct a value of type `T` consisting of repeated `c`
        // bytes, to let us ensure we write each `T` atomically.
        let mut x = T::from(c);
        let mut i = 1;
        while i < mem::size_of::<T>() {
            x = x << 8 | T::from(c);
            i += 1;
        }

        // Write it to `s`
        let mut i = 0;
        while i < n {
            atomic_store_unordered(s.add(i), x);
            i += 1;
        }
    }
}
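
// For example (illustrative values): with c = 0xAB and T = u32, the replication loop in
// `memset_element_unordered_atomic` produces x = 0xABAB_ABAB, so every 4-byte element is
// written with a single unordered atomic store instead of four separate byte stores.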

intrinsics! {
    #[cfg(target_has_atomic_load_store = "8")]
    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "16")]
    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "32")]
    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "64")]
    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "128")]
    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "8")]
    pub extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "16")]
    pub extern "C" fn __llvm_memmove_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "32")]
    pub extern "C" fn __llvm_memmove_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "64")]
    pub extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "128")]
    pub extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "8")]
    pub extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }

    #[cfg(target_has_atomic_load_store = "16")]
    pub extern "C" fn __llvm_memset_element_unordered_atomic_2(s: *mut u16, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }

    #[cfg(target_has_atomic_load_store = "32")]
    pub extern "C" fn __llvm_memset_element_unordered_atomic_4(s: *mut u32, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }

    #[cfg(target_has_atomic_load_store = "64")]
    pub extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }

    #[cfg(target_has_atomic_load_store = "128")]
    pub extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
}
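
// A minimal sanity-check sketch, not part of the original file: assuming the crate's normal
// test harness can build this module, it exercises `memcpy`, `memmove`, `memset` and
// `memcmp` on small stack buffers.
#[cfg(test)]
mod sanity {
    #[test]
    fn copy_move_set_compare() {
        unsafe {
            let src = [1u8, 2, 3, 4, 5, 6, 7, 8];
            let mut dst = [0u8; 8];

            // Copy all eight bytes and confirm the buffers now compare equal.
            super::memcpy(dst.as_mut_ptr(), src.as_ptr(), 8);
            assert_eq!(super::memcmp(dst.as_ptr(), src.as_ptr(), 8), 0);

            // Overlapping move: shift the first six bytes right by two.
            super::memmove(dst.as_mut_ptr().add(2), dst.as_ptr(), 6);
            assert_eq!(&dst[2..], &src[..6]);

            // Fill the first four bytes with 0xAA.
            super::memset(dst.as_mut_ptr(), 0xAA, 4);
            assert!(dst[..4].iter().all(|&b| b == 0xAA));
        }
    }
}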