arm_linux.rs

use core::intrinsics;
use core::mem;

// Kernel-provided user-mode helper functions:
// https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool {
    let f: extern "C" fn(u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32);
    f(oldval, newval, ptr) == 0
}

unsafe fn __kuser_memory_barrier() {
    let f: extern "C" fn() = mem::transmute(0xffff0fa0u32);
    f();
}
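
// Per the kernel documentation linked above, these helpers live at fixed addresses in the
// vector page (0xffff0fc0 for cmpxchg, 0xffff0fa0 for the memory barrier) and, on modern
// kernels, are only present when built with CONFIG_KUSER_HELPERS=y. __kuser_cmpxchg
// returns zero when the store succeeded, hence the `== 0` check.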

// Word-align a pointer
fn align_ptr<T>(ptr: *mut T) -> *mut u32 {
    // This gives us a mask of 0 when T == u32 since the pointer is already
    // supposed to be aligned, which avoids any masking in that case.
    let ptr_mask = 3 & (4 - mem::size_of::<T>());
    (ptr as usize & !ptr_mask) as *mut u32
}

// Calculate the shift and mask of a value inside an aligned word
fn get_shift_mask<T>(ptr: *mut T) -> (u32, u32) {
    // Mask to get the low byte/halfword/word
    let mask = match mem::size_of::<T>() {
        1 => 0xff,
        2 => 0xffff,
        4 => 0xffffffff,
        _ => unreachable!(),
    };
    // If we are on big-endian then we need to adjust the shift accordingly
    let endian_adjust = if cfg!(target_endian = "little") {
        0
    } else {
        4 - mem::size_of::<T>() as u32
    };
    // Shift to get the desired element in the word
    let ptr_mask = 3 & (4 - mem::size_of::<T>());
    let shift = ((ptr as usize & ptr_mask) as u32 ^ endian_adjust) * 8;
    (shift, mask)
}
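
// Worked example: for a `u8` at an address ending in ...2 on a little-endian target,
// `get_shift_mask` yields `shift == 16` and `mask == 0xff`, i.e. the byte occupies bits
// 16..24 of its containing word; on big-endian the XOR adjustment gives `shift == 8`.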

// Extract a value from an aligned word
fn extract_aligned(aligned: u32, shift: u32, mask: u32) -> u32 {
    (aligned >> shift) & mask
}

// Insert a value into an aligned word
fn insert_aligned(aligned: u32, val: u32, shift: u32, mask: u32) -> u32 {
    (aligned & !(mask << shift)) | ((val & mask) << shift)
}
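
// For instance, extract_aligned(0x1122_3344, 8, 0xff) == 0x33, and
// insert_aligned(0x1122_3344, 0xaa, 8, 0xff) == 0x1122_aa44.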

// Generic atomic read-modify-write operation
unsafe fn atomic_rmw<T, F: Fn(u32) -> u32>(ptr: *mut T, f: F) -> u32 {
    let aligned_ptr = align_ptr(ptr);
    let (shift, mask) = get_shift_mask(ptr);

    loop {
        let curval_aligned = intrinsics::atomic_load_unordered(aligned_ptr);
        let curval = extract_aligned(curval_aligned, shift, mask);
        let newval = f(curval);
        let newval_aligned = insert_aligned(curval_aligned, newval, shift, mask);
        if __kuser_cmpxchg(curval_aligned, newval_aligned, aligned_ptr) {
            return curval;
        }
    }
}
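
// The loop retries until __kuser_cmpxchg reports that the containing word was updated
// without interference; the return value is the element's previous (pre-operation) value,
// which is what the __sync_fetch_and_* ABI requires.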

// Generic atomic compare-exchange operation
unsafe fn atomic_cmpxchg<T>(ptr: *mut T, oldval: u32, newval: u32) -> u32 {
    let aligned_ptr = align_ptr(ptr);
    let (shift, mask) = get_shift_mask(ptr);

    loop {
        let curval_aligned = intrinsics::atomic_load_unordered(aligned_ptr);
        let curval = extract_aligned(curval_aligned, shift, mask);
        if curval != oldval {
            return curval;
        }
        let newval_aligned = insert_aligned(curval_aligned, newval, shift, mask);
        if __kuser_cmpxchg(curval_aligned, newval_aligned, aligned_ptr) {
            return oldval;
        }
    }
}

macro_rules! atomic_rmw {
    ($name:ident, $ty:ty, $op:expr) => {
        #[cfg_attr(not(feature = "mangled-names"), no_mangle)]
        pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty {
            atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty
        }
    };
}

macro_rules! atomic_cmpxchg {
    ($name:ident, $ty:ty) => {
        #[cfg_attr(not(feature = "mangled-names"), no_mangle)]
        pub unsafe extern "C" fn $name(ptr: *mut $ty, oldval: $ty, newval: $ty) -> $ty {
            atomic_cmpxchg(ptr, oldval as u32, newval as u32) as $ty
        }
    };
}
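
// The invocations below use these macros to emit the unmangled __sync_* symbols that GCC-
// and LLVM-generated code calls for the __sync builtins on older ARM targets that lack
// native atomic (ldrex/strex) instructions.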

atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a.wrapping_add(b));
atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a.wrapping_add(b));

atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a.wrapping_sub(b));
atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a.wrapping_sub(b));

atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b);
atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b);
atomic_rmw!(__sync_fetch_and_and_4, u32, |a: u32, b: u32| a & b);

atomic_rmw!(__sync_fetch_and_or_1, u8, |a: u8, b: u8| a | b);
atomic_rmw!(__sync_fetch_and_or_2, u16, |a: u16, b: u16| a | b);
atomic_rmw!(__sync_fetch_and_or_4, u32, |a: u32, b: u32| a | b);

atomic_rmw!(__sync_fetch_and_xor_1, u8, |a: u8, b: u8| a ^ b);
atomic_rmw!(__sync_fetch_and_xor_2, u16, |a: u16, b: u16| a ^ b);
atomic_rmw!(__sync_fetch_and_xor_4, u32, |a: u32, b: u32| a ^ b);

atomic_rmw!(__sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b));
atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b));
atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b));

atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b { a } else { b });
atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b { a } else { b });
atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b { a } else { b });

atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b { a } else { b });
atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b { a } else { b });
atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b { a } else { b });

atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b { a } else { b });
atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b { a } else { b });
atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b { a } else { b });

atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b { a } else { b });
atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b { a } else { b });
atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b { a } else { b });

atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);
atomic_rmw!(__sync_lock_test_and_set_4, u32, |_: u32, b: u32| b);

atomic_cmpxchg!(__sync_val_compare_and_swap_1, u8);
atomic_cmpxchg!(__sync_val_compare_and_swap_2, u16);
atomic_cmpxchg!(__sync_val_compare_and_swap_4, u32);

#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
pub unsafe extern "C" fn __sync_synchronize() {
    __kuser_memory_barrier();
}
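
// A minimal usage sketch: these symbols are normally called by compiler-generated code,
// but they can also be exercised directly. The tests below assume execution on an ARM
// Linux target whose kernel exposes the user helpers, and a build configuration where the
// standard test harness is available.
#[cfg(test)]
mod tests {
    #[test]
    fn fetch_and_add_returns_previous_value() {
        let mut v: u8 = 40;
        // Atomically adds 2 and returns the value stored before the addition.
        let old = unsafe { super::__sync_fetch_and_add_1(&mut v, 2) };
        assert_eq!(old, 40);
        assert_eq!(v, 42);
    }

    #[test]
    fn compare_and_swap_only_stores_on_match() {
        let mut v: u32 = 1;
        // Expected value matches: the swap happens and the old value is returned.
        assert_eq!(unsafe { super::__sync_val_compare_and_swap_4(&mut v, 1, 5) }, 1);
        assert_eq!(v, 5);
        // Expected value no longer matches: memory is left unchanged.
        assert_eq!(unsafe { super::__sync_val_compare_and_swap_4(&mut v, 1, 9) }, 5);
        assert_eq!(v, 5);
    }
}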