// util.rs
  1. use crate::loom;
  2. use core::ops::{Deref, DerefMut};
/// Exponential backoff state for spin loops.
///
/// The wrapped `u8` is the current backoff exponent: each call to
/// [`Backoff::spin`] / [`Backoff::spin_yield`] spins for roughly
/// `1 << exponent` iterations and then bumps the exponent (bounded by
/// `MAX_SPINS` / `MAX_YIELDS` in the `impl`).
#[derive(Debug)]
pub(crate) struct Backoff(u8);
/// Pads and aligns a value so that adjacent `CachePadded` values do not
/// share a cache line (avoiding false sharing between cores).
///
/// Alignment is 128 bytes on x86_64/aarch64 and 64 bytes on all other
/// targets. NOTE(review): the larger figure on those two architectures is
/// presumably to account for CPUs that prefetch cache lines in adjacent
/// pairs — confirm rationale against the upstream source this mirrors.
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
    not(any(target_arch = "x86_64", target_arch = "aarch64")),
    repr(align(64))
)]
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq, Debug)]
pub(crate) struct CachePadded<T>(pub(crate) T);
/// Returns `true` if the current thread is unwinding due to a panic.
///
/// With the `std` feature enabled this delegates to
/// [`std::thread::panicking`].
#[cfg(feature = "std")]
pub(crate) fn panicking() -> bool {
    std::thread::panicking()
}
/// Returns `true` if the current thread is unwinding due to a panic.
///
/// Without `std` there is no way to observe unwinding, so this
/// conservatively reports `false`.
#[cfg(not(feature = "std"))]
pub(crate) fn panicking() -> bool {
    false
}
// === impl Backoff ===
impl Backoff {
    /// Largest exponent used while purely spinning (up to `1 << 6` = 64
    /// spin-loop hints per call).
    const MAX_SPINS: u8 = 6;
    /// Largest exponent once yielding is involved; `spin_yield` stops
    /// growing the exponent past this.
    const MAX_YIELDS: u8 = 10;

    /// Creates a new backoff with the exponent reset to zero.
    #[inline]
    pub(crate) fn new() -> Self {
        Self(0)
    }

    /// Busy-spins for `1 << exponent` iterations, then bumps the exponent.
    ///
    /// `test_dbg!` / `test_println!` are crate-local macros — presumably
    /// no-ops outside test builds and logging under loom tests; defined
    /// elsewhere in the crate.
    #[inline]
    pub(crate) fn spin(&mut self) {
        // The iteration count saturates at `1 << MAX_SPINS` via the `min`
        // clamp, even though the stored exponent itself can reach
        // MAX_SPINS + 1 (the guard below uses `<=`).
        for _ in 0..test_dbg!(1 << self.0.min(Self::MAX_SPINS)) {
            loom::hint::spin_loop();
            test_println!("spin_loop_hint");
        }
        if self.0 <= Self::MAX_SPINS {
            self.0 += 1;
        }
    }

    /// Like [`Backoff::spin`], but once the spin budget (`MAX_SPINS`) is
    /// exhausted it stops spinning and only yields the thread.
    ///
    /// On builds with neither `std` nor `test` there is no scheduler to
    /// yield to, so the spin branch is always taken instead.
    #[inline]
    pub(crate) fn spin_yield(&mut self) {
        if self.0 <= Self::MAX_SPINS || cfg!(not(any(feature = "std", test))) {
            for _ in 0..1 << self.0 {
                loom::hint::spin_loop();
                test_println!("spin_loop_hint");
            }
        }
        // Yield to the OS scheduler (or loom's simulated scheduler under
        // tests) when a threading runtime is available.
        #[cfg(any(test, feature = "std"))]
        loom::thread::yield_now();
        if self.0 <= Self::MAX_YIELDS {
            self.0 += 1;
        }
    }
}
  53. // === impl CachePadded ===
  54. impl<T> Deref for CachePadded<T> {
  55. type Target = T;
  56. fn deref(&self) -> &T {
  57. &self.0
  58. }
  59. }
  60. impl<T> DerefMut for CachePadded<T> {
  61. fn deref_mut(&mut self) -> &mut T {
  62. &mut self.0
  63. }
  64. }