//! mutex.rs — a futex-backed `Mutex<T>` with manual (FFI-friendly) and
//! RAII-guard locking interfaces.
  1. use core::cell::UnsafeCell;
  2. use core::intrinsics;
  3. use core::ops::{Deref, DerefMut};
  4. use core::sync::atomic;
  5. use platform::types::*;
  6. use platform::{Pal, Sys};
/// Futex operation: block the caller while the futex word still holds the
/// given value (used in `manual_lock`'s slow path).
pub const FUTEX_WAIT: c_int = 0;
/// Futex operation: wake up to the given number of threads blocked on the
/// futex word (used in `manual_unlock`).
pub const FUTEX_WAKE: c_int = 1;
/// A mutual-exclusion primitive protecting a value of type `T`, built on the
/// `futex` system call.
pub struct Mutex<T> {
    // Futex word. Protocol (see manual_lock/manual_unlock):
    //   0 = unlocked
    //   1 = locked, no waiters
    //   2 = locked, possibly with threads parked in FUTEX_WAIT
    lock: UnsafeCell<c_int>,
    // The protected value; only accessed while `lock` is held.
    content: UnsafeCell<T>,
}
// SAFETY: the futex word serializes all access to `content`, so sharing or
// sending a `Mutex<T>` across threads only requires that `T` itself may be
// moved to another thread (`T: Send`).
unsafe impl<T: Send> Send for Mutex<T> {}
unsafe impl<T: Send> Sync for Mutex<T> {}
  15. impl<T> Mutex<T> {
  16. /// Create a new mutex
  17. pub fn new(content: T) -> Self {
  18. Self {
  19. lock: UnsafeCell::new(0),
  20. content: UnsafeCell::new(content),
  21. }
  22. }
  23. /// Tries to lock the mutex, fails if it's already locked. Manual means
  24. /// it's up to you to unlock it after mutex. Returns the last atomic value
  25. /// on failure. You should probably not worry about this, it's used for
  26. /// internal optimizations.
  27. pub unsafe fn manual_try_lock(&self) -> Result<&mut T, c_int> {
  28. let value = intrinsics::atomic_cxchg(self.lock.get(), 0, 1).0;
  29. if value == 0 {
  30. return Ok(&mut *self.content.get());
  31. }
  32. Err(value)
  33. }
  34. /// Lock the mutex, returning the inner content. After doing this, it's
  35. /// your responsibility to unlock it after usage. Mostly useful for FFI:
  36. /// Prefer normal .lock() where possible.
  37. pub unsafe fn manual_lock(&self) -> &mut T {
  38. let mut last = 0;
  39. // First, try spinning for really short durations:
  40. for _ in 0..100 {
  41. atomic::spin_loop_hint();
  42. last = match self.manual_try_lock() {
  43. Ok(content) => return content,
  44. Err(value) => value,
  45. };
  46. }
  47. // We're waiting for a longer duration, so let's employ a futex.
  48. loop {
  49. // If the value is 1, set it to 2 to signify that we're waiting for
  50. // it to to send a FUTEX_WAKE on unlock.
  51. //
  52. // - Skip the atomic operation if the last value was 2, since it most likely hasn't changed.
  53. // - Skip the futex wait if the atomic operation says the mutex is unlocked.
  54. if last == 2 || intrinsics::atomic_cxchg(self.lock.get(), 1, 2).0 != 0 {
  55. Sys::futex(self.lock.get(), FUTEX_WAIT, 2);
  56. }
  57. last = match self.manual_try_lock() {
  58. Ok(content) => return content,
  59. Err(value) => value,
  60. };
  61. }
  62. }
  63. /// Unlock the mutex, if it's locked.
  64. pub unsafe fn manual_unlock(&self) {
  65. if intrinsics::atomic_xchg(self.lock.get(), 0) == 2 {
  66. // At least one futex is up, so let's notify it
  67. Sys::futex(self.lock.get(), FUTEX_WAKE, 1);
  68. }
  69. }
  70. /// Tries to lock the mutex and returns a guard that automatically unlocks
  71. /// the mutex when it falls out of scope.
  72. pub fn try_lock(&self) -> Option<MutexGuard<T>> {
  73. unsafe {
  74. self.manual_try_lock().ok().map(|content| MutexGuard {
  75. mutex: self,
  76. content,
  77. })
  78. }
  79. }
  80. /// Locks the mutex and returns a guard that automatically unlocks the
  81. /// mutex when it falls out of scope.
  82. pub fn lock(&self) -> MutexGuard<T> {
  83. MutexGuard {
  84. mutex: self,
  85. content: unsafe { self.manual_lock() },
  86. }
  87. }
  88. }
/// RAII guard returned by `Mutex::lock`/`Mutex::try_lock`; releases the lock
/// via `manual_unlock` when dropped.
pub struct MutexGuard<'a, T: 'a> {
    // The mutex this guard will unlock on drop.
    mutex: &'a Mutex<T>,
    // Exclusive reference to the protected value, valid while the lock is held.
    content: &'a mut T,
}
  93. impl<'a, T> Deref for MutexGuard<'a, T> {
  94. type Target = T;
  95. fn deref(&self) -> &Self::Target {
  96. &self.content
  97. }
  98. }
  99. impl<'a, T> DerefMut for MutexGuard<'a, T> {
  100. fn deref_mut(&mut self) -> &mut Self::Target {
  101. self.content
  102. }
  103. }
impl<'a, T> Drop for MutexGuard<'a, T> {
    /// Releases the mutex when the guard goes out of scope.
    fn drop(&mut self) {
        // SAFETY: the guard's existence proves this thread holds the lock,
        // which is the precondition of manual_unlock.
        unsafe {
            self.mutex.manual_unlock();
        }
    }
}