123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169 |
- use core::{
- cell::UnsafeCell,
- ops::{Deref, DerefMut},
- };
- use alloc::{collections::LinkedList, sync::Arc};
- use system_error::SystemError;
- use crate::{
- arch::CurrentIrqArch,
- exception::InterruptArch,
- libs::spinlock::SpinLockGuard,
- process::{Pid, ProcessControlBlock, ProcessManager},
- sched::{schedule, SchedMode},
- };
- use super::spinlock::SpinLock;
/// Internal state of a [`Mutex`], guarded by the mutex's inner spinlock.
#[derive(Debug)]
struct MutexInner {
    /// Whether the mutex is currently held.
    is_locked: bool,
    /// FIFO queue of processes blocked waiting for this mutex.
    wait_list: LinkedList<Arc<ProcessControlBlock>>,
}
/// A sleeping mutex: on contention the caller is queued and put to sleep
/// instead of spinning. The protected value lives in `data`; the lock flag
/// and wait queue live in `inner`, which is itself serialized by a spinlock.
#[derive(Debug)]
pub struct Mutex<T> {
    /// The value protected by this mutex; access is only handed out
    /// through a [`MutexGuard`].
    data: UnsafeCell<T>,
    /// Lock state and wait queue, protected by the spinlock.
    inner: SpinLock<MutexInner>,
}
/// RAII guard proving exclusive access to the data of a locked [`Mutex`].
/// Dropping the guard releases the lock.
#[derive(Debug)]
pub struct MutexGuard<'a, T: 'a> {
    // Back-reference to the owning mutex, used by Deref/DerefMut and Drop.
    lock: &'a Mutex<T>,
}
// SAFETY: all access to `data` is serialized through the lock protocol on
// `inner`, so sharing `&Mutex<T>` across threads is sound as long as the
// protected value itself can be moved between threads (`T: Send`).
unsafe impl<T> Sync for Mutex<T> where T: Send {}
impl<T> Mutex<T> {
    /// Creates a new, unlocked mutex wrapping `value`.
    #[allow(dead_code)]
    pub const fn new(value: T) -> Self {
        return Self {
            data: UnsafeCell::new(value),
            inner: SpinLock::new(MutexInner {
                is_locked: false,
                wait_list: LinkedList::new(),
            }),
        };
    }

    /// Acquires the mutex, sleeping the current process until it is free.
    ///
    /// On contention the current PCB is enqueued on the wait list (at most
    /// once) and the process sleeps. After each wakeup the loop re-checks
    /// `is_locked`, so a woken process competes for the lock again rather
    /// than being handed it directly.
    #[inline(always)]
    #[allow(dead_code)]
    pub fn lock(&self) -> MutexGuard<T> {
        loop {
            let mut inner: SpinLockGuard<MutexInner> = self.inner.lock();

            if inner.is_locked {
                // Enqueue only if this process is not already queued
                // (we may re-enter this branch after a wakeup that lost
                // the race for the lock).
                if !self.check_pid_in_wait_list(&inner, ProcessManager::current_pcb().pid()) {
                    inner.wait_list.push_back(ProcessManager::current_pcb());
                }
                // Release the spinlock before sleeping so unlock() can run.
                // NOTE(review): between this drop and mark_sleep inside
                // __sleep(), an unlock() on another CPU may pop this PCB and
                // call wakeup() before the process is actually asleep —
                // confirm mark_sleep/wakeup tolerate that window, otherwise
                // the wakeup can be lost.
                drop(inner);
                self.__sleep();
            } else {
                // Uncontended: take the lock and leave the retry loop.
                inner.is_locked = true;
                drop(inner);
                break;
            }
        }

        return MutexGuard { lock: self };
    }

    /// Attempts to acquire the mutex without blocking.
    ///
    /// Returns `Err(SystemError::EBUSY)` if the mutex is already held,
    /// otherwise locks it and returns the guard.
    #[inline(always)]
    #[allow(dead_code)]
    pub fn try_lock(&self) -> Result<MutexGuard<T>, SystemError> {
        let mut inner = self.inner.lock();

        if inner.is_locked {
            return Err(SystemError::EBUSY);
        } else {
            // Take the lock; the spinlock guard is released on return.
            inner.is_locked = true;
            return Ok(MutexGuard { lock: self });
        }
    }

    /// Marks the current process as sleeping and yields the CPU.
    ///
    /// IRQs are disabled while the sleep state is set so the transition is
    /// not interrupted; the IRQ guard is dropped (restoring the previous
    /// IRQ state) before calling schedule().
    fn __sleep(&self) {
        let irq_guard = unsafe { CurrentIrqArch::save_and_disable_irq() };
        ProcessManager::mark_sleep(true).ok();
        drop(irq_guard);
        schedule(SchedMode::SM_NONE);
    }

    /// Releases the mutex and wakes the first waiter, if any.
    ///
    /// Called from `MutexGuard::drop`. Panics if the mutex is not locked,
    /// which would indicate an unbalanced unlock.
    fn unlock(&self) {
        let mut inner: SpinLockGuard<MutexInner> = self.inner.lock();
        // An unlocked mutex here means the guard protocol was violated.
        assert!(inner.is_locked);

        inner.is_locked = false;
        if inner.wait_list.is_empty() {
            return;
        }

        // Wake exactly one waiter (FIFO order); release the spinlock first
        // since wakeup() may interact with the scheduler.
        let to_wakeup: Arc<ProcessControlBlock> = inner.wait_list.pop_front().unwrap();
        drop(inner);
        ProcessManager::wakeup(&to_wakeup).ok();
    }

    /// Returns `true` if a process with `pid` is already on the wait list.
    #[inline]
    fn check_pid_in_wait_list(&self, inner: &MutexInner, pid: Pid) -> bool {
        for p in inner.wait_list.iter() {
            if p.pid() == pid {
                // Already queued; caller must not enqueue it twice.
                return true;
            }
        }

        return false;
    }
}
- impl<T> Deref for MutexGuard<'_, T> {
- type Target = T;
- fn deref(&self) -> &Self::Target {
- return unsafe { &*self.lock.data.get() };
- }
- }
- impl<T> DerefMut for MutexGuard<'_, T> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- return unsafe { &mut *self.lock.data.get() };
- }
- }
impl<T> Drop for MutexGuard<'_, T> {
    /// Releases the mutex when the guard goes out of scope.
    fn drop(&mut self) {
        self.lock.unlock();
    }
}
|