pte.rs
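//! OS abstraction layer used by the pthreads-embedded (pte) port: threads,
//! mutexes, semaphores, atomic helpers, and thread-local storage.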
#![allow(non_snake_case)]

use alloc::{boxed::Box, collections::BTreeMap};
use core::{
    intrinsics, ptr,
    sync::atomic::{AtomicU32, Ordering},
};

use crate::{
    header::{sys_mman, time::timespec},
    ld_so::{
        linker::Linker,
        tcb::{Master, Tcb},
    },
    platform::{
        types::{c_int, c_uint, c_void, pid_t, size_t},
        Pal, Sys,
    },
    sync::Mutex,
    ALLOCATOR,
};
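/// A counting semaphore: the current count guarded by a `Mutex`, polled by
/// `pte_osSemaphorePend` below.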
pub struct Semaphore {
    lock: Mutex<()>,
    count: i32,
}

type pte_osThreadHandle = pid_t;
type pte_osMutexHandle = *mut Mutex<()>;
type pte_osSemaphoreHandle = *mut Semaphore;
type pte_osThreadEntryPoint = unsafe extern "C" fn(params: *mut c_void) -> *mut c_void;
#[repr(C)]
#[derive(Eq, PartialEq)]
#[allow(dead_code)]
pub enum pte_osResult {
    PTE_OS_OK = 0,
    PTE_OS_NO_RESOURCES,
    PTE_OS_GENERAL_FAILURE,
    PTE_OS_TIMEOUT,
    PTE_OS_INTERRUPTED,
    PTE_OS_INVALID_PARAM,
}

use self::pte_osResult::*;
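// Process-global maps from a thread handle to its start-gate mutex and to its
// stack allocation, each guarded by its own mutex.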
static mut pid_mutexes: Option<BTreeMap<pte_osThreadHandle, pte_osMutexHandle>> = None;
static mut pid_mutexes_lock: Mutex<()> = Mutex::new(());

static mut pid_stacks: Option<BTreeMap<pte_osThreadHandle, (*mut c_void, size_t)>> = None;
static mut pid_stacks_lock: Mutex<()> = Mutex::new(());

#[thread_local]
static mut LOCALS: *mut BTreeMap<c_uint, *mut c_void> = ptr::null_mut();

static NEXT_KEY: AtomicU32 = AtomicU32::new(0);
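/// Returns this thread's TLS value map, lazily allocating it on first use.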
unsafe fn locals() -> &'static mut BTreeMap<c_uint, *mut c_void> {
    if LOCALS.is_null() {
        LOCALS = Box::into_raw(Box::new(BTreeMap::new()));
    }
    &mut *LOCALS
}

// pte_osResult pte_osInit(void)
#[no_mangle]
pub unsafe extern "C" fn pte_osInit() -> pte_osResult {
    PTE_OS_OK
}
/// A shim that wraps thread entry points in setup logic (TLS initialization, for example)
unsafe extern "C" fn pte_osThreadShim(
    entryPoint: pte_osThreadEntryPoint,
    argv: *mut c_void,
    mutex: pte_osMutexHandle,
    tls_size: usize,
    tls_masters_ptr: *mut Master,
    tls_masters_len: usize,
    tls_linker_ptr: *const Mutex<Linker>,
    tls_mspace: usize,
) {
    // The kernel-allocated TLS does not have masters set, so do not attempt to copy it.
    // It will be copied by the kernel.
    if !tls_masters_ptr.is_null() {
        let tcb = Tcb::new(tls_size).unwrap();
        tcb.masters_ptr = tls_masters_ptr;
        tcb.masters_len = tls_masters_len;
        tcb.linker_ptr = tls_linker_ptr;
        tcb.mspace = tls_mspace;
        tcb.copy_masters().unwrap();
        tcb.activate();
    }

    // Wait until pte_osThreadStart
    pte_osMutexLock(mutex);
    entryPoint(argv);
    pte_osThreadExit();
}
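/// Creates a new thread: maps a fresh stack, pushes `pte_osThreadShim` and its
/// arguments onto it, and hands the prepared stack to `Sys::pte_clone`. The new
/// thread starts parked on a locked mutex until `pte_osThreadStart` releases it.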
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadCreate(
    entryPoint: pte_osThreadEntryPoint,
    stackSize: c_int,
    _initialPriority: c_int,
    argv: *mut c_void,
    ppte_osThreadHandle: *mut pte_osThreadHandle,
) -> pte_osResult {
    // Create a locked mutex, unlocked by pte_osThreadStart
    let mutex: pte_osMutexHandle = Box::into_raw(Box::new(Mutex::locked(())));

    let stack_size = if stackSize == 0 {
        1024 * 1024
    } else {
        stackSize as usize
    };
    let stack_base = sys_mman::mmap(
        ptr::null_mut(),
        stack_size,
        sys_mman::PROT_READ | sys_mman::PROT_WRITE,
        sys_mman::MAP_SHARED | sys_mman::MAP_ANONYMOUS,
        -1,
        0,
    );
    if stack_base as isize == -1 {
        return PTE_OS_GENERAL_FAILURE;
    }
    ptr::write_bytes(stack_base as *mut u8, 0, stack_size);
    let stack_end = stack_base.add(stack_size);
    let mut stack = stack_end as *mut usize;
    {
        let mut push = |value: usize| {
            stack = stack.offset(-1);
            *stack = value;
        };

        // WARNING: Stack must be 128-bit aligned for SSE
        if let Some(tcb) = Tcb::current() {
            push(tcb.mspace as usize);
            push(tcb.linker_ptr as usize);
            push(tcb.masters_len);
            push(tcb.masters_ptr as usize);
            push(tcb.tls_len);
        } else {
            push(ALLOCATOR.get_book_keeper());
            push(0);
            push(0);
            push(0);
            push(0);
        }

        push(mutex as usize);
        push(argv as usize);
        push(entryPoint as usize);
        push(pte_osThreadShim as usize);
    }

    let id = Sys::pte_clone(stack);
    if id < 0 {
        return PTE_OS_GENERAL_FAILURE;
    }

    pte_osMutexLock(&mut pid_mutexes_lock);
    if pid_mutexes.is_none() {
        pid_mutexes = Some(BTreeMap::new());
    }
    pid_mutexes.as_mut().unwrap().insert(id, mutex);
    pte_osMutexUnlock(&mut pid_mutexes_lock);

    pte_osMutexLock(&mut pid_stacks_lock);
    if pid_stacks.is_none() {
        pid_stacks = Some(BTreeMap::new());
    }
    pid_stacks
        .as_mut()
        .unwrap()
        .insert(id, (stack_base, stack_size));
    pte_osMutexUnlock(&mut pid_stacks_lock);

    *ppte_osThreadHandle = id;
    PTE_OS_OK
}
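/// Releases the start-gate mutex created by `pte_osThreadCreate`, allowing the
/// new thread's shim to proceed into its entry point.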
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadStart(handle: pte_osThreadHandle) -> pte_osResult {
    let mut ret = PTE_OS_GENERAL_FAILURE;
    pte_osMutexLock(&mut pid_mutexes_lock);
    if let Some(ref mutexes) = pid_mutexes {
        if let Some(mutex) = mutexes.get(&handle) {
            pte_osMutexUnlock(*mutex);
            ret = PTE_OS_OK;
        }
    }
    pte_osMutexUnlock(&mut pid_mutexes_lock);
    ret
}
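/// Exits the calling thread without releasing its bookkeeping; see
/// `pte_osThreadExitAndDelete` for the cleanup variant.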
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadExit() {
    Sys::exit(0);
}

#[no_mangle]
pub unsafe extern "C" fn pte_osThreadExitAndDelete(handle: pte_osThreadHandle) -> pte_osResult {
    let res = pte_osThreadDelete(handle);
    if res != PTE_OS_OK {
        return res;
    }
    pte_osThreadExit();
    PTE_OS_OK
}
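/// Frees the bookkeeping for a thread: drops its start-gate mutex and unmaps the
/// stack recorded by `pte_osThreadCreate`.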
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadDelete(handle: pte_osThreadHandle) -> pte_osResult {
    pte_osMutexLock(&mut pid_mutexes_lock);
    if let Some(ref mut mutexes) = pid_mutexes {
        if let Some(mutex) = mutexes.remove(&handle) {
            Box::from_raw(mutex);
        }
    }
    pte_osMutexUnlock(&mut pid_mutexes_lock);

    pte_osMutexLock(&mut pid_stacks_lock);
    if let Some(ref mut stacks) = pid_stacks {
        if let Some((stack_base, stack_size)) = stacks.remove(&handle) {
            sys_mman::munmap(stack_base, stack_size);
        }
    }
    pte_osMutexUnlock(&mut pid_stacks_lock);

    PTE_OS_OK
}
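/// Blocks until the thread identified by `handle` terminates, by waiting on its
/// pid with `Sys::waitpid`.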
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadWaitForEnd(handle: pte_osThreadHandle) -> pte_osResult {
    let mut status = 0;
    Sys::waitpid(handle, &mut status, 0);
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osThreadCancel(handle: pte_osThreadHandle) -> pte_osResult {
    // TODO: allow cancel of thread
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osThreadCheckCancel(handle: pte_osThreadHandle) -> pte_osResult {
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osThreadSleep(msecs: c_uint) {
    let tm = timespec {
        tv_sec: msecs as i64 / 1000,
        tv_nsec: (msecs % 1000) as i64 * 1000000,
    };
    Sys::nanosleep(&tm, ptr::null_mut());
}

#[no_mangle]
pub unsafe extern "C" fn pte_osThreadGetHandle() -> pte_osThreadHandle {
    Sys::gettid()
}

#[no_mangle]
pub unsafe extern "C" fn pte_osThreadGetPriority(threadHandle: pte_osThreadHandle) -> c_int {
    // XXX Shouldn't Redox support priorities?
    1
}

#[no_mangle]
pub unsafe extern "C" fn pte_osThreadSetPriority(
    threadHandle: pte_osThreadHandle,
    newPriority: c_int,
) -> pte_osResult {
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osThreadGetMinPriority() -> c_int {
    1
}

#[no_mangle]
pub unsafe extern "C" fn pte_osThreadGetMaxPriority() -> c_int {
    1
}

#[no_mangle]
pub unsafe extern "C" fn pte_osThreadGetDefaultPriority() -> c_int {
    1
}
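// OS mutex primitives: a pte_osMutexHandle is a raw pointer to a heap-allocated
// `Mutex<()>`, created here and freed by `pte_osMutexDelete`.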
#[no_mangle]
pub unsafe extern "C" fn pte_osMutexCreate(pHandle: *mut pte_osMutexHandle) -> pte_osResult {
    *pHandle = Box::into_raw(Box::new(Mutex::new(())));
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osMutexDelete(handle: pte_osMutexHandle) -> pte_osResult {
    Box::from_raw(handle);
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osMutexLock(handle: pte_osMutexHandle) -> pte_osResult {
    (*handle).manual_lock();
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osMutexUnlock(handle: pte_osMutexHandle) -> pte_osResult {
    (*handle).manual_unlock();
    PTE_OS_OK
}
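/// Allocates a new `Semaphore` with the given initial count and returns it
/// through `pHandle`.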
#[no_mangle]
pub unsafe extern "C" fn pte_osSemaphoreCreate(
    initialValue: c_int,
    pHandle: *mut pte_osSemaphoreHandle,
) -> pte_osResult {
    *pHandle = Box::into_raw(Box::new(Semaphore {
        lock: Mutex::new(()),
        count: initialValue,
    }));
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osSemaphoreDelete(handle: pte_osSemaphoreHandle) -> pte_osResult {
    Box::from_raw(handle);
    PTE_OS_OK
}
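/// Increments the semaphore count by one; the `count` argument is currently
/// ignored, so each call posts a single unit.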
#[no_mangle]
pub unsafe extern "C" fn pte_osSemaphorePost(
    handle: pte_osSemaphoreHandle,
    count: c_int,
) -> pte_osResult {
    let semaphore = &mut *handle;
    let _guard = semaphore.lock.lock();
    intrinsics::atomic_xadd(&mut semaphore.count, 1);
    PTE_OS_OK
}
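/// Waits until the semaphore count is positive, then decrements it. This spins,
/// yielding the CPU between attempts; the timeout argument is not yet honored.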
#[no_mangle]
pub unsafe extern "C" fn pte_osSemaphorePend(
    handle: pte_osSemaphoreHandle,
    pTimeout: *mut c_uint,
) -> pte_osResult {
    // TODO: pTimeout
    let semaphore = &mut *handle;
    loop {
        {
            let _guard = semaphore.lock.lock();
            if intrinsics::atomic_load(&semaphore.count) > 0 {
                intrinsics::atomic_xsub(&mut semaphore.count, 1);
                break;
            }
        }
        Sys::sched_yield();
    }
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osSemaphoreCancellablePend(
    handle: pte_osSemaphoreHandle,
    pTimeout: *mut c_uint,
) -> pte_osResult {
    // TODO
    pte_osSemaphorePend(handle, pTimeout)
}
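// Atomic helpers for pthreads-embedded. The exchange, compare-exchange, and
// exchange-add functions return the previous value; increment and decrement
// return the new value.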
#[no_mangle]
pub unsafe extern "C" fn pte_osAtomicExchange(ptarg: *mut c_int, val: c_int) -> c_int {
    intrinsics::atomic_xchg(ptarg, val)
}

#[no_mangle]
pub unsafe extern "C" fn pte_osAtomicCompareExchange(
    pdest: *mut c_int,
    exchange: c_int,
    comp: c_int,
) -> c_int {
    intrinsics::atomic_cxchg(pdest, comp, exchange).0
}

#[no_mangle]
pub unsafe extern "C" fn pte_osAtomicExchangeAdd(pAppend: *mut c_int, value: c_int) -> c_int {
    intrinsics::atomic_xadd(pAppend, value)
}

#[no_mangle]
pub unsafe extern "C" fn pte_osAtomicDecrement(pdest: *mut c_int) -> c_int {
    intrinsics::atomic_xadd(pdest, -1) - 1
}

#[no_mangle]
pub unsafe extern "C" fn pte_osAtomicIncrement(pdest: *mut c_int) -> c_int {
    intrinsics::atomic_xadd(pdest, 1) + 1
}
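// Thread-local storage: values live in this thread's `LOCALS` map, keyed by
// indexes handed out sequentially by `pte_osTlsAlloc`.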
#[no_mangle]
pub unsafe extern "C" fn pte_osTlsSetValue(index: c_uint, value: *mut c_void) -> pte_osResult {
    locals().insert(index, value);
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osTlsGetValue(index: c_uint) -> *mut c_void {
    locals().get_mut(&index).copied().unwrap_or(ptr::null_mut())
}

#[no_mangle]
pub unsafe extern "C" fn pte_osTlsAlloc(pKey: *mut c_uint) -> pte_osResult {
    *pKey = NEXT_KEY.fetch_add(1, Ordering::SeqCst);
    PTE_OS_OK
}

#[no_mangle]
pub unsafe extern "C" fn pte_osTlsFree(index: c_uint) -> pte_osResult {
    // XXX free keys
    PTE_OS_OK
}