pte.rs 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420
  1. #![allow(non_snake_case)]
use alloc::{boxed::Box, collections::BTreeMap};
use core::{
    cell::UnsafeCell,
    intrinsics, ptr,
    sync::atomic::{AtomicI32, AtomicU32, Ordering},
};
use crate::{
    header::{sys_mman, time::timespec},
    ld_so::{
        linker::Linker,
        tcb::{Master, Tcb},
    },
    platform::{
        types::{c_int, c_uint, c_void, pid_t, size_t},
        Pal, Sys,
    },
    sync::{Mutex, Semaphore},
    ALLOCATOR,
};
// Handle/ABI types expected by the pte (pthreads-embedded) C interface.
// A thread handle is the kernel thread id; mutexes and semaphores are
// heap-allocated relibc primitives handed to C code as raw pointers.
type pte_osThreadHandle = pid_t;
type pte_osMutexHandle = *mut Mutex<()>;
type pte_osSemaphoreHandle = *mut Semaphore;
// Thread entry point signature used by pte: takes an opaque argument and
// returns an opaque pointer (the thread's exit value).
type pte_osThreadEntryPoint = unsafe extern "C" fn(params: *mut c_void) -> *mut c_void;
/// Status codes returned by the `pte_os*` functions, mirroring the C enum
/// of the pthreads-embedded porting layer (`PTE_OS_OK` == 0).
#[repr(C)]
#[derive(Eq, PartialEq)]
#[allow(dead_code)]
pub enum pte_osResult {
    PTE_OS_OK = 0,
    PTE_OS_NO_RESOURCES,
    PTE_OS_GENERAL_FAILURE,
    PTE_OS_TIMEOUT,
    PTE_OS_INTERRUPTED,
    PTE_OS_INVALID_PARAM,
}
use self::pte_osResult::*;
// Map from thread id -> the startup mutex each new thread blocks on until
// pte_osThreadStart releases it. Lazily created; guarded by pid_mutexes_lock.
static mut pid_mutexes: Option<BTreeMap<pte_osThreadHandle, pte_osMutexHandle>> = None;
static mut pid_mutexes_lock: Mutex<()> = Mutex::new(());
// Map from thread id -> (stack base, stack size) of the mmap'd thread stack,
// so pte_osThreadDelete can find it. Lazily created; guarded by pid_stacks_lock.
static mut pid_stacks: Option<BTreeMap<pte_osThreadHandle, (*mut c_void, size_t)>> = None;
static mut pid_stacks_lock: Mutex<()> = Mutex::new(());
// TODO: VecMap/SLOB (speed) / radix tree (speed while allowing randomization for security).
// Per-thread TLS key -> value storage backing pte_osTlsSetValue/GetValue.
#[thread_local]
static LOCALS: UnsafeCell<BTreeMap<c_uint, *mut c_void>> = UnsafeCell::new(BTreeMap::new());
// Next TLS key handed out by pte_osTlsAlloc; keys are never reused (see pte_osTlsFree).
static NEXT_KEY: AtomicU32 = AtomicU32::new(0);
/// Returns a mutable reference to the calling thread's TLS map.
///
/// Safety: `LOCALS` is `#[thread_local]`, so there is no cross-thread
/// aliasing; callers must not hold two overlapping references at once.
unsafe fn locals<'a>() -> &'a mut BTreeMap<c_uint, *mut c_void> {
    &mut *LOCALS.get()
}
// pte_osResult pte_osInit(void)
/// One-time global initialization of the pte layer. Nothing to do here:
/// all state in this module is created lazily on first use.
#[no_mangle]
pub unsafe extern "C" fn pte_osInit() -> pte_osResult {
    PTE_OS_OK
}
/// A shim to wrap thread entry points in logic to set up TLS, for example.
///
/// Runs first on the new thread; its arguments are the values that
/// pte_osThreadCreate pushed onto the new stack for the pte_clone
/// trampoline to pop, so the parameter order here is ABI.
unsafe extern "C" fn pte_osThreadShim(
    entryPoint: pte_osThreadEntryPoint,
    argv: *mut c_void,
    mutex: pte_osMutexHandle,
    tls_size: usize,
    tls_masters_ptr: *mut Master,
    tls_masters_len: usize,
    tls_linker_ptr: *const Mutex<Linker>,
    tls_mspace: usize,
) {
    // The kernel allocated TLS does not have masters set, so do not attempt to copy it.
    // It will be copied by the kernel.
    if !tls_masters_ptr.is_null() {
        // Build a fresh TCB for this thread from the creator's TLS master
        // images, then make it the active TCB.
        let tcb = Tcb::new(tls_size).unwrap();
        tcb.masters_ptr = tls_masters_ptr;
        tcb.masters_len = tls_masters_len;
        tcb.linker_ptr = tls_linker_ptr;
        tcb.mspace = tls_mspace;
        tcb.copy_masters().unwrap();
        tcb.activate();
    }
    // Wait until pte_osThreadStart releases the startup mutex.
    pte_osMutexLock(mutex);
    entryPoint(argv);
    pte_osThreadExit();
}
/// Allocate a stack, seed it with the arguments for `pte_osThreadShim`, and
/// clone a new thread that starts suspended on a startup mutex. The new
/// thread's handle is written to `ppte_osThreadHandle`; its entry point does
/// not run until pte_osThreadStart releases the mutex.
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadCreate(
    entryPoint: pte_osThreadEntryPoint,
    stackSize: c_int,
    _initialPriority: c_int,
    argv: *mut c_void,
    ppte_osThreadHandle: *mut pte_osThreadHandle,
) -> pte_osResult {
    // Create a locked mutex, unlocked by pte_osThreadStart
    let mutex: pte_osMutexHandle = Box::into_raw(Box::new(Mutex::locked(())));
    // Default to a 1 MiB stack when the caller does not specify a size.
    let stack_size = if stackSize == 0 {
        1024 * 1024
    } else {
        stackSize as usize
    };
    let stack_base = sys_mman::mmap(
        ptr::null_mut(),
        stack_size,
        sys_mman::PROT_READ | sys_mman::PROT_WRITE,
        sys_mman::MAP_SHARED | sys_mman::MAP_ANONYMOUS,
        -1,
        0,
    );
    if stack_base as isize == -1 {
        // mmap returned MAP_FAILED; nothing mapped, nothing to clean up.
        // NOTE(review): the startup mutex is leaked on this path.
        return PTE_OS_GENERAL_FAILURE;
    }
    ptr::write_bytes(stack_base as *mut u8, 0, stack_size);
    let stack_end = stack_base.add(stack_size);
    let mut stack = stack_end as *mut usize;
    {
        // Push pte_osThreadShim's arguments in reverse order so the
        // pte_clone trampoline pops them off the new stack in order.
        let mut push = |value: usize| {
            stack = stack.offset(-1);
            *stack = value;
        };
        //WARNING: Stack must be 128-bit aligned for SSE
        if let Some(tcb) = Tcb::current() {
            // Forward the creator's TLS master images so the shim can build
            // a matching TCB for the new thread.
            push(tcb.mspace as usize);
            push(tcb.linker_ptr as usize);
            push(tcb.masters_len);
            push(tcb.masters_ptr as usize);
            push(tcb.tls_len);
        } else {
            // No current TCB (early bootstrap): null masters make the shim
            // skip TLS setup; still pass the allocator's bookkeeping.
            push(ALLOCATOR.get_book_keeper());
            push(0);
            push(0);
            push(0);
            push(0);
        }
        push(mutex as usize);
        push(argv as usize);
        push(entryPoint as usize);
        push(pte_osThreadShim as usize);
    }
    let id = Sys::pte_clone(stack);
    if id < 0 {
        // Clone failed. NOTE(review): the startup mutex and the stack
        // mapping are leaked on this path.
        return PTE_OS_GENERAL_FAILURE;
    }
    // Record the startup mutex so pte_osThreadStart can release it later.
    pte_osMutexLock(&mut pid_mutexes_lock);
    if pid_mutexes.is_none() {
        pid_mutexes = Some(BTreeMap::new());
    }
    pid_mutexes.as_mut().unwrap().insert(id, mutex);
    pte_osMutexUnlock(&mut pid_mutexes_lock);
    // Record the stack mapping so pte_osThreadDelete can find it.
    pte_osMutexLock(&mut pid_stacks_lock);
    if pid_stacks.is_none() {
        pid_stacks = Some(BTreeMap::new());
    }
    pid_stacks
        .as_mut()
        .unwrap()
        .insert(id, (stack_base, stack_size));
    pte_osMutexUnlock(&mut pid_stacks_lock);
    *ppte_osThreadHandle = id;
    PTE_OS_OK
}
  155. #[no_mangle]
  156. pub unsafe extern "C" fn pte_osThreadStart(handle: pte_osThreadHandle) -> pte_osResult {
  157. let mut ret = PTE_OS_GENERAL_FAILURE;
  158. pte_osMutexLock(&mut pid_mutexes_lock);
  159. if let Some(ref mutexes) = pid_mutexes {
  160. if let Some(mutex) = mutexes.get(&handle) {
  161. pte_osMutexUnlock(*mutex);
  162. ret = PTE_OS_OK;
  163. }
  164. }
  165. pte_osMutexUnlock(&mut pid_mutexes_lock);
  166. ret
  167. }
/// Terminate the calling thread via Sys::exit(0). Never returns.
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadExit() {
    Sys::exit(0);
}
  172. #[no_mangle]
  173. pub unsafe extern "C" fn pte_osThreadExitAndDelete(handle: pte_osThreadHandle) -> pte_osResult {
  174. let res = pte_osThreadDelete(handle);
  175. if res != PTE_OS_OK {
  176. return res;
  177. }
  178. pte_osThreadExit();
  179. PTE_OS_OK
  180. }
  181. #[no_mangle]
  182. pub unsafe extern "C" fn pte_osThreadDelete(handle: pte_osThreadHandle) -> pte_osResult {
  183. pte_osMutexLock(&mut pid_mutexes_lock);
  184. if let Some(ref mut mutexes) = pid_mutexes {
  185. if let Some(mutex) = mutexes.remove(&handle) {
  186. Box::from_raw(mutex);
  187. }
  188. }
  189. pte_osMutexUnlock(&mut pid_mutexes_lock);
  190. pte_osMutexLock(&mut pid_stacks_lock);
  191. if let Some(ref mut stacks) = pid_stacks {
  192. if let Some((stack_base, stack_size)) = stacks.remove(&handle) {
  193. //TODO: this currently unmaps the thread's stack, while it is being used!
  194. //sys_mman::munmap(stack_base, stack_size);
  195. }
  196. }
  197. pte_osMutexUnlock(&mut pid_stacks_lock);
  198. PTE_OS_OK
  199. }
/// Block until the thread identified by `handle` terminates.
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadWaitForEnd(handle: pte_osThreadHandle) -> pte_osResult {
    let mut status = 0;
    // NOTE(review): the waitpid result and exit status are ignored; failure
    // (e.g. invalid handle) is still reported as success — confirm callers
    // tolerate this.
    Sys::waitpid(handle, &mut status, 0);
    PTE_OS_OK
}
/// Request cancellation of another thread. Not implemented; always reports
/// success without doing anything.
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadCancel(handle: pte_osThreadHandle) -> pte_osResult {
    //TODO: allow cancel of thread
    PTE_OS_OK
}
/// Poll for a pending cancellation request on `handle`. Since cancellation
/// is not implemented, there is never anything to report.
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadCheckCancel(handle: pte_osThreadHandle) -> pte_osResult {
    PTE_OS_OK
}
  215. #[no_mangle]
  216. pub unsafe extern "C" fn pte_osThreadSleep(msecs: c_uint) {
  217. if msecs == 0 {
  218. Sys::sched_yield();
  219. } else {
  220. let tm = timespec {
  221. tv_sec: msecs as i64 / 1000,
  222. tv_nsec: (msecs % 1000) as i64 * 1000000,
  223. };
  224. Sys::nanosleep(&tm, ptr::null_mut());
  225. }
  226. }
/// Return the handle (kernel thread id) of the calling thread.
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadGetHandle() -> pte_osThreadHandle {
    Sys::gettid()
}
/// Report a thread's scheduling priority. Priorities are not supported, so
/// every thread reports the single priority level 1.
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadGetPriority(threadHandle: pte_osThreadHandle) -> c_int {
    // XXX Shouldn't Redox support priorities?
    1
}
/// Set a thread's scheduling priority. Accepted but ignored.
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadSetPriority(
    threadHandle: pte_osThreadHandle,
    newPriority: c_int,
) -> pte_osResult {
    PTE_OS_OK
}
/// Lowest supported priority level (the only one).
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadGetMinPriority() -> c_int {
    1
}
/// Highest supported priority level (the only one).
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadGetMaxPriority() -> c_int {
    1
}
/// Default priority assigned to new threads.
#[no_mangle]
pub unsafe extern "C" fn pte_osThreadGetDefaultPriority() -> c_int {
    1
}
/// Allocate a new, initially unlocked mutex and return it through `pHandle`.
#[no_mangle]
pub unsafe extern "C" fn pte_osMutexCreate(pHandle: *mut pte_osMutexHandle) -> pte_osResult {
    // Ownership transfers to the caller; freed again in pte_osMutexDelete.
    *pHandle = Box::into_raw(Box::new(Mutex::new(())));
    PTE_OS_OK
}
  260. #[no_mangle]
  261. pub unsafe extern "C" fn pte_osMutexDelete(handle: pte_osMutexHandle) -> pte_osResult {
  262. Box::from_raw(handle);
  263. PTE_OS_OK
  264. }
/// Acquire the mutex, blocking until it becomes available.
#[no_mangle]
pub unsafe extern "C" fn pte_osMutexLock(handle: pte_osMutexHandle) -> pte_osResult {
    // manual_lock/manual_unlock bypass the RAII guard, as required by a
    // C-style lock/unlock interface.
    (*handle).manual_lock();
    PTE_OS_OK
}
/// Release a mutex previously acquired with pte_osMutexLock.
#[no_mangle]
pub unsafe extern "C" fn pte_osMutexUnlock(handle: pte_osMutexHandle) -> pte_osResult {
    (*handle).manual_unlock();
    PTE_OS_OK
}
/// Allocate a counting semaphore with the given initial value and return it
/// through `pHandle`. Freed again by pte_osSemaphoreDelete.
#[no_mangle]
pub unsafe extern "C" fn pte_osSemaphoreCreate(
    initialValue: c_int,
    pHandle: *mut pte_osSemaphoreHandle,
) -> pte_osResult {
    *pHandle = Box::into_raw(Box::new(Semaphore::new(initialValue)));
    PTE_OS_OK
}
  283. #[no_mangle]
  284. pub unsafe extern "C" fn pte_osSemaphoreDelete(handle: pte_osSemaphoreHandle) -> pte_osResult {
  285. Box::from_raw(handle);
  286. PTE_OS_OK
  287. }
  288. #[no_mangle]
  289. pub unsafe extern "C" fn pte_osSemaphorePost(
  290. handle: pte_osSemaphoreHandle,
  291. count: c_int,
  292. ) -> pte_osResult {
  293. (*handle).post();
  294. PTE_OS_OK
  295. }
/// Decrement (wait on) the semaphore, optionally bounded by a timeout.
///
/// `pTimeout`, when non-null, points to a timeout in milliseconds; it is
/// converted to a `timespec` for the underlying wait (presumably a relative
/// duration — confirm against `Semaphore::wait`).
#[no_mangle]
pub unsafe extern "C" fn pte_osSemaphorePend(
    handle: pte_osSemaphoreHandle,
    pTimeout: *mut c_uint,
) -> pte_osResult {
    let timeout_opt = if ! pTimeout.is_null() {
        // Split the millisecond count into whole seconds plus nanoseconds.
        let timeout = *pTimeout as i64;
        let tv_sec = timeout / 1000;
        let tv_nsec = (timeout % 1000) * 1000000;
        Some(timespec { tv_sec, tv_nsec })
    } else {
        None
    };
    // NOTE(review): the wait's outcome is not inspected, so a timed-out wait
    // is still reported as PTE_OS_OK rather than PTE_OS_TIMEOUT — confirm.
    (*handle).wait(timeout_opt.as_ref());
    PTE_OS_OK
}
/// Like pte_osSemaphorePend, but intended to also return early when the
/// calling thread is cancelled. Cancellation is not implemented yet, so this
/// simply forwards to the plain pend.
#[no_mangle]
pub unsafe extern "C" fn pte_osSemaphoreCancellablePend(
    handle: pte_osSemaphoreHandle,
    pTimeout: *mut c_uint,
) -> pte_osResult {
    //TODO: thread cancel
    pte_osSemaphorePend(handle, pTimeout)
}
  320. #[no_mangle]
  321. pub unsafe extern "C" fn pte_osAtomicExchange(ptarg: *mut c_int, val: c_int) -> c_int {
  322. intrinsics::atomic_xchg(ptarg, val)
  323. }
  324. #[no_mangle]
  325. pub unsafe extern "C" fn pte_osAtomicCompareExchange(
  326. pdest: *mut c_int,
  327. exchange: c_int,
  328. comp: c_int,
  329. ) -> c_int {
  330. intrinsics::atomic_cxchg(pdest, comp, exchange).0
  331. }
  332. #[no_mangle]
  333. pub unsafe extern "C" fn pte_osAtomicExchangeAdd(pAppend: *mut c_int, value: c_int) -> c_int {
  334. intrinsics::atomic_xadd(pAppend, value)
  335. }
  336. #[no_mangle]
  337. pub unsafe extern "C" fn pte_osAtomicDecrement(pdest: *mut c_int) -> c_int {
  338. intrinsics::atomic_xadd(pdest, -1) - 1
  339. }
  340. #[no_mangle]
  341. pub unsafe extern "C" fn pte_osAtomicIncrement(pdest: *mut c_int) -> c_int {
  342. intrinsics::atomic_xadd(pdest, 1) + 1
  343. }
/// Store `value` under TLS key `index` for the calling thread only.
#[no_mangle]
pub unsafe extern "C" fn pte_osTlsSetValue(index: c_uint, value: *mut c_void) -> pte_osResult {
    locals().insert(index, value);
    PTE_OS_OK
}
  349. #[no_mangle]
  350. pub unsafe extern "C" fn pte_osTlsGetValue(index: c_uint) -> *mut c_void {
  351. locals().get_mut(&index).copied().unwrap_or(ptr::null_mut())
  352. }
/// Allocate a fresh process-wide TLS key and write it to `pKey`.
#[no_mangle]
pub unsafe extern "C" fn pte_osTlsAlloc(pKey: *mut c_uint) -> pte_osResult {
    // Monotonically increasing counter; keys are never recycled (see
    // pte_osTlsFree below).
    *pKey = NEXT_KEY.fetch_add(1, Ordering::Relaxed);
    PTE_OS_OK
}
/// Release a TLS key. Currently a no-op: the key (and any values still
/// stored under it in other threads' maps) is leaked.
#[no_mangle]
pub unsafe extern "C" fn pte_osTlsFree(index: c_uint) -> pte_osResult {
    // XXX free keys
    PTE_OS_OK
}