// mod.rs — TCP socket implementation.
  1. use alloc::sync::{Arc, Weak};
  2. use core::sync::atomic::{AtomicBool, AtomicUsize};
  3. use system_error::SystemError::{self, *};
  4. use crate::libs::rwlock::RwLock;
  5. use crate::net::event_poll::EPollEventType;
  6. use crate::net::net_core::poll_ifaces;
  7. use crate::net::socket::*;
  8. use crate::sched::SchedMode;
  9. use inet::{InetSocket, UNSPECIFIED_LOCAL_ENDPOINT};
  10. use smoltcp;
  11. pub mod inner;
  12. use inner::*;
  13. type EP = EPollEventType;
#[derive(Debug)]
pub struct TcpSocket {
    /// TCP connection state machine (Init / Connecting / Established /
    /// Listening). Held in an `Option` so transitions can take the state out
    /// by value and put the successor back; it is expected to be `Some`
    /// between calls (accessors use `expect("Tcp Inner is None")`).
    inner: RwLock<Option<Inner>>,
    /// Shutdown state of the read/write halves.
    /// NOTE(review): not consulted anywhere in this chunk — confirm the
    /// shutdown path elsewhere actually uses it.
    shutdown: Shutdown,
    /// Non-blocking mode flag (O_NONBLOCK).
    nonblock: AtomicBool,
    /// epoll items registered against this socket.
    epitems: EPollItems,
    /// Tasks blocked on this socket (e.g. a blocking `accept`).
    wait_queue: WaitQueue,
    /// Weak self-reference, upgraded to an `Arc` when registering the socket
    /// with an iface in `do_bind`.
    self_ref: Weak<Self>,
    /// Cached epoll event bits returned by `poll()`; refreshed by the
    /// state objects' `update_io_events`.
    pollee: AtomicUsize,
}
  24. impl TcpSocket {
  25. pub fn new(nonblock: bool) -> Arc<Self> {
  26. Arc::new_cyclic(|me| Self {
  27. inner: RwLock::new(Some(Inner::Init(Init::new()))),
  28. shutdown: Shutdown::new(),
  29. nonblock: AtomicBool::new(nonblock),
  30. epitems: EPollItems::default(),
  31. wait_queue: WaitQueue::default(),
  32. self_ref: me.clone(),
  33. pollee: AtomicUsize::new((EP::EPOLLIN.bits() | EP::EPOLLOUT.bits()) as usize),
  34. })
  35. }
  36. pub fn new_established(inner: Established, nonblock: bool) -> Arc<Self> {
  37. Arc::new_cyclic(|me| Self {
  38. inner: RwLock::new(Some(Inner::Established(inner))),
  39. shutdown: Shutdown::new(),
  40. nonblock: AtomicBool::new(nonblock),
  41. epitems: EPollItems::default(),
  42. wait_queue: WaitQueue::default(),
  43. self_ref: me.clone(),
  44. pollee: AtomicUsize::new((EP::EPOLLIN.bits() | EP::EPOLLOUT.bits()) as usize),
  45. })
  46. }
  47. pub fn is_nonblock(&self) -> bool {
  48. self.nonblock.load(core::sync::atomic::Ordering::Relaxed)
  49. }
  50. #[inline]
  51. fn write_state<F>(&self, mut f: F) -> Result<(), SystemError>
  52. where
  53. F: FnMut(Inner) -> Result<Inner, SystemError>,
  54. {
  55. let mut inner_guard = self.inner.write();
  56. let inner = inner_guard.take().expect("Tcp Inner is None");
  57. let update = f(inner)?;
  58. inner_guard.replace(update);
  59. Ok(())
  60. }
  61. pub fn do_bind(&self, local_endpoint: smoltcp::wire::IpEndpoint) -> Result<(), SystemError> {
  62. let mut writer = self.inner.write();
  63. match writer.take().expect("Tcp Inner is None") {
  64. Inner::Init(inner) => {
  65. let bound = inner.bind(local_endpoint)?;
  66. if let Init::Bound((ref bound, _)) = bound {
  67. bound
  68. .iface()
  69. .common()
  70. .bind_socket(self.self_ref.upgrade().unwrap());
  71. }
  72. writer.replace(Inner::Init(bound));
  73. Ok(())
  74. }
  75. _ => Err(EINVAL),
  76. }
  77. }
  78. pub fn do_listen(&self, backlog: usize) -> Result<(), SystemError> {
  79. let mut writer = self.inner.write();
  80. let inner = writer.take().expect("Tcp Inner is None");
  81. let (listening, err) = match inner {
  82. Inner::Init(init) => {
  83. let listen_result = init.listen(backlog);
  84. match listen_result {
  85. Ok(listening) => (Inner::Listening(listening), None),
  86. Err((init, err)) => (Inner::Init(init), Some(err)),
  87. }
  88. }
  89. _ => (inner, Some(EINVAL)),
  90. };
  91. writer.replace(listening);
  92. drop(writer);
  93. if let Some(err) = err {
  94. return Err(err);
  95. }
  96. return Ok(());
  97. }
  98. pub fn try_accept(&self) -> Result<(Arc<TcpSocket>, smoltcp::wire::IpEndpoint), SystemError> {
  99. poll_ifaces();
  100. match self.inner.write().as_mut().expect("Tcp Inner is None") {
  101. Inner::Listening(listening) => listening.accept().map(|(stream, remote)| {
  102. (
  103. TcpSocket::new_established(stream, self.is_nonblock()),
  104. remote,
  105. )
  106. }),
  107. _ => Err(EINVAL),
  108. }
  109. }
  110. pub fn start_connect(
  111. &self,
  112. remote_endpoint: smoltcp::wire::IpEndpoint,
  113. ) -> Result<(), SystemError> {
  114. let mut writer = self.inner.write();
  115. let inner = writer.take().expect("Tcp Inner is None");
  116. let (init, err) = match inner {
  117. Inner::Init(init) => {
  118. let conn_result = init.connect(remote_endpoint);
  119. match conn_result {
  120. Ok(connecting) => (
  121. Inner::Connecting(connecting),
  122. if self.is_nonblock() {
  123. None
  124. } else {
  125. Some(EINPROGRESS)
  126. },
  127. ),
  128. Err((init, err)) => (Inner::Init(init), Some(err)),
  129. }
  130. }
  131. Inner::Connecting(connecting) if self.is_nonblock() => {
  132. (Inner::Connecting(connecting), Some(EALREADY))
  133. }
  134. Inner::Connecting(connecting) => (Inner::Connecting(connecting), None),
  135. Inner::Listening(inner) => (Inner::Listening(inner), Some(EISCONN)),
  136. Inner::Established(inner) => (Inner::Established(inner), Some(EISCONN)),
  137. };
  138. writer.replace(init);
  139. drop(writer);
  140. poll_ifaces();
  141. if let Some(err) = err {
  142. return Err(err);
  143. }
  144. return Ok(());
  145. }
  146. pub fn finish_connect(&self) -> Result<(), SystemError> {
  147. let mut writer = self.inner.write();
  148. let Inner::Connecting(conn) = writer.take().expect("Tcp Inner is None") else {
  149. log::error!("TcpSocket::finish_connect: not Connecting");
  150. return Err(EINVAL);
  151. };
  152. let (inner, err) = conn.into_result();
  153. writer.replace(inner);
  154. drop(writer);
  155. if let Some(err) = err {
  156. return Err(err);
  157. }
  158. return Ok(());
  159. }
  160. pub fn check_connect(&self) -> Result<(), SystemError> {
  161. match self.inner.read().as_ref().expect("Tcp Inner is None") {
  162. Inner::Connecting(_) => Err(EAGAIN_OR_EWOULDBLOCK),
  163. Inner::Established(_) => Ok(()), // TODO check established
  164. _ => Err(EINVAL), // TODO socket error options
  165. }
  166. }
  167. pub fn try_recv(&self, buf: &mut [u8]) -> Result<usize, SystemError> {
  168. self.inner
  169. .read()
  170. .as_ref()
  171. .map(|inner| {
  172. inner.iface().unwrap().poll();
  173. let result = match inner {
  174. Inner::Established(inner) => inner.recv_slice(buf),
  175. _ => Err(EINVAL),
  176. };
  177. inner.iface().unwrap().poll();
  178. result
  179. })
  180. .unwrap()
  181. }
  182. pub fn try_send(&self, buf: &[u8]) -> Result<usize, SystemError> {
  183. match self.inner.read().as_ref().expect("Tcp Inner is None") {
  184. Inner::Established(inner) => {
  185. let sent = inner.send_slice(buf);
  186. poll_ifaces();
  187. sent
  188. }
  189. _ => Err(EINVAL),
  190. }
  191. }
  192. fn update_events(&self) -> bool {
  193. match self.inner.read().as_ref().expect("Tcp Inner is None") {
  194. Inner::Init(_) => false,
  195. Inner::Connecting(connecting) => connecting.update_io_events(),
  196. Inner::Established(established) => {
  197. established.update_io_events(&self.pollee);
  198. false
  199. }
  200. Inner::Listening(listening) => {
  201. listening.update_io_events(&self.pollee);
  202. false
  203. }
  204. }
  205. }
  206. // should only call on accept
  207. fn is_acceptable(&self) -> bool {
  208. // (self.poll() & EP::EPOLLIN.bits() as usize) != 0
  209. self.inner.read().as_ref().unwrap().iface().unwrap().poll();
  210. EP::from_bits_truncate(self.poll() as u32).contains(EP::EPOLLIN)
  211. }
  212. }
impl Socket for TcpSocket {
    /// Wait queue used to block tasks on this socket (e.g. in `accept`).
    fn wait_queue(&self) -> &WaitQueue {
        &self.wait_queue
    }

    /// Returns the socket's local endpoint (getsockname).
    ///
    /// Unbound sockets report the unspecified endpoint; every other state is
    /// asked for its local address.
    fn get_name(&self) -> Result<Endpoint, SystemError> {
        match self.inner.read().as_ref().expect("Tcp Inner is None") {
            Inner::Init(Init::Unbound(_)) => Ok(Endpoint::Ip(UNSPECIFIED_LOCAL_ENDPOINT)),
            Inner::Init(Init::Bound((_, local))) => Ok(Endpoint::Ip(*local)),
            Inner::Connecting(connecting) => Ok(Endpoint::Ip(connecting.get_name())),
            Inner::Established(established) => Ok(Endpoint::Ip(established.local_endpoint())),
            Inner::Listening(listening) => Ok(Endpoint::Ip(listening.get_name())),
        }
    }

    /// Binds to an IP endpoint; non-IP endpoints are rejected with `EINVAL`.
    fn bind(&self, endpoint: Endpoint) -> Result<(), SystemError> {
        if let Endpoint::Ip(addr) = endpoint {
            return self.do_bind(addr);
        }
        return Err(EINVAL);
    }

    /// Connects to an IP endpoint; non-IP endpoints are rejected with `EINVAL`.
    fn connect(&self, endpoint: Endpoint) -> Result<(), SystemError> {
        if let Endpoint::Ip(addr) = endpoint {
            return self.start_connect(addr);
        }
        return Err(EINVAL);
    }

    /// Returns the cached epoll event bits (see the `pollee` field).
    fn poll(&self) -> usize {
        self.pollee.load(core::sync::atomic::Ordering::SeqCst)
    }

    fn listen(&self, backlog: usize) -> Result<(), SystemError> {
        self.do_listen(backlog)
    }

    /// Accepts a pending connection, returning the new socket's inode and the
    /// peer endpoint.
    ///
    /// Non-blocking sockets return immediately (EAGAIN when nothing is
    /// pending); blocking sockets sleep on the wait queue until
    /// `is_acceptable()` reports a readable listener, then retry.
    fn accept(&self) -> Result<(Arc<Inode>, Endpoint), SystemError> {
        // could block io
        if self.is_nonblock() {
            self.try_accept()
        } else {
            loop {
                // log::debug!("TcpSocket::accept: wake up");
                match self.try_accept() {
                    // Backlog empty: sleep until a connection might be ready.
                    // `?` propagates an interrupted sleep (e.g. by a signal).
                    Err(EAGAIN_OR_EWOULDBLOCK) => {
                        wq_wait_event_interruptible!(self.wait_queue, self.is_acceptable(), {})?;
                    }
                    result => break result,
                }
            }
        }
        .map(|(inner, endpoint)| (Inode::new(inner), Endpoint::Ip(endpoint)))
    }

    /// Receives into `buffer`.
    /// NOTE(review): `_flags` (e.g. MSG_DONTWAIT, MSG_PEEK) is ignored — TODO.
    fn recv(&self, buffer: &mut [u8], _flags: MessageFlag) -> Result<usize, SystemError> {
        self.try_recv(buffer)
    }

    /// Sends from `buffer`.
    /// NOTE(review): `_flags` is ignored — TODO.
    fn send(&self, buffer: &[u8], _flags: MessageFlag) -> Result<usize, SystemError> {
        self.try_send(buffer)
    }

    fn send_buffer_size(&self) -> usize {
        self.inner
            .read()
            .as_ref()
            .expect("Tcp Inner is None")
            .send_buffer_size()
    }

    fn recv_buffer_size(&self) -> usize {
        self.inner
            .read()
            .as_ref()
            .expect("Tcp Inner is None")
            .recv_buffer_size()
    }

    /// Closes the socket.
    ///
    /// Init/Listening states have nothing to tear down here; an established
    /// connection is closed and its iface binding released.
    /// NOTE(review): returning EINPROGRESS for a mid-connect close is unusual —
    /// POSIX close(2) normally succeeds regardless; confirm intended semantics.
    fn close(&self) -> Result<(), SystemError> {
        match self.inner.read().as_ref().expect("Tcp Inner is None") {
            Inner::Init(_) => {}
            Inner::Connecting(_) => {
                return Err(EINPROGRESS);
            }
            Inner::Established(es) => {
                es.close();
                es.release();
            }
            Inner::Listening(_) => {}
        }
        Ok(())
    }
}
  296. impl InetSocket for TcpSocket {
  297. fn on_iface_events(&self) {
  298. if self.update_events() {
  299. let result = self.finish_connect();
  300. // set error
  301. }
  302. }
  303. }
  304. // #[derive(Debug)]
  305. // // #[cast_to([sync] IndexNode)]
  306. // struct TcpStream {
  307. // inner: Established,
  308. // shutdown: Shutdown,
  309. // nonblock: AtomicBool,
  310. // epitems: EPollItems,
  311. // wait_queue: WaitQueue,
  312. // self_ref: Weak<Self>,
  313. // }
  314. // impl TcpStream {
  315. // pub fn is_nonblock(&self) -> bool {
  316. // self.nonblock.load(core::sync::atomic::Ordering::Relaxed)
  317. // }
  318. // pub fn read(&self, buf: &mut [u8]) -> Result<usize, SystemError> {
  319. // if self.nonblock.load(core::sync::atomic::Ordering::Relaxed) {
  320. // return self.recv_slice(buf);
  321. // } else {
  322. // return self.wait_queue().busy_wait(
  323. // EP::EPOLLIN,
  324. // || self.recv_slice(buf)
  325. // )
  326. // }
  327. // }
  328. // pub fn recv_slice(&self, buf: &mut [u8]) -> Result<usize, SystemError> {
  329. // let received = self.inner.recv_slice(buf);
  330. // poll_ifaces();
  331. // received
  332. // }
  333. // pub fn send_slice(&self, buf: &[u8]) -> Result<usize, SystemError> {
  334. // let sent = self.inner.send_slice(buf);
  335. // poll_ifaces();
  336. // sent
  337. // }
  338. // }
  339. // use crate::net::socket::{Inode, Socket};
  340. // use crate::filesystem::vfs::IndexNode;
  341. // impl IndexNode for TcpStream {
  342. // fn read_at(
  343. // &self,
  344. // _offset: usize,
  345. // _len: usize,
  346. // buf: &mut [u8],
  347. // data: crate::libs::spinlock::SpinLockGuard<crate::filesystem::vfs::FilePrivateData>,
  348. // ) -> Result<usize, SystemError> {
  349. // drop(data);
  350. // self.read(buf)
  351. // }
  352. // fn write_at(
  353. // &self,
  354. // _offset: usize,
  355. // _len: usize,
  356. // buf: &[u8],
  357. // data: crate::libs::spinlock::SpinLockGuard<crate::filesystem::vfs::FilePrivateData>,
  358. // ) -> Result<usize, SystemError> {
  359. // drop(data);
  360. // self.send_slice(buf)
  361. // }
  362. // fn fs(&self) -> alloc::sync::Arc<dyn crate::filesystem::vfs::FileSystem> {
  363. // todo!("TcpSocket::fs")
  364. // }
  365. // fn as_any_ref(&self) -> &dyn core::any::Any {
  366. // self
  367. // }
  368. // fn list(&self) -> Result<alloc::vec::Vec<alloc::string::String>, SystemError> {
  369. // todo!("TcpSocket::list")
  370. // }
  371. // }
  372. // impl Socket for TcpStream {
  373. // fn wait_queue(&self) -> WaitQueue {
  374. // self.wait_queue.clone()
  375. // }
  376. // fn poll(&self) -> usize {
  377. // // self.inner.with(|socket| {
  378. // // let mut mask = EPollEventType::empty();
  379. // // let shutdown = self.shutdown.get();
  380. // // let state = socket.state();
  381. // // use smoltcp::socket::tcp::State::*;
  382. // // type EP = crate::net::event_poll::EPollEventType;
  383. // // if shutdown.is_both_shutdown() || state == Closed {
  384. // // mask |= EP::EPOLLHUP;
  385. // // }
  386. // // if shutdown.is_recv_shutdown() {
  387. // // mask |= EP::EPOLLIN | EP::EPOLLRDNORM | EP::EPOLLRDHUP;
  388. // // }
  389. // // if state != SynSent && state != SynReceived {
  390. // // if socket.can_recv() {
  391. // // mask |= EP::EPOLLIN | EP::EPOLLRDNORM;
  392. // // }
  393. // // if !shutdown.is_send_shutdown() {
// // // __sk_stream_is_writeable is a Linux inline helper that decides whether
// // // a TCP socket is writable:
// // //   static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
// // // It returns true when sk_stream_wspace(sk) — the socket's current write
// // // space — is at least sk_stream_min_wspace(sk), the minimum write space
// // // needed to sustain a steady write rate, AND __sk_stream_memory_free(sk, wake)
// // // reports that the socket's buffer memory has room for more data (wake may
// // // signal the stack that data is waiting to be sent).
// // // Only when both conditions hold is the socket considered safely writable.
  412. // // if socket.can_send() {
  413. // // mask |= EP::EPOLLOUT | EP::EPOLLWRNORM | EP::EPOLLWRBAND;
  414. // // } else {
  415. // // todo!("TcpStream::poll: buffer space not enough");
  416. // // }
  417. // // } else {
  418. // // mask |= EP::EPOLLOUT | EP::EPOLLWRNORM;
  419. // // }
  420. // // // TODO tcp urg data => EPOLLPRI
  421. // // } else if state == SynSent /* inet_test_bit */ {
  422. // // log::warn!("Active TCP fastopen socket with defer_connect");
  423. // // mask |= EP::EPOLLOUT | EP::EPOLLWRNORM;
  424. // // }
  425. // // // TODO socket error
  426. // // return Ok(mask);
  427. // // })
  428. // self.pollee.load(core::sync::atomic::Ordering::Relaxed)
  429. // }
  430. // fn send_buffer_size(&self) -> usize {
  431. // self.inner.with(|socket| socket.send_capacity())
  432. // }
  433. // fn recv_buffer_size(&self) -> usize {
  434. // self.inner.with(|socket| socket.recv_capacity())
  435. // }
  436. // }