// Some of the functions in this ring buffer are marked as #[must_use]. The
// attribute warns callers that ignore the returned value, which here is
// usually a bug (for example, dropping the slice returned by `enqueue_many`
// instead of filling it). #[must_use] on functions was added by [RFC 1940].
// [RFC 1940]: https://github.com/rust-lang/rust/issues/43302

use core::cmp;
use managed::ManagedSlice;

use crate::storage::Resettable;
use crate::{Error, Result};

/// A ring buffer.
///
/// This ring buffer implementation provides many ways to interact with it:
///
/// * Enqueueing or dequeueing one element from the corresponding side of the buffer;
/// * Enqueueing or dequeueing a slice of elements from the corresponding side of the buffer;
/// * Accessing allocated and unallocated areas directly.
///
/// It is also zero-copy; all methods provide references into the buffer's storage.
/// Note that all references are mutable; it is considered more important to allow
/// in-place processing than to protect from accidental mutation.
///
/// This implementation is suitable for both simple uses such as a FIFO queue
/// of UDP packets, and advanced ones such as a TCP reassembly buffer.
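///
/// # Examples
///
/// A minimal usage sketch of the "discrete" interface; it assumes this type
/// is reachable as `smoltcp::storage::RingBuffer` in the crate's public API:
///
/// ```
/// use smoltcp::storage::RingBuffer; // assumed re-export path
///
/// // Back the ring buffer with a heap-allocated vector.
/// let mut ring = RingBuffer::new(vec![0u8; 4]);
/// assert!(ring.is_empty());
///
/// // Enqueue one element, then dequeue it again.
/// *ring.enqueue_one().unwrap() = 42;
/// assert_eq!(ring.len(), 1);
/// assert_eq!(*ring.dequeue_one().unwrap(), 42);
/// assert!(ring.is_empty());
/// ```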
#[derive(Debug)]
pub struct RingBuffer<'a, T: 'a> {
    storage: ManagedSlice<'a, T>,
    read_at: usize,
    length: usize,
}

impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Create a ring buffer with the given storage.
    ///
    /// The storage is used as-is; no elements are reset during creation.
    /// Use `reset` to clear the buffer and reset every element.
    pub fn new<S>(storage: S) -> RingBuffer<'a, T>
    where
        S: Into<ManagedSlice<'a, T>>,
    {
        RingBuffer {
            storage: storage.into(),
            read_at: 0,
            length: 0,
        }
    }

    /// Clear the ring buffer.
    pub fn clear(&mut self) {
        self.read_at = 0;
        self.length = 0;
    }

    /// Return the maximum number of elements in the ring buffer.
    pub fn capacity(&self) -> usize {
        self.storage.len()
    }

    /// Clear the ring buffer, and reset every element.
    pub fn reset(&mut self)
    where
        T: Resettable,
    {
        self.clear();
        for elem in self.storage.iter_mut() {
            elem.reset();
        }
    }

    /// Return the current number of elements in the ring buffer.
    pub fn len(&self) -> usize {
        self.length
    }

    /// Return the number of elements that can be added to the ring buffer.
    pub fn window(&self) -> usize {
        self.capacity() - self.len()
    }

    /// Return the largest number of elements that can be added to the buffer
    /// without wrapping around (i.e. in a single `enqueue_many` call).
    pub fn contiguous_window(&self) -> usize {
        cmp::min(self.window(), self.capacity() - self.get_idx(self.length))
    }

    /// Query whether the buffer is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Query whether the buffer is full.
    pub fn is_full(&self) -> bool {
        self.window() == 0
    }

    /// Shorthand for `(self.read_at + idx) % self.capacity()` with an
    /// additional check to ensure that the capacity is not zero.
    fn get_idx(&self, idx: usize) -> usize {
        let len = self.capacity();
        if len > 0 {
            (self.read_at + idx) % len
        } else {
            0
        }
    }

    /// Shorthand for `(self.read_at + idx) % self.capacity()` with no
    /// additional check; this panics if the capacity is zero.
    fn get_idx_unchecked(&self, idx: usize) -> usize {
        (self.read_at + idx) % self.capacity()
    }
}

/// This is the "discrete" ring buffer interface: it operates with single elements,
/// and boundary conditions (empty/full) are errors.
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Call `f` with a single buffer element, and enqueue the element if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is full.
    pub fn enqueue_one_with<'b, R, F>(&'b mut self, f: F) -> Result<R>
    where
        F: FnOnce(&'b mut T) -> Result<R>,
    {
        if self.is_full() {
            return Err(Error::Exhausted);
        }

        let index = self.get_idx_unchecked(self.length);
        match f(&mut self.storage[index]) {
            Ok(result) => {
                self.length += 1;
                Ok(result)
            }
            Err(error) => Err(error),
        }
    }

    /// Enqueue a single element into the buffer, and return a reference to it,
    /// or return `Err(Error::Exhausted)` if the buffer is full.
    ///
    /// This function is a shortcut for `ring_buf.enqueue_one_with(Ok)`.
    pub fn enqueue_one(&mut self) -> Result<&mut T> {
        self.enqueue_one_with(Ok)
    }

    /// Call `f` with a single buffer element, and dequeue the element if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is empty.
    pub fn dequeue_one_with<'b, R, F>(&'b mut self, f: F) -> Result<R>
    where
        F: FnOnce(&'b mut T) -> Result<R>,
    {
        if self.is_empty() {
            return Err(Error::Exhausted);
        }

        let next_at = self.get_idx_unchecked(1);
        match f(&mut self.storage[self.read_at]) {
            Ok(result) => {
                self.length -= 1;
                self.read_at = next_at;
                Ok(result)
            }
            Err(error) => Err(error),
        }
    }

    /// Dequeue an element from the buffer, and return a reference to it,
    /// or return `Err(Error::Exhausted)` if the buffer is empty.
    ///
    /// This function is a shortcut for `ring_buf.dequeue_one_with(Ok)`.
    pub fn dequeue_one(&mut self) -> Result<&mut T> {
        self.dequeue_one_with(Ok)
    }
}

/// This is the "continuous" ring buffer interface: it operates with element slices,
/// and boundary conditions (empty/full) simply result in empty slices.
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Call `f` with the largest contiguous slice of unallocated buffer elements,
    /// and enqueue the number of elements returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the size of the slice passed into it.
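    ///
    /// # Examples
    ///
    /// A sketch of claiming only part of the offered free space; the doc-test
    /// assumes this type is reachable as `smoltcp::storage::RingBuffer`:
    ///
    /// ```
    /// use smoltcp::storage::RingBuffer; // assumed re-export path
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// let (size, ()) = ring.enqueue_many_with(|buf| {
    ///     // `buf` is the contiguous free area; claim only two elements of it.
    ///     buf[..2].copy_from_slice(b"ab");
    ///     (2, ())
    /// });
    /// assert_eq!(size, 2);
    /// assert_eq!(ring.len(), 2);
    /// ```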
    pub fn enqueue_many_with<'b, R, F>(&'b mut self, f: F) -> (usize, R)
    where
        F: FnOnce(&'b mut [T]) -> (usize, R),
    {
        if self.length == 0 {
            // Ring is currently empty. Reset `read_at` to optimize
            // for contiguous space.
            self.read_at = 0;
        }

        let write_at = self.get_idx(self.length);
        let max_size = self.contiguous_window();
        let (size, result) = f(&mut self.storage[write_at..write_at + max_size]);
        assert!(size <= max_size);
        self.length += size;
        (size, result)
    }

    /// Enqueue a slice of elements up to the given size into the buffer,
    /// and return a reference to them.
    ///
    /// This function may return a slice smaller than the given size
    /// if the free space in the buffer is not contiguous.
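    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `smoltcp::storage::RingBuffer`): asking for
    /// more space than is free yields a shorter slice.
    ///
    /// ```
    /// use smoltcp::storage::RingBuffer; // assumed re-export path
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 4]);
    /// // Request eight elements; only four are available.
    /// let buf = ring.enqueue_many(8);
    /// assert_eq!(buf.len(), 4);
    /// buf.copy_from_slice(b"abcd");
    /// assert_eq!(ring.len(), 4);
    /// ```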
    #[must_use]
    pub fn enqueue_many(&mut self, size: usize) -> &mut [T] {
        self.enqueue_many_with(|buf| {
            let size = cmp::min(size, buf.len());
            (size, &mut buf[..size])
        })
        .1
    }

    /// Enqueue as many elements from the given slice into the buffer as possible,
    /// and return the number of elements that could fit.
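    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `smoltcp::storage::RingBuffer`): the return
    /// value reports how much of the slice was actually enqueued.
    ///
    /// ```
    /// use smoltcp::storage::RingBuffer; // assumed re-export path
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 4]);
    /// // Only four of the six elements fit.
    /// assert_eq!(ring.enqueue_slice(b"abcdef"), 4);
    /// assert_eq!(ring.len(), 4);
    /// ```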
    #[must_use]
    pub fn enqueue_slice(&mut self, data: &[T]) -> usize
    where
        T: Copy,
    {
        // The free space may wrap around the end of the storage, so the data
        // is copied in up to two contiguous chunks.
        let (size_1, data) = self.enqueue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            buf[..size].copy_from_slice(&data[..size]);
            (size, &data[size..])
        });
        let (size_2, ()) = self.enqueue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            buf[..size].copy_from_slice(&data[..size]);
            (size, ())
        });
        size_1 + size_2
    }

    /// Call `f` with the largest contiguous slice of allocated buffer elements,
    /// and dequeue the number of elements returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the size of the slice passed into it.
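    ///
    /// # Examples
    ///
    /// A sketch of consuming only part of the queued data; the doc-test
    /// assumes this type is reachable as `smoltcp::storage::RingBuffer`:
    ///
    /// ```
    /// use smoltcp::storage::RingBuffer; // assumed re-export path
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// assert_eq!(ring.enqueue_slice(b"abcd"), 4);
    /// let (size, first) = ring.dequeue_many_with(|buf| {
    ///     // Inspect the queued data, but consume only the first element.
    ///     (1, buf[0])
    /// });
    /// assert_eq!((size, first), (1, b'a'));
    /// assert_eq!(ring.len(), 3);
    /// ```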
    pub fn dequeue_many_with<'b, R, F>(&'b mut self, f: F) -> (usize, R)
    where
        F: FnOnce(&'b mut [T]) -> (usize, R),
    {
        let capacity = self.capacity();
        let max_size = cmp::min(self.len(), capacity - self.read_at);
        let (size, result) = f(&mut self.storage[self.read_at..self.read_at + max_size]);
        assert!(size <= max_size);
        self.read_at = if capacity > 0 {
            (self.read_at + size) % capacity
        } else {
            0
        };
        self.length -= size;
        (size, result)
    }

    /// Dequeue a slice of elements up to the given size from the buffer,
    /// and return a reference to them.
    ///
    /// This function may return a slice smaller than the given size
    /// if the allocated space in the buffer is not contiguous.
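    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `smoltcp::storage::RingBuffer`):
    ///
    /// ```
    /// use smoltcp::storage::RingBuffer; // assumed re-export path
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// assert_eq!(ring.enqueue_slice(b"abcd"), 4);
    /// // Request eight elements; only the four queued ones are returned.
    /// assert_eq!(ring.dequeue_many(8), b"abcd");
    /// assert!(ring.is_empty());
    /// ```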
    #[must_use]
    pub fn dequeue_many(&mut self, size: usize) -> &mut [T] {
        self.dequeue_many_with(|buf| {
            let size = cmp::min(size, buf.len());
            (size, &mut buf[..size])
        })
        .1
    }

    /// Dequeue as many elements from the buffer into the given slice as possible,
    /// and return the number of elements written.
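    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `smoltcp::storage::RingBuffer`):
    ///
    /// ```
    /// use smoltcp::storage::RingBuffer; // assumed re-export path
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// assert_eq!(ring.enqueue_slice(b"abcd"), 4);
    /// let mut out = [0u8; 8];
    /// // Only four elements were queued, so only four are copied out.
    /// assert_eq!(ring.dequeue_slice(&mut out), 4);
    /// assert_eq!(&out[..4], b"abcd");
    /// ```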
    #[must_use]
    pub fn dequeue_slice(&mut self, data: &mut [T]) -> usize
    where
        T: Copy,
    {
        // The allocated data may wrap around the end of the storage, so it is
        // copied out in up to two contiguous chunks.
        let (size_1, data) = self.dequeue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            data[..size].copy_from_slice(&buf[..size]);
            (size, &mut data[size..])
        });
        let (size_2, ()) = self.dequeue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            data[..size].copy_from_slice(&buf[..size]);
            (size, ())
        });
        size_1 + size_2
    }
}

/// This is the "random access" ring buffer interface: it operates with element slices,
/// and allows accessing elements of the buffer that are not adjacent to its head or tail.
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Return the largest contiguous slice of unallocated buffer elements starting
    /// at the given offset past the last allocated element, and up to the given size.
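    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `smoltcp::storage::RingBuffer`): the returned
    /// slice can be filled before the elements are committed with
    /// `enqueue_unallocated`.
    ///
    /// ```
    /// use smoltcp::storage::RingBuffer; // assumed re-export path
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// ring.get_unallocated(0, 2).copy_from_slice(b"ab");
    /// // Nothing is enqueued until the elements are committed.
    /// assert_eq!(ring.len(), 0);
    /// ring.enqueue_unallocated(2);
    /// assert_eq!(ring.len(), 2);
    /// ```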
    #[must_use]
    pub fn get_unallocated(&mut self, offset: usize, mut size: usize) -> &mut [T] {
        let start_at = self.get_idx(self.length + offset);
        // We can't access past the end of unallocated data.
        if offset > self.window() {
            return &mut [];
        }
        // We can't enqueue more than there is free space.
        let clamped_window = self.window() - offset;
        if size > clamped_window {
            size = clamped_window
        }
        // We can't contiguously enqueue past the end of the storage.
        let until_end = self.capacity() - start_at;
        if size > until_end {
            size = until_end
        }

        &mut self.storage[start_at..start_at + size]
    }

    /// Write as many elements from the given slice into unallocated buffer elements
    /// starting at the given offset past the last allocated element, and return
    /// the number of elements written.
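    ///
    /// # Examples
    ///
    /// A sketch of staging data out of order, as a TCP reassembly buffer would
    /// (assuming `smoltcp::storage::RingBuffer`):
    ///
    /// ```
    /// use smoltcp::storage::RingBuffer; // assumed re-export path
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// // Stage data two elements past the current tail...
    /// assert_eq!(ring.write_unallocated(2, b"cd"), 2);
    /// // ...then fill the gap in front of it and commit both chunks.
    /// assert_eq!(ring.write_unallocated(0, b"ab"), 2);
    /// ring.enqueue_unallocated(4);
    /// assert_eq!(ring.len(), 4);
    /// ```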
    #[must_use]
    pub fn write_unallocated(&mut self, offset: usize, data: &[T]) -> usize
    where
        T: Copy,
    {
        // The unallocated space may wrap around the end of the storage, so the
        // data is written in up to two contiguous chunks.
        let (size_1, offset, data) = {
            let slice = self.get_unallocated(offset, data.len());
            let slice_len = slice.len();
            slice.copy_from_slice(&data[..slice_len]);
            (slice_len, offset + slice_len, &data[slice_len..])
        };
        let size_2 = {
            let slice = self.get_unallocated(offset, data.len());
            let slice_len = slice.len();
            slice.copy_from_slice(&data[..slice_len]);
            slice_len
        };
        size_1 + size_2
    }

    /// Enqueue the given number of unallocated buffer elements.
    ///
    /// # Panics
    /// Panics if the number of elements given exceeds the number of unallocated elements.
    pub fn enqueue_unallocated(&mut self, count: usize) {
        assert!(count <= self.window());
        self.length += count;
    }

    /// Return the largest contiguous slice of allocated buffer elements starting
    /// at the given offset past the first allocated element, and up to the given size.
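    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `smoltcp::storage::RingBuffer`): queued
    /// elements can be inspected without dequeueing them.
    ///
    /// ```
    /// use smoltcp::storage::RingBuffer; // assumed re-export path
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// assert_eq!(ring.enqueue_slice(b"abcd"), 4);
    /// // Peek at two elements starting one past the head.
    /// assert_eq!(ring.get_allocated(1, 2), b"bc");
    /// assert_eq!(ring.len(), 4);
    /// ```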
    #[must_use]
    pub fn get_allocated(&self, offset: usize, mut size: usize) -> &[T] {
        let start_at = self.get_idx(offset);
        // We can't read past the end of the allocated data.
        if offset > self.length {
            return &[];
        }
        // We can't read more than we have allocated.
        let clamped_length = self.length - offset;
        if size > clamped_length {
            size = clamped_length
        }
        // We can't contiguously dequeue past the end of the storage.
        let until_end = self.capacity() - start_at;
        if size > until_end {
            size = until_end
        }

        &self.storage[start_at..start_at + size]
    }

    /// Read as many elements from allocated buffer elements into the given slice
    /// starting at the given offset past the first allocated element, and return
    /// the number of elements read.
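    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `smoltcp::storage::RingBuffer`):
    ///
    /// ```
    /// use smoltcp::storage::RingBuffer; // assumed re-export path
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// assert_eq!(ring.enqueue_slice(b"abcd"), 4);
    /// let mut out = [0u8; 2];
    /// // Copy out two elements starting one past the head; the queue
    /// // contents are left untouched.
    /// assert_eq!(ring.read_allocated(1, &mut out), 2);
    /// assert_eq!(&out, b"bc");
    /// assert_eq!(ring.len(), 4);
    /// ```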
    #[must_use]
    pub fn read_allocated(&mut self, offset: usize, data: &mut [T]) -> usize
    where
        T: Copy,
    {
        // The allocated data may wrap around the end of the storage, so it is
        // read in up to two contiguous chunks.
        let (size_1, offset, data) = {
            let slice = self.get_allocated(offset, data.len());
            data[..slice.len()].copy_from_slice(slice);
            (slice.len(), offset + slice.len(), &mut data[slice.len()..])
        };
        let size_2 = {
            let slice = self.get_allocated(offset, data.len());
            data[..slice.len()].copy_from_slice(slice);
            slice.len()
        };
        size_1 + size_2
    }

    /// Dequeue the given number of allocated buffer elements.
    ///
    /// # Panics
    /// Panics if the number of elements given exceeds the number of allocated elements.
    pub fn dequeue_allocated(&mut self, count: usize) {
        assert!(count <= self.len());
        self.length -= count;
        self.read_at = self.get_idx(count);
    }
}

impl<'a, T: 'a> From<ManagedSlice<'a, T>> for RingBuffer<'a, T> {
    fn from(slice: ManagedSlice<'a, T>) -> RingBuffer<'a, T> {
        RingBuffer::new(slice)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_buffer_length_changes() {
        let mut ring = RingBuffer::new(vec![0; 2]);
        assert!(ring.is_empty());
        assert!(!ring.is_full());
        assert_eq!(ring.len(), 0);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 2);

        ring.length = 1;
        assert!(!ring.is_empty());
        assert!(!ring.is_full());
        assert_eq!(ring.len(), 1);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 1);

        ring.length = 2;
        assert!(!ring.is_empty());
        assert!(ring.is_full());
        assert_eq!(ring.len(), 2);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 0);
    }

    #[test]
    fn test_buffer_enqueue_dequeue_one_with() {
        let mut ring = RingBuffer::new(vec![0; 5]);
        assert_eq!(
            ring.dequeue_one_with(|_| unreachable!()) as Result<()>,
            Err(Error::Exhausted)
        );

        ring.enqueue_one_with(Ok).unwrap();
        assert!(!ring.is_empty());
        assert!(!ring.is_full());

        for i in 1..5 {
            ring.enqueue_one_with(|e| Ok(*e = i)).unwrap();
            assert!(!ring.is_empty());
        }
        assert!(ring.is_full());
        assert_eq!(
            ring.enqueue_one_with(|_| unreachable!()) as Result<()>,
            Err(Error::Exhausted)
        );

        for i in 0..5 {
            assert_eq!(ring.dequeue_one_with(|e| Ok(*e)).unwrap(), i);
            assert!(!ring.is_full());
        }
        assert_eq!(
            ring.dequeue_one_with(|_| unreachable!()) as Result<()>,
            Err(Error::Exhausted)
        );
        assert!(ring.is_empty());
    }

    #[test]
    fn test_buffer_enqueue_dequeue_one() {
        let mut ring = RingBuffer::new(vec![0; 5]);
        assert_eq!(ring.dequeue_one(), Err(Error::Exhausted));

        ring.enqueue_one().unwrap();
        assert!(!ring.is_empty());
        assert!(!ring.is_full());

        for i in 1..5 {
            *ring.enqueue_one().unwrap() = i;
            assert!(!ring.is_empty());
        }
        assert!(ring.is_full());
        assert_eq!(ring.enqueue_one(), Err(Error::Exhausted));

        for i in 0..5 {
            assert_eq!(*ring.dequeue_one().unwrap(), i);
            assert!(!ring.is_full());
        }
        assert_eq!(ring.dequeue_one(), Err(Error::Exhausted));
        assert!(ring.is_empty());
    }

    #[test]
    fn test_buffer_enqueue_many_with() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(
            ring.enqueue_many_with(|buf| {
                assert_eq!(buf.len(), 12);
                buf[0..2].copy_from_slice(b"ab");
                (2, true)
            }),
            (2, true)
        );
        assert_eq!(ring.len(), 2);
        assert_eq!(&ring.storage[..], b"ab..........");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 2);
            buf[0..4].copy_from_slice(b"cdXX");
            (2, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"abcdXX......");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 4);
            buf[0..4].copy_from_slice(b"efgh");
            (4, ())
        });
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"....efgh....");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 8);
            buf[0..4].copy_from_slice(b"ijkl");
            (4, ())
        });
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"....efghijkl");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 4);
            buf[0..4].copy_from_slice(b"abcd");
            (4, ())
        });
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcd....ijkl");
    }

    #[test]
    fn test_buffer_enqueue_many() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        ring.enqueue_many(8).copy_from_slice(b"abcdefgh");
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        ring.enqueue_many(8).copy_from_slice(b"ijkl");
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");
    }

    #[test]
    fn test_buffer_enqueue_slice() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefgh"), 8);
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"....efgh....");

        assert_eq!(ring.enqueue_slice(b"ijklabcd"), 8);
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");
    }

    #[test]
    fn test_buffer_dequeue_many_with() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        assert_eq!(
            ring.dequeue_many_with(|buf| {
                assert_eq!(buf.len(), 12);
                assert_eq!(buf, b"abcdefghijkl");
                buf[..4].copy_from_slice(b"....");
                (4, true)
            }),
            (4, true)
        );
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"....efghijkl");

        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"efghijkl");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"........ijkl");

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);
        assert_eq!(ring.len(), 8);

        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"ijkl");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"abcd");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        assert_eq!(ring.len(), 0);
        assert_eq!(&ring.storage[..], b"............");
    }

    #[test]
    fn test_buffer_dequeue_many() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        {
            let buf = ring.dequeue_many(8);
            assert_eq!(buf, b"abcdefgh");
            buf.copy_from_slice(b"........");
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"........ijkl");

        {
            let buf = ring.dequeue_many(8);
            assert_eq!(buf, b"ijkl");
            buf.copy_from_slice(b"....");
        }
        assert_eq!(ring.len(), 0);
        assert_eq!(&ring.storage[..], b"............");
    }

    #[test]
    fn test_buffer_dequeue_slice() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        {
            let mut buf = [0; 8];
            assert_eq!(ring.dequeue_slice(&mut buf[..]), 8);
            assert_eq!(&buf[..], b"abcdefgh");
            assert_eq!(ring.len(), 4);
        }

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);

        {
            let mut buf = [0; 8];
            assert_eq!(ring.dequeue_slice(&mut buf[..]), 8);
            assert_eq!(&buf[..], b"ijklabcd");
            assert_eq!(ring.len(), 0);
        }
    }

    #[test]
    fn test_buffer_get_unallocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.get_unallocated(16, 4), b"");

        {
            let buf = ring.get_unallocated(0, 4);
            buf.copy_from_slice(b"abcd");
        }
        assert_eq!(&ring.storage[..], b"abcd........");

        let buf_enqueued = ring.enqueue_many(4);
        assert_eq!(buf_enqueued.len(), 4);
        assert_eq!(ring.len(), 4);

        {
            let buf = ring.get_unallocated(4, 8);
            buf.copy_from_slice(b"ijkl");
        }
        assert_eq!(&ring.storage[..], b"abcd....ijkl");

        ring.enqueue_many(8).copy_from_slice(b"EFGHIJKL");
        ring.dequeue_many(4).copy_from_slice(b"abcd");
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdEFGHIJKL");

        {
            let buf = ring.get_unallocated(0, 8);
            buf.copy_from_slice(b"ABCD");
        }
        assert_eq!(&ring.storage[..], b"ABCDEFGHIJKL");
    }

    #[test]
    fn test_buffer_write_unallocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        ring.enqueue_many(6).copy_from_slice(b"abcdef");
        ring.dequeue_many(6).copy_from_slice(b"ABCDEF");

        assert_eq!(ring.write_unallocated(0, b"ghi"), 3);
        assert_eq!(ring.get_unallocated(0, 3), b"ghi");

        assert_eq!(ring.write_unallocated(3, b"jklmno"), 6);
        assert_eq!(ring.get_unallocated(3, 3), b"jkl");

        assert_eq!(ring.write_unallocated(9, b"pqrstu"), 3);
        assert_eq!(ring.get_unallocated(9, 3), b"pqr");
    }

    #[test]
    fn test_buffer_get_allocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.get_allocated(16, 4), b"");
        assert_eq!(ring.get_allocated(0, 4), b"");

        let len_enqueued = ring.enqueue_slice(b"abcd");
        assert_eq!(ring.get_allocated(0, 8), b"abcd");
        assert_eq!(len_enqueued, 4);

        let len_enqueued = ring.enqueue_slice(b"efghijkl");
        ring.dequeue_many(4).copy_from_slice(b"....");
        assert_eq!(ring.get_allocated(4, 8), b"ijkl");
        assert_eq!(len_enqueued, 8);

        let len_enqueued = ring.enqueue_slice(b"abcd");
        assert_eq!(ring.get_allocated(4, 8), b"ijkl");
        assert_eq!(len_enqueued, 4);
    }

    #[test]
    fn test_buffer_read_allocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        ring.enqueue_many(12).copy_from_slice(b"abcdefghijkl");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(0, &mut data[..]), 6);
        assert_eq!(&data[..], b"abcdef");

        ring.dequeue_many(6).copy_from_slice(b"ABCDEF");
        ring.enqueue_many(3).copy_from_slice(b"mno");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(3, &mut data[..]), 6);
        assert_eq!(&data[..], b"jklmno");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(6, &mut data[..]), 3);
        assert_eq!(&data[..], b"mno\x00\x00\x00");
    }

    #[test]
    fn test_buffer_with_no_capacity() {
        let mut no_capacity: RingBuffer<u8> = RingBuffer::new(vec![]);

        // Call all functions that compute an index modulo `capacity()`,
        // using a backing storage with a length of 0.
        assert_eq!(no_capacity.get_unallocated(0, 0), &[]);
        assert_eq!(no_capacity.get_allocated(0, 0), &[]);
        no_capacity.dequeue_allocated(0);
        assert_eq!(no_capacity.enqueue_many(0), &[]);
        assert_eq!(no_capacity.enqueue_one(), Err(Error::Exhausted));
        assert_eq!(no_capacity.contiguous_window(), 0);
    }

    /// Use the buffer a bit. Then empty it and put in an item of
    /// maximum size. By detecting a length of 0, the implementation
    /// can reset the current buffer position.
    #[test]
    fn test_buffer_write_wholly() {
        let mut ring = RingBuffer::new(vec![b'.'; 8]);
        ring.enqueue_many(2).copy_from_slice(b"ab");
        ring.enqueue_many(2).copy_from_slice(b"cd");
        assert_eq!(ring.len(), 4);
        let buf_dequeued = ring.dequeue_many(4);
        assert_eq!(buf_dequeued, b"abcd");
        assert_eq!(ring.len(), 0);

        let large = ring.enqueue_many(8);
        assert_eq!(large.len(), 8);
    }
}