ring_buffer.rs
// Uncomment the #[must_use]s here once [RFC 1940] hits stable.
// [RFC 1940]: https://github.com/rust-lang/rust/issues/43302

use core::cmp;
use managed::ManagedSlice;

use crate::{Error, Result};
use crate::storage::Resettable;

/// A ring buffer.
///
/// This ring buffer implementation provides many ways to interact with it:
///
/// * Enqueueing or dequeueing one element from the corresponding side of the buffer;
/// * Enqueueing or dequeueing a slice of elements from the corresponding side of the buffer;
/// * Accessing the allocated and unallocated areas directly.
///
/// It is also zero-copy; all methods provide references into the buffer's storage.
/// Note that all references are mutable; it is considered more important to allow
/// in-place processing than to protect against accidental mutation.
///
/// This implementation is suitable both for simple uses such as a FIFO queue
/// of UDP packets, and for advanced ones such as a TCP reassembly buffer.
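///
/// # Examples
///
/// A minimal usage sketch, assuming heap-backed storage (the `smoltcp::storage`
/// path below is an assumption about where this type is re-exported; adjust as
/// needed):
///
/// ```rust
/// use smoltcp::storage::RingBuffer;
///
/// let mut ring = RingBuffer::new(vec![0u8; 4]);
/// assert_eq!(ring.enqueue_slice(b"abc"), 3);
///
/// let mut out = [0u8; 4];
/// assert_eq!(ring.dequeue_slice(&mut out), 3);
/// assert_eq!(&out[..3], b"abc");
/// ```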
#[derive(Debug)]
pub struct RingBuffer<'a, T: 'a> {
    storage: ManagedSlice<'a, T>,
    read_at: usize,
    length: usize,
}

impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Create a ring buffer with the given storage.
    ///
    /// The ring buffer starts out empty; the elements of `storage` are left as-is.
    pub fn new<S>(storage: S) -> RingBuffer<'a, T>
        where S: Into<ManagedSlice<'a, T>>,
    {
        RingBuffer {
            storage: storage.into(),
            read_at: 0,
            length: 0,
        }
    }

    /// Clear the ring buffer.
    pub fn clear(&mut self) {
        self.read_at = 0;
        self.length = 0;
    }

    /// Return the maximum number of elements in the ring buffer.
    pub fn capacity(&self) -> usize {
        self.storage.len()
    }

    /// Clear the ring buffer, and reset every element.
    pub fn reset(&mut self)
        where T: Resettable
    {
        self.clear();
        for elem in self.storage.iter_mut() {
            elem.reset();
        }
    }

    /// Return the current number of elements in the ring buffer.
    pub fn len(&self) -> usize {
        self.length
    }

    /// Return the number of elements that can be added to the ring buffer.
    pub fn window(&self) -> usize {
        self.capacity() - self.len()
    }

    /// Return the largest number of elements that can be added to the buffer
    /// without wrapping around (i.e. in a single `enqueue_many` call).
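    ///
    /// For example (an illustrative sketch; the `smoltcp::storage` path is an
    /// assumption about where this type is re-exported):
    ///
    /// ```rust
    /// use smoltcp::storage::RingBuffer;
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 4]);
    /// ring.enqueue_slice(b"abc");
    /// let mut out = [0u8; 2];
    /// ring.dequeue_slice(&mut out);
    /// // Three slots are free in total, but only one lies between the write
    /// // position and the end of the storage.
    /// assert_eq!(ring.window(), 3);
    /// assert_eq!(ring.contiguous_window(), 1);
    /// ```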
    pub fn contiguous_window(&self) -> usize {
        cmp::min(self.window(), self.capacity() - self.get_idx(self.length))
    }

    /// Query whether the buffer is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Query whether the buffer is full.
    pub fn is_full(&self) -> bool {
        self.window() == 0
    }

    /// Shorthand for `(self.read_at + idx) % self.capacity()` with an
    /// additional check to ensure that the capacity is not zero.
    fn get_idx(&self, idx: usize) -> usize {
        let len = self.capacity();
        if len > 0 {
            (self.read_at + idx) % len
        } else {
            0
        }
    }

    /// Shorthand for `(self.read_at + idx) % self.capacity()` with no
    /// additional check that the capacity is not zero.
    fn get_idx_unchecked(&self, idx: usize) -> usize {
        (self.read_at + idx) % self.capacity()
    }
}
/// This is the "discrete" ring buffer interface: it operates with single elements,
/// and boundary conditions (empty/full) are errors.
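///
/// A sketch of typical use (the `smoltcp::storage` path is illustrative):
///
/// ```rust
/// use smoltcp::storage::RingBuffer;
///
/// let mut ring = RingBuffer::new(vec![0u8; 2]);
/// *ring.enqueue_one().unwrap() = 1;
/// *ring.enqueue_one().unwrap() = 2;
/// assert!(ring.enqueue_one().is_err()); // full
///
/// assert_eq!(*ring.dequeue_one().unwrap(), 1);
/// assert_eq!(*ring.dequeue_one().unwrap(), 2);
/// assert!(ring.dequeue_one().is_err()); // empty
/// ```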
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Call `f` with a single buffer element, and enqueue the element if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is full.
    pub fn enqueue_one_with<'b, R, F>(&'b mut self, f: F) -> Result<R>
        where F: FnOnce(&'b mut T) -> Result<R>
    {
        if self.is_full() { return Err(Error::Exhausted) }

        let index = self.get_idx_unchecked(self.length);
        match f(&mut self.storage[index]) {
            Ok(result) => {
                self.length += 1;
                Ok(result)
            }
            Err(error) => Err(error)
        }
    }

    /// Enqueue a single element into the buffer, and return a reference to it,
    /// or return `Err(Error::Exhausted)` if the buffer is full.
    ///
    /// This function is a shortcut for `ring_buf.enqueue_one_with(Ok)`.
    pub fn enqueue_one(&mut self) -> Result<&mut T> {
        self.enqueue_one_with(Ok)
    }

    /// Call `f` with a single buffer element, and dequeue the element if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is empty.
    pub fn dequeue_one_with<'b, R, F>(&'b mut self, f: F) -> Result<R>
        where F: FnOnce(&'b mut T) -> Result<R>
    {
        if self.is_empty() { return Err(Error::Exhausted) }

        let next_at = self.get_idx_unchecked(1);
        match f(&mut self.storage[self.read_at]) {
            Ok(result) => {
                self.length -= 1;
                self.read_at = next_at;
                Ok(result)
            }
            Err(error) => Err(error)
        }
    }

    /// Dequeue an element from the buffer, and return a reference to it,
    /// or return `Err(Error::Exhausted)` if the buffer is empty.
    ///
    /// This function is a shortcut for `ring_buf.dequeue_one_with(Ok)`.
    pub fn dequeue_one(&mut self) -> Result<&mut T> {
        self.dequeue_one_with(Ok)
    }
}
/// This is the "continuous" ring buffer interface: it operates with element slices,
/// and boundary conditions (empty/full) simply result in empty slices.
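///
/// A sketch of typical use (illustrative path; oversized requests are clamped
/// rather than reported as errors):
///
/// ```rust
/// use smoltcp::storage::RingBuffer;
///
/// let mut ring = RingBuffer::new(vec![0u8; 4]);
/// ring.enqueue_many(2).copy_from_slice(b"ab");
/// // Asking for more than is available simply yields a shorter slice.
/// assert_eq!(ring.dequeue_many(4), b"ab");
/// ```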
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Call `f` with the largest contiguous slice of unallocated buffer elements,
    /// and enqueue the number of elements returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the size of the slice passed into it.
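    ///
    /// # Examples
    ///
    /// A sketch of the closure contract: `f` receives the free contiguous slice
    /// and returns how many elements it filled, plus an arbitrary result
    /// (illustrative path):
    ///
    /// ```rust
    /// use smoltcp::storage::RingBuffer;
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// let (size, sum) = ring.enqueue_many_with(|buf| {
    ///     buf[0] = 1;
    ///     buf[1] = 2;
    ///     (2, buf[0] + buf[1]) // only the first two elements are enqueued
    /// });
    /// assert_eq!((size, sum), (2, 3));
    /// assert_eq!(ring.len(), 2);
    /// ```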
    pub fn enqueue_many_with<'b, R, F>(&'b mut self, f: F) -> (usize, R)
        where F: FnOnce(&'b mut [T]) -> (usize, R)
    {
        if self.length == 0 {
            // Ring is currently empty. Reset `read_at` to optimize
            // for contiguous space.
            self.read_at = 0;
        }

        let write_at = self.get_idx(self.length);
        let max_size = self.contiguous_window();
        let (size, result) = f(&mut self.storage[write_at..write_at + max_size]);
        assert!(size <= max_size);
        self.length += size;
        (size, result)
    }

    /// Enqueue a slice of elements up to the given size into the buffer,
    /// and return a reference to them.
    ///
    /// This function may return a slice smaller than the given size
    /// if the free space in the buffer is not contiguous.
    // #[must_use]
    pub fn enqueue_many(&mut self, size: usize) -> &mut [T] {
        self.enqueue_many_with(|buf| {
            let size = cmp::min(size, buf.len());
            (size, &mut buf[..size])
        }).1
    }

    /// Enqueue as many elements from the given slice into the buffer as possible,
    /// and return the number of elements actually enqueued.
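    ///
    /// Unlike `enqueue_many`, this copies in up to two steps and therefore can
    /// wrap around the end of the storage; a sketch (illustrative path):
    ///
    /// ```rust
    /// use smoltcp::storage::RingBuffer;
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 4]);
    /// ring.enqueue_slice(b"abc");
    /// let mut out = [0u8; 2];
    /// ring.dequeue_slice(&mut out);
    /// // Three slots are free but only one is contiguous; the copy is split
    /// // across the wrap-around point, yet all three elements fit.
    /// assert_eq!(ring.enqueue_slice(b"def"), 3);
    /// ```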
    // #[must_use]
    pub fn enqueue_slice(&mut self, data: &[T]) -> usize
        where T: Copy
    {
        let (size_1, data) = self.enqueue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            buf[..size].copy_from_slice(&data[..size]);
            (size, &data[size..])
        });
        let (size_2, ()) = self.enqueue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            buf[..size].copy_from_slice(&data[..size]);
            (size, ())
        });
        size_1 + size_2
    }

    /// Call `f` with the largest contiguous slice of allocated buffer elements,
    /// and dequeue the number of elements returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the size of the slice passed into it.
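    ///
    /// # Examples
    ///
    /// A sketch mirroring `enqueue_many_with` (illustrative path):
    ///
    /// ```rust
    /// use smoltcp::storage::RingBuffer;
    ///
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// ring.enqueue_slice(&[1, 2, 3]);
    /// // Inspect the head of the queue, consuming only two elements.
    /// let (size, first) = ring.dequeue_many_with(|buf| (2, buf[0]));
    /// assert_eq!((size, first), (2, 1));
    /// assert_eq!(ring.len(), 1);
    /// ```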
    pub fn dequeue_many_with<'b, R, F>(&'b mut self, f: F) -> (usize, R)
        where F: FnOnce(&'b mut [T]) -> (usize, R)
    {
        let capacity = self.capacity();
        let max_size = cmp::min(self.len(), capacity - self.read_at);
        let (size, result) = f(&mut self.storage[self.read_at..self.read_at + max_size]);
        assert!(size <= max_size);
        self.read_at = if capacity > 0 {
            (self.read_at + size) % capacity
        } else {
            0
        };
        self.length -= size;
        (size, result)
    }

    /// Dequeue a slice of elements up to the given size from the buffer,
    /// and return a reference to them.
    ///
    /// This function may return a slice smaller than the given size
    /// if the allocated space in the buffer is not contiguous.
    // #[must_use]
    pub fn dequeue_many(&mut self, size: usize) -> &mut [T] {
        self.dequeue_many_with(|buf| {
            let size = cmp::min(size, buf.len());
            (size, &mut buf[..size])
        }).1
    }

    /// Dequeue as many elements from the buffer into the given slice as possible,
    /// and return the number of elements actually dequeued.
    // #[must_use]
    pub fn dequeue_slice(&mut self, data: &mut [T]) -> usize
        where T: Copy
    {
        let (size_1, data) = self.dequeue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            data[..size].copy_from_slice(&buf[..size]);
            (size, &mut data[size..])
        });
        let (size_2, ()) = self.dequeue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            data[..size].copy_from_slice(&buf[..size]);
            (size, ())
        });
        size_1 + size_2
    }
}
/// This is the "random access" ring buffer interface: it operates with element slices,
/// and allows accessing elements of the buffer that are not adjacent to its head or tail.
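///
/// A sketch of out-of-order assembly, as in a TCP reassembly buffer
/// (illustrative path):
///
/// ```rust
/// use smoltcp::storage::RingBuffer;
///
/// let mut ring = RingBuffer::new(vec![0u8; 8]);
/// // A segment arrives out of order: stash it two elements past the tail.
/// assert_eq!(ring.write_unallocated(2, b"cd"), 2);
/// // The missing segment arrives: fill the gap, then commit all four elements.
/// assert_eq!(ring.write_unallocated(0, b"ab"), 2);
/// ring.enqueue_unallocated(4);
///
/// let mut out = [0u8; 4];
/// assert_eq!(ring.read_allocated(0, &mut out), 4);
/// assert_eq!(&out, b"abcd");
/// ```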
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Return the largest contiguous slice of unallocated buffer elements starting
    /// at the given offset past the last allocated element, and up to the given size.
    // #[must_use]
    pub fn get_unallocated(&mut self, offset: usize, mut size: usize) -> &mut [T] {
        let start_at = self.get_idx(self.length + offset);
        // We can't access past the end of unallocated data.
        if offset > self.window() { return &mut [] }
        // We can't enqueue more than there is free space.
        let clamped_window = self.window() - offset;
        if size > clamped_window { size = clamped_window }
        // We can't contiguously enqueue past the end of the storage.
        let until_end = self.capacity() - start_at;
        if size > until_end { size = until_end }

        &mut self.storage[start_at..start_at + size]
    }

    /// Write as many elements from the given slice into unallocated buffer elements
    /// starting at the given offset past the last allocated element, and return
    /// the number of elements written.
    // #[must_use]
    pub fn write_unallocated(&mut self, offset: usize, data: &[T]) -> usize
        where T: Copy
    {
        let (size_1, offset, data) = {
            let slice = self.get_unallocated(offset, data.len());
            let slice_len = slice.len();
            slice.copy_from_slice(&data[..slice_len]);
            (slice_len, offset + slice_len, &data[slice_len..])
        };
        let size_2 = {
            let slice = self.get_unallocated(offset, data.len());
            let slice_len = slice.len();
            slice.copy_from_slice(&data[..slice_len]);
            slice_len
        };
        size_1 + size_2
    }

    /// Enqueue the given number of unallocated buffer elements.
    ///
    /// # Panics
    /// Panics if the number of elements given exceeds the number of unallocated elements.
    pub fn enqueue_unallocated(&mut self, count: usize) {
        assert!(count <= self.window());
        self.length += count;
    }

    /// Return the largest contiguous slice of allocated buffer elements starting
    /// at the given offset past the first allocated element, and up to the given size.
    // #[must_use]
    pub fn get_allocated(&self, offset: usize, mut size: usize) -> &[T] {
        let start_at = self.get_idx(offset);
        // We can't read past the end of the allocated data.
        if offset > self.length { return &[] }
        // We can't read more than we have allocated.
        let clamped_length = self.length - offset;
        if size > clamped_length { size = clamped_length }
        // We can't contiguously dequeue past the end of the storage.
        let until_end = self.capacity() - start_at;
        if size > until_end { size = until_end }

        &self.storage[start_at..start_at + size]
    }

    /// Read as many elements from allocated buffer elements into the given slice
    /// starting at the given offset past the first allocated element, and return
    /// the number of elements read.
    // #[must_use]
    pub fn read_allocated(&mut self, offset: usize, data: &mut [T]) -> usize
        where T: Copy
    {
        let (size_1, offset, data) = {
            let slice = self.get_allocated(offset, data.len());
            data[..slice.len()].copy_from_slice(slice);
            (slice.len(), offset + slice.len(), &mut data[slice.len()..])
        };
        let size_2 = {
            let slice = self.get_allocated(offset, data.len());
            data[..slice.len()].copy_from_slice(slice);
            slice.len()
        };
        size_1 + size_2
    }

    /// Dequeue the given number of allocated buffer elements.
    ///
    /// # Panics
    /// Panics if the number of elements given exceeds the number of allocated elements.
    pub fn dequeue_allocated(&mut self, count: usize) {
        assert!(count <= self.len());
        self.length -= count;
        self.read_at = self.get_idx(count);
    }
}
impl<'a, T: 'a> From<ManagedSlice<'a, T>> for RingBuffer<'a, T> {
    fn from(slice: ManagedSlice<'a, T>) -> RingBuffer<'a, T> {
        RingBuffer::new(slice)
    }
}
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_buffer_length_changes() {
        let mut ring = RingBuffer::new(vec![0; 2]);
        assert!(ring.is_empty());
        assert!(!ring.is_full());
        assert_eq!(ring.len(), 0);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 2);

        ring.length = 1;
        assert!(!ring.is_empty());
        assert!(!ring.is_full());
        assert_eq!(ring.len(), 1);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 1);

        ring.length = 2;
        assert!(!ring.is_empty());
        assert!(ring.is_full());
        assert_eq!(ring.len(), 2);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 0);
    }

    #[test]
    fn test_buffer_enqueue_dequeue_one_with() {
        let mut ring = RingBuffer::new(vec![0; 5]);
        assert_eq!(ring.dequeue_one_with(|_| unreachable!()) as Result<()>,
                   Err(Error::Exhausted));

        ring.enqueue_one_with(|e| Ok(e)).unwrap();
        assert!(!ring.is_empty());
        assert!(!ring.is_full());

        for i in 1..5 {
            ring.enqueue_one_with(|e| Ok(*e = i)).unwrap();
            assert!(!ring.is_empty());
        }
        assert!(ring.is_full());
        assert_eq!(ring.enqueue_one_with(|_| unreachable!()) as Result<()>,
                   Err(Error::Exhausted));

        for i in 0..5 {
            assert_eq!(ring.dequeue_one_with(|e| Ok(*e)).unwrap(), i);
            assert!(!ring.is_full());
        }
        assert_eq!(ring.dequeue_one_with(|_| unreachable!()) as Result<()>,
                   Err(Error::Exhausted));
        assert!(ring.is_empty());
    }

    #[test]
    fn test_buffer_enqueue_dequeue_one() {
        let mut ring = RingBuffer::new(vec![0; 5]);
        assert_eq!(ring.dequeue_one(), Err(Error::Exhausted));

        ring.enqueue_one().unwrap();
        assert!(!ring.is_empty());
        assert!(!ring.is_full());

        for i in 1..5 {
            *ring.enqueue_one().unwrap() = i;
            assert!(!ring.is_empty());
        }
        assert!(ring.is_full());
        assert_eq!(ring.enqueue_one(), Err(Error::Exhausted));

        for i in 0..5 {
            assert_eq!(*ring.dequeue_one().unwrap(), i);
            assert!(!ring.is_full());
        }
        assert_eq!(ring.dequeue_one(), Err(Error::Exhausted));
        assert!(ring.is_empty());
    }

    #[test]
    fn test_buffer_enqueue_many_with() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12);
            buf[0..2].copy_from_slice(b"ab");
            (2, true)
        }), (2, true));
        assert_eq!(ring.len(), 2);
        assert_eq!(&ring.storage[..], b"ab..........");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 2);
            buf[0..4].copy_from_slice(b"cdXX");
            (2, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"abcdXX......");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 4);
            buf[0..4].copy_from_slice(b"efgh");
            (4, ())
        });
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"....efgh....");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 8);
            buf[0..4].copy_from_slice(b"ijkl");
            (4, ())
        });
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"....efghijkl");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 4);
            buf[0..4].copy_from_slice(b"abcd");
            (4, ())
        });
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcd....ijkl");
    }

    #[test]
    fn test_buffer_enqueue_many() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        ring.enqueue_many(8).copy_from_slice(b"abcdefgh");
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        ring.enqueue_many(8).copy_from_slice(b"ijkl");
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");
    }

    #[test]
    fn test_buffer_enqueue_slice() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefgh"), 8);
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"....efgh....");

        assert_eq!(ring.enqueue_slice(b"ijklabcd"), 8);
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");
    }

    #[test]
    fn test_buffer_dequeue_many_with() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        assert_eq!(ring.dequeue_many_with(|buf| {
            assert_eq!(buf.len(), 12);
            assert_eq!(buf, b"abcdefghijkl");
            buf[..4].copy_from_slice(b"....");
            (4, true)
        }), (4, true));
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"....efghijkl");

        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"efghijkl");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"........ijkl");

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);
        assert_eq!(ring.len(), 8);

        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"ijkl");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"abcd");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        assert_eq!(ring.len(), 0);
        assert_eq!(&ring.storage[..], b"............");
    }

    #[test]
    fn test_buffer_dequeue_many() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        {
            let buf = ring.dequeue_many(8);
            assert_eq!(buf, b"abcdefgh");
            buf.copy_from_slice(b"........");
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"........ijkl");

        {
            let buf = ring.dequeue_many(8);
            assert_eq!(buf, b"ijkl");
            buf.copy_from_slice(b"....");
        }
        assert_eq!(ring.len(), 0);
        assert_eq!(&ring.storage[..], b"............");
    }

    #[test]
    fn test_buffer_dequeue_slice() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        {
            let mut buf = [0; 8];
            assert_eq!(ring.dequeue_slice(&mut buf[..]), 8);
            assert_eq!(&buf[..], b"abcdefgh");
            assert_eq!(ring.len(), 4);
        }

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);

        {
            let mut buf = [0; 8];
            assert_eq!(ring.dequeue_slice(&mut buf[..]), 8);
            assert_eq!(&buf[..], b"ijklabcd");
            assert_eq!(ring.len(), 0);
        }
    }

    #[test]
    fn test_buffer_get_unallocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.get_unallocated(16, 4), b"");

        {
            let buf = ring.get_unallocated(0, 4);
            buf.copy_from_slice(b"abcd");
        }
        assert_eq!(&ring.storage[..], b"abcd........");

        ring.enqueue_many(4);
        assert_eq!(ring.len(), 4);

        {
            let buf = ring.get_unallocated(4, 8);
            buf.copy_from_slice(b"ijkl");
        }
        assert_eq!(&ring.storage[..], b"abcd....ijkl");

        ring.enqueue_many(8).copy_from_slice(b"EFGHIJKL");
        ring.dequeue_many(4).copy_from_slice(b"abcd");
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdEFGHIJKL");

        {
            let buf = ring.get_unallocated(0, 8);
            buf.copy_from_slice(b"ABCD");
        }
        assert_eq!(&ring.storage[..], b"ABCDEFGHIJKL");
    }

    #[test]
    fn test_buffer_write_unallocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        ring.enqueue_many(6).copy_from_slice(b"abcdef");
        ring.dequeue_many(6).copy_from_slice(b"ABCDEF");

        assert_eq!(ring.write_unallocated(0, b"ghi"), 3);
        assert_eq!(ring.get_unallocated(0, 3), b"ghi");

        assert_eq!(ring.write_unallocated(3, b"jklmno"), 6);
        assert_eq!(ring.get_unallocated(3, 3), b"jkl");

        assert_eq!(ring.write_unallocated(9, b"pqrstu"), 3);
        assert_eq!(ring.get_unallocated(9, 3), b"pqr");
    }

    #[test]
    fn test_buffer_get_allocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.get_allocated(16, 4), b"");
        assert_eq!(ring.get_allocated(0, 4), b"");

        ring.enqueue_slice(b"abcd");
        assert_eq!(ring.get_allocated(0, 8), b"abcd");

        ring.enqueue_slice(b"efghijkl");
        ring.dequeue_many(4).copy_from_slice(b"....");
        assert_eq!(ring.get_allocated(4, 8), b"ijkl");

        ring.enqueue_slice(b"abcd");
        assert_eq!(ring.get_allocated(4, 8), b"ijkl");
    }

    #[test]
    fn test_buffer_read_allocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        ring.enqueue_many(12).copy_from_slice(b"abcdefghijkl");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(0, &mut data[..]), 6);
        assert_eq!(&data[..], b"abcdef");

        ring.dequeue_many(6).copy_from_slice(b"ABCDEF");
        ring.enqueue_many(3).copy_from_slice(b"mno");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(3, &mut data[..]), 6);
        assert_eq!(&data[..], b"jklmno");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(6, &mut data[..]), 3);
        assert_eq!(&data[..], b"mno\x00\x00\x00");
    }

    #[test]
    fn test_buffer_with_no_capacity() {
        let mut no_capacity: RingBuffer<u8> = RingBuffer::new(vec![]);
        // Call all functions that calculate the remainder against rx_buffer.capacity()
        // with a backing storage with a length of 0.
        assert_eq!(no_capacity.get_unallocated(0, 0), &[]);
        assert_eq!(no_capacity.get_allocated(0, 0), &[]);
        no_capacity.dequeue_allocated(0);
        assert_eq!(no_capacity.enqueue_many(0), &[]);
        assert_eq!(no_capacity.enqueue_one(), Err(Error::Exhausted));
        assert_eq!(no_capacity.contiguous_window(), 0);
    }

    /// Use the buffer a bit. Then empty it and put in an item of
    /// maximum size. By detecting a length of 0, the implementation
    /// can reset the current buffer position.
    #[test]
    fn test_buffer_write_wholly() {
        let mut ring = RingBuffer::new(vec![b'.'; 8]);
        ring.enqueue_many(2).copy_from_slice(b"xx");
        ring.enqueue_many(2).copy_from_slice(b"xx");
        assert_eq!(ring.len(), 4);
        ring.dequeue_many(4);
        assert_eq!(ring.len(), 0);

        let large = ring.enqueue_many(8);
        assert_eq!(large.len(), 8);
    }
}