ring_buffer.rs

// Uncomment the #[must_use]s here once [RFC 1940] hits stable.
// [RFC 1940]: https://github.com/rust-lang/rust/issues/43302

use core::cmp;
use managed::ManagedSlice;

use {Error, Result};
use super::Resettable;

/// A ring buffer.
///
/// This ring buffer implementation provides many ways to interact with it:
///
/// * Enqueueing or dequeueing one element from the corresponding side of the buffer;
/// * Enqueueing or dequeueing a slice of elements from the corresponding side of the buffer;
/// * Accessing allocated and unallocated areas directly.
///
/// It is also zero-copy; all methods provide references into the buffer's storage.
/// Note that all references are mutable; it is considered more important to allow
/// in-place processing than to protect from accidental mutation.
///
/// This implementation is suitable for both simple uses such as a FIFO queue
/// of UDP packets, and advanced ones such as a TCP reassembly buffer.
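///
/// A minimal usage sketch. The `smoltcp::storage` path and the use of `vec!`
/// (i.e. a `std` environment) are assumptions of this example, not requirements
/// of the type itself:
///
/// ```rust,ignore
/// use smoltcp::storage::RingBuffer;
///
/// let mut ring = RingBuffer::new(vec![0u8; 4]);
/// assert_eq!(ring.enqueue_slice(b"abc"), 3);   // copy three elements in
/// let mut out = [0u8; 4];
/// assert_eq!(ring.dequeue_slice(&mut out), 3); // copy them back out
/// assert_eq!(&out[..3], b"abc");
/// ```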
#[derive(Debug)]
pub struct RingBuffer<'a, T: 'a> {
    storage: ManagedSlice<'a, T>,
    read_at: usize,
    length: usize,
}

impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Create a ring buffer with the given storage.
    ///
    /// During creation, every element in `storage` is reset.
    pub fn new<S>(storage: S) -> RingBuffer<'a, T>
            where S: Into<ManagedSlice<'a, T>>,
    {
        RingBuffer {
            storage: storage.into(),
            read_at: 0,
            length: 0,
        }
    }

    /// Clear the ring buffer.
    pub fn clear(&mut self) {
        self.read_at = 0;
        self.length = 0;
    }

    /// Return the maximum number of elements in the ring buffer.
    pub fn capacity(&self) -> usize {
        self.storage.len()
    }

    /// Clear the ring buffer, and reset every element.
    pub fn reset(&mut self)
            where T: Resettable {
        self.clear();
        for elem in self.storage.iter_mut() {
            elem.reset();
        }
    }

    /// Return the current number of elements in the ring buffer.
    pub fn len(&self) -> usize {
        self.length
    }

    /// Return the number of elements that can be added to the ring buffer.
    pub fn window(&self) -> usize {
        self.capacity() - self.len()
    }

    /// Return the largest number of elements that can be added to the buffer
    /// without wrapping around (i.e. in a single `enqueue_many` call).
    pub fn contiguous_window(&self) -> usize {
        cmp::min(self.window(), self.capacity() - self.get_idx(self.length))
    }

    /// Query whether the buffer is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Query whether the buffer is full.
    pub fn is_full(&self) -> bool {
        self.window() == 0
    }

    /// Shorthand for `(self.read_at + idx) % self.capacity()` with an
    /// additional check to ensure that the capacity is not zero.
    fn get_idx(&self, idx: usize) -> usize {
        let len = self.capacity();
        if len > 0 {
            (self.read_at + idx) % len
        } else {
            0
        }
    }

    /// Shorthand for `(self.read_at + idx) % self.capacity()` with no
    /// additional check to ensure that the capacity is not zero.
    fn get_idx_unchecked(&self, idx: usize) -> usize {
        (self.read_at + idx) % self.capacity()
    }
}

/// This is the "discrete" ring buffer interface: it operates with single elements,
/// and boundary conditions (empty/full) are errors.
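///
/// A sketch of the one-element API (module path assumptions as in the
/// type-level example above):
///
/// ```rust,ignore
/// let mut ring = RingBuffer::new(vec![0u8; 2]);
/// // `enqueue_one_with` only commits the element if the closure returns `Ok`.
/// ring.enqueue_one_with(|slot| { *slot = 1; Ok(()) }).unwrap();
/// *ring.enqueue_one().unwrap() = 2;
/// assert_eq!(ring.dequeue_one_with(|slot| Ok(*slot)).unwrap(), 1);
/// assert_eq!(*ring.dequeue_one().unwrap(), 2);
/// ```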
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Call `f` with a single buffer element, and enqueue the element if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is full.
    pub fn enqueue_one_with<'b, R, F>(&'b mut self, f: F) -> Result<R>
            where F: FnOnce(&'b mut T) -> Result<R> {
        if self.is_full() { return Err(Error::Exhausted) }

        let index = self.get_idx_unchecked(self.length);
        match f(&mut self.storage[index]) {
            Ok(result) => {
                self.length += 1;
                Ok(result)
            }
            Err(error) => Err(error)
        }
    }

    /// Enqueue a single element into the buffer, and return a reference to it,
    /// or return `Err(Error::Exhausted)` if the buffer is full.
    ///
    /// This function is a shortcut for `ring_buf.enqueue_one_with(Ok)`.
    pub fn enqueue_one<'b>(&'b mut self) -> Result<&'b mut T> {
        self.enqueue_one_with(Ok)
    }

    /// Call `f` with a single buffer element, and dequeue the element if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is empty.
    pub fn dequeue_one_with<'b, R, F>(&'b mut self, f: F) -> Result<R>
            where F: FnOnce(&'b mut T) -> Result<R> {
        if self.is_empty() { return Err(Error::Exhausted) }

        let next_at = self.get_idx_unchecked(1);
        match f(&mut self.storage[self.read_at]) {
            Ok(result) => {
                self.length -= 1;
                self.read_at = next_at;
                Ok(result)
            }
            Err(error) => Err(error)
        }
    }

    /// Dequeue an element from the buffer, and return a reference to it,
    /// or return `Err(Error::Exhausted)` if the buffer is empty.
    ///
    /// This function is a shortcut for `ring_buf.dequeue_one_with(Ok)`.
    pub fn dequeue_one(&mut self) -> Result<&mut T> {
        self.dequeue_one_with(Ok)
    }
}

/// This is the "continuous" ring buffer interface: it operates with element slices,
/// and boundary conditions (empty/full) simply result in empty slices.
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Call `f` with the largest contiguous slice of unallocated buffer elements,
    /// and enqueue the number of elements returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the size of the slice passed into it.
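    ///
    /// A sketch of the closure contract: `f` receives the writable slice and
    /// returns `(elements_committed, user_result)` (module path assumptions as
    /// in the type-level example):
    ///
    /// ```rust,ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// let (size, sum) = ring.enqueue_many_with(|buf| {
    ///     buf[0] = 0xAA;
    ///     buf[1] = 0x55;
    ///     (2, 0xAAu16 + 0x55) // commit two elements, pass a result through
    /// });
    /// assert_eq!((size, sum), (2, 0xFF));
    /// ```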
    pub fn enqueue_many_with<'b, R, F>(&'b mut self, f: F) -> (usize, R)
            where F: FnOnce(&'b mut [T]) -> (usize, R) {
        let write_at = self.get_idx(self.length);
        let max_size = self.contiguous_window();
        let (size, result) = f(&mut self.storage[write_at..write_at + max_size]);
        assert!(size <= max_size);
        self.length += size;
        (size, result)
    }

    /// Enqueue a slice of elements up to the given size into the buffer,
    /// and return a reference to them.
    ///
    /// This function may return a slice smaller than the given size
    /// if the free space in the buffer is not contiguous.
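    ///
    /// For instance, once free space straddles the end of the storage, the
    /// returned slice stops at the wrap point (a sketch, with module path
    /// assumptions as above):
    ///
    /// ```rust,ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 4]);
    /// ring.enqueue_many(3); // fill elements 0..3
    /// ring.dequeue_many(2); // free elements 0..2 again
    /// // Three elements are free in total, but only one before the wrap point:
    /// assert_eq!(ring.enqueue_many(3).len(), 1);
    /// ```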
    // #[must_use]
    pub fn enqueue_many<'b>(&'b mut self, size: usize) -> &'b mut [T] {
        self.enqueue_many_with(|buf| {
            let size = cmp::min(size, buf.len());
            (size, &mut buf[..size])
        }).1
    }

    /// Enqueue as many elements from the given slice into the buffer as possible,
    /// and return the number of elements that could fit.
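    ///
    /// Unlike `enqueue_many`, this copies across the wrap point in two steps,
    /// so a single call can use all free space (a sketch, with module path
    /// assumptions as above):
    ///
    /// ```rust,ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 4]);
    /// ring.enqueue_many(3);
    /// ring.dequeue_many(2);
    /// // All three free elements are used, even though they are split
    /// // across the end of the storage:
    /// assert_eq!(ring.enqueue_slice(b"xyz"), 3);
    /// ```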
    // #[must_use]
    pub fn enqueue_slice(&mut self, data: &[T]) -> usize
            where T: Copy {
        // Copy as much as fits up to the wrap point...
        let (size_1, data) = self.enqueue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            buf[..size].copy_from_slice(&data[..size]);
            (size, &data[size..])
        });
        // ...then continue from the start of the storage with what remains.
        let (size_2, ()) = self.enqueue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            buf[..size].copy_from_slice(&data[..size]);
            (size, ())
        });
        size_1 + size_2
    }

    /// Call `f` with the largest contiguous slice of allocated buffer elements,
    /// and dequeue the number of elements returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the size of the slice passed into it.
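    ///
    /// A sketch mirroring `enqueue_many_with`: the closure reports how many of
    /// the presented elements it consumed (module path assumptions as above):
    ///
    /// ```rust,ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// ring.enqueue_slice(b"abcdef");
    /// let (size, first) = ring.dequeue_many_with(|buf| {
    ///     (2, buf[0]) // consume two elements, pass one back out
    /// });
    /// assert_eq!((size, first), (2, b'a'));
    /// assert_eq!(ring.len(), 4);
    /// ```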
    pub fn dequeue_many_with<'b, R, F>(&'b mut self, f: F) -> (usize, R)
            where F: FnOnce(&'b mut [T]) -> (usize, R) {
        let capacity = self.capacity();
        let max_size = cmp::min(self.len(), capacity - self.read_at);
        let (size, result) = f(&mut self.storage[self.read_at..self.read_at + max_size]);
        assert!(size <= max_size);
        self.read_at = if capacity > 0 {
            (self.read_at + size) % capacity
        } else {
            0
        };
        self.length -= size;
        (size, result)
    }

    /// Dequeue a slice of elements up to the given size from the buffer,
    /// and return a reference to them.
    ///
    /// This function may return a slice smaller than the given size
    /// if the allocated space in the buffer is not contiguous.
    // #[must_use]
    pub fn dequeue_many<'b>(&'b mut self, size: usize) -> &'b mut [T] {
        self.dequeue_many_with(|buf| {
            let size = cmp::min(size, buf.len());
            (size, &mut buf[..size])
        }).1
    }

    /// Dequeue as many elements from the buffer into the given slice as possible,
    /// and return the number of elements that could fit.
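    ///
    /// Like `enqueue_slice`, the copy proceeds in two steps across the wrap
    /// point (a sketch, with module path assumptions as above):
    ///
    /// ```rust,ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 4]);
    /// ring.enqueue_slice(b"abc");
    /// ring.dequeue_many(2);
    /// ring.enqueue_slice(b"de"); // "c", "d", "e" now straddle the end
    /// let mut out = [0u8; 3];
    /// assert_eq!(ring.dequeue_slice(&mut out), 3);
    /// assert_eq!(&out, b"cde");
    /// ```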
    // #[must_use]
    pub fn dequeue_slice(&mut self, data: &mut [T]) -> usize
            where T: Copy {
        // Copy out as much as possible up to the wrap point...
        let (size_1, data) = self.dequeue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            data[..size].copy_from_slice(&buf[..size]);
            (size, &mut data[size..])
        });
        // ...then continue from the start of the storage.
        let (size_2, ()) = self.dequeue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            data[..size].copy_from_slice(&buf[..size]);
            (size, ())
        });
        size_1 + size_2
    }
}

/// This is the "random access" ring buffer interface: it operates with element slices,
/// and allows access to elements of the buffer that are not adjacent to its head or tail.
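///
/// A sketch of out-of-order assembly in the spirit of a TCP reassembly buffer
/// (module path assumptions as in the type-level example):
///
/// ```rust,ignore
/// let mut ring = RingBuffer::new(vec![0u8; 8]);
/// // A segment arrives two elements past the current tail...
/// assert_eq!(ring.write_unallocated(2, b"cd"), 2);
/// // ...later the missing data arrives, and the whole run can be committed.
/// assert_eq!(ring.write_unallocated(0, b"ab"), 2);
/// ring.enqueue_unallocated(4);
/// assert_eq!(ring.dequeue_many(4), b"abcd");
/// ```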
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Return the largest contiguous slice of unallocated buffer elements starting
    /// at the given offset past the last allocated element, and up to the given size.
    // #[must_use]
    pub fn get_unallocated(&mut self, offset: usize, mut size: usize) -> &mut [T] {
        let start_at = self.get_idx(self.length + offset);
        // We can't access past the end of unallocated data.
        if offset > self.window() { return &mut [] }
        // We can't enqueue more than there is free space.
        let clamped_window = self.window() - offset;
        if size > clamped_window { size = clamped_window }
        // We can't contiguously enqueue past the end of the storage.
        let until_end = self.capacity() - start_at;
        if size > until_end { size = until_end }

        &mut self.storage[start_at..start_at + size]
    }

    /// Write as many elements from the given slice into unallocated buffer elements
    /// starting at the given offset past the last allocated element, and return
    /// the number of elements written.
    // #[must_use]
    pub fn write_unallocated(&mut self, offset: usize, data: &[T]) -> usize
            where T: Copy {
        // Write up to the wrap point first...
        let (size_1, offset, data) = {
            let slice = self.get_unallocated(offset, data.len());
            let slice_len = slice.len();
            slice.copy_from_slice(&data[..slice_len]);
            (slice_len, offset + slice_len, &data[slice_len..])
        };
        // ...then continue from the start of the storage.
        let size_2 = {
            let slice = self.get_unallocated(offset, data.len());
            let slice_len = slice.len();
            slice.copy_from_slice(&data[..slice_len]);
            slice_len
        };
        size_1 + size_2
    }

    /// Enqueue the given number of unallocated buffer elements.
    ///
    /// # Panics
    /// Panics if the number of elements given exceeds the number of unallocated elements.
    pub fn enqueue_unallocated(&mut self, count: usize) {
        assert!(count <= self.window());
        self.length += count;
    }

    /// Return the largest contiguous slice of allocated buffer elements starting
    /// at the given offset past the first allocated element, and up to the given size.
    // #[must_use]
    pub fn get_allocated(&self, offset: usize, mut size: usize) -> &[T] {
        let start_at = self.get_idx(offset);
        // We can't read past the end of the allocated data.
        if offset > self.length { return &[] }
        // We can't read more than we have allocated.
        let clamped_length = self.length - offset;
        if size > clamped_length { size = clamped_length }
        // We can't contiguously dequeue past the end of the storage.
        let until_end = self.capacity() - start_at;
        if size > until_end { size = until_end }

        &self.storage[start_at..start_at + size]
    }

    /// Read as many elements from allocated buffer elements into the given slice
    /// starting at the given offset past the first allocated element, and return
    /// the number of elements read.
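    ///
    /// A sketch of peeking at queued data without dequeueing it (module path
    /// assumptions as above):
    ///
    /// ```rust,ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// ring.enqueue_slice(b"abcdef");
    /// let mut peek = [0u8; 4];
    /// assert_eq!(ring.read_allocated(2, &mut peek), 4);
    /// assert_eq!(&peek, b"cdef");
    /// assert_eq!(ring.len(), 6); // nothing was dequeued...
    /// ring.dequeue_allocated(6); // ...until we do it explicitly
    /// assert!(ring.is_empty());
    /// ```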
    // #[must_use]
    pub fn read_allocated(&mut self, offset: usize, data: &mut [T]) -> usize
            where T: Copy {
        // Read up to the wrap point first...
        let (size_1, offset, data) = {
            let slice = self.get_allocated(offset, data.len());
            data[..slice.len()].copy_from_slice(slice);
            (slice.len(), offset + slice.len(), &mut data[slice.len()..])
        };
        // ...then continue from the start of the storage.
        let size_2 = {
            let slice = self.get_allocated(offset, data.len());
            data[..slice.len()].copy_from_slice(slice);
            slice.len()
        };
        size_1 + size_2
    }

    /// Dequeue the given number of allocated buffer elements.
    ///
    /// # Panics
    /// Panics if the number of elements given exceeds the number of allocated elements.
    pub fn dequeue_allocated(&mut self, count: usize) {
        assert!(count <= self.len());
        self.length -= count;
        self.read_at = self.get_idx(count);
    }
}

impl<'a, T: 'a> From<ManagedSlice<'a, T>> for RingBuffer<'a, T> {
    fn from(slice: ManagedSlice<'a, T>) -> RingBuffer<'a, T> {
        RingBuffer::new(slice)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_buffer_length_changes() {
        let mut ring = RingBuffer::new(vec![0; 2]);
        assert!(ring.is_empty());
        assert!(!ring.is_full());
        assert_eq!(ring.len(), 0);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 2);

        ring.length = 1;
        assert!(!ring.is_empty());
        assert!(!ring.is_full());
        assert_eq!(ring.len(), 1);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 1);

        ring.length = 2;
        assert!(!ring.is_empty());
        assert!(ring.is_full());
        assert_eq!(ring.len(), 2);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 0);
    }

    #[test]
    fn test_buffer_enqueue_dequeue_one_with() {
        let mut ring = RingBuffer::new(vec![0; 5]);
        assert_eq!(ring.dequeue_one_with(|_| unreachable!()) as Result<()>,
                   Err(Error::Exhausted));

        ring.enqueue_one_with(|e| Ok(e)).unwrap();
        assert!(!ring.is_empty());
        assert!(!ring.is_full());

        for i in 1..5 {
            ring.enqueue_one_with(|e| Ok(*e = i)).unwrap();
            assert!(!ring.is_empty());
        }
        assert!(ring.is_full());
        assert_eq!(ring.enqueue_one_with(|_| unreachable!()) as Result<()>,
                   Err(Error::Exhausted));

        for i in 0..5 {
            assert_eq!(ring.dequeue_one_with(|e| Ok(*e)).unwrap(), i);
            assert!(!ring.is_full());
        }
        assert_eq!(ring.dequeue_one_with(|_| unreachable!()) as Result<()>,
                   Err(Error::Exhausted));
        assert!(ring.is_empty());
    }

    #[test]
    fn test_buffer_enqueue_dequeue_one() {
        let mut ring = RingBuffer::new(vec![0; 5]);
        assert_eq!(ring.dequeue_one(), Err(Error::Exhausted));

        ring.enqueue_one().unwrap();
        assert!(!ring.is_empty());
        assert!(!ring.is_full());

        for i in 1..5 {
            *ring.enqueue_one().unwrap() = i;
            assert!(!ring.is_empty());
        }
        assert!(ring.is_full());
        assert_eq!(ring.enqueue_one(), Err(Error::Exhausted));

        for i in 0..5 {
            assert_eq!(*ring.dequeue_one().unwrap(), i);
            assert!(!ring.is_full());
        }
        assert_eq!(ring.dequeue_one(), Err(Error::Exhausted));
        assert!(ring.is_empty());
    }

    #[test]
    fn test_buffer_enqueue_many_with() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12);
            buf[0..2].copy_from_slice(b"ab");
            (2, true)
        }), (2, true));
        assert_eq!(ring.len(), 2);
        assert_eq!(&ring.storage[..], b"ab..........");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 2);
            buf[0..4].copy_from_slice(b"cdXX");
            (2, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"abcdXX......");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 4);
            buf[0..4].copy_from_slice(b"efgh");
            (4, ())
        });
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"....efgh....");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 8);
            buf[0..4].copy_from_slice(b"ijkl");
            (4, ())
        });
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"....efghijkl");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 4);
            buf[0..4].copy_from_slice(b"abcd");
            (4, ())
        });
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcd....ijkl");
    }

    #[test]
    fn test_buffer_enqueue_many() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        ring.enqueue_many(8).copy_from_slice(b"abcdefgh");
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        ring.enqueue_many(8).copy_from_slice(b"ijkl");
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");
    }

    #[test]
    fn test_buffer_enqueue_slice() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefgh"), 8);
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"....efgh....");

        assert_eq!(ring.enqueue_slice(b"ijklabcd"), 8);
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");
    }

    #[test]
    fn test_buffer_dequeue_many_with() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        assert_eq!(ring.dequeue_many_with(|buf| {
            assert_eq!(buf.len(), 12);
            assert_eq!(buf, b"abcdefghijkl");
            buf[..4].copy_from_slice(b"....");
            (4, true)
        }), (4, true));
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"....efghijkl");

        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"efghijkl");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"........ijkl");

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);
        assert_eq!(ring.len(), 8);

        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"ijkl");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"abcd");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        assert_eq!(ring.len(), 0);
        assert_eq!(&ring.storage[..], b"............");
    }

    #[test]
    fn test_buffer_dequeue_many() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        {
            let buf = ring.dequeue_many(8);
            assert_eq!(buf, b"abcdefgh");
            buf.copy_from_slice(b"........");
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"........ijkl");

        {
            let buf = ring.dequeue_many(8);
            assert_eq!(buf, b"ijkl");
            buf.copy_from_slice(b"....");
        }
        assert_eq!(ring.len(), 0);
        assert_eq!(&ring.storage[..], b"............");
    }

    #[test]
    fn test_buffer_dequeue_slice() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        {
            let mut buf = [0; 8];
            assert_eq!(ring.dequeue_slice(&mut buf[..]), 8);
            assert_eq!(&buf[..], b"abcdefgh");
            assert_eq!(ring.len(), 4);
        }

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);

        {
            let mut buf = [0; 8];
            assert_eq!(ring.dequeue_slice(&mut buf[..]), 8);
            assert_eq!(&buf[..], b"ijklabcd");
            assert_eq!(ring.len(), 0);
        }
    }

    #[test]
    fn test_buffer_get_unallocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.get_unallocated(16, 4), b"");

        {
            let buf = ring.get_unallocated(0, 4);
            buf.copy_from_slice(b"abcd");
        }
        assert_eq!(&ring.storage[..], b"abcd........");

        ring.enqueue_many(4);
        assert_eq!(ring.len(), 4);

        {
            let buf = ring.get_unallocated(4, 8);
            buf.copy_from_slice(b"ijkl");
        }
        assert_eq!(&ring.storage[..], b"abcd....ijkl");

        ring.enqueue_many(8).copy_from_slice(b"EFGHIJKL");
        ring.dequeue_many(4).copy_from_slice(b"abcd");
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdEFGHIJKL");

        {
            let buf = ring.get_unallocated(0, 8);
            buf.copy_from_slice(b"ABCD");
        }
        assert_eq!(&ring.storage[..], b"ABCDEFGHIJKL");
    }

    #[test]
    fn test_buffer_write_unallocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        ring.enqueue_many(6).copy_from_slice(b"abcdef");
        ring.dequeue_many(6).copy_from_slice(b"ABCDEF");

        assert_eq!(ring.write_unallocated(0, b"ghi"), 3);
        assert_eq!(&ring.storage[..], b"ABCDEFghi...");

        assert_eq!(ring.write_unallocated(3, b"jklmno"), 6);
        assert_eq!(&ring.storage[..], b"mnoDEFghijkl");

        assert_eq!(ring.write_unallocated(9, b"pqrstu"), 3);
        assert_eq!(&ring.storage[..], b"mnopqrghijkl");
    }

    #[test]
    fn test_buffer_get_allocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.get_allocated(16, 4), b"");
        assert_eq!(ring.get_allocated(0, 4), b"");

        ring.enqueue_slice(b"abcd");
        assert_eq!(ring.get_allocated(0, 8), b"abcd");

        ring.enqueue_slice(b"efghijkl");
        ring.dequeue_many(4).copy_from_slice(b"....");
        assert_eq!(ring.get_allocated(4, 8), b"ijkl");

        ring.enqueue_slice(b"abcd");
        assert_eq!(ring.get_allocated(4, 8), b"ijkl");
    }

    #[test]
    fn test_buffer_read_allocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        ring.enqueue_many(12).copy_from_slice(b"abcdefghijkl");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(0, &mut data[..]), 6);
        assert_eq!(&data[..], b"abcdef");

        ring.dequeue_many(6).copy_from_slice(b"ABCDEF");
        ring.enqueue_many(3).copy_from_slice(b"mno");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(3, &mut data[..]), 6);
        assert_eq!(&data[..], b"jklmno");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(6, &mut data[..]), 3);
        assert_eq!(&data[..], b"mno\x00\x00\x00");
    }

    #[test]
    fn test_buffer_with_no_capacity() {
        let mut no_capacity: RingBuffer<u8> = RingBuffer::new(vec![]);
        // Call all functions that compute an index modulo the buffer's capacity()
        // with a backing storage of length 0.
        assert_eq!(no_capacity.get_unallocated(0, 0), &[]);
        assert_eq!(no_capacity.get_allocated(0, 0), &[]);
        no_capacity.dequeue_allocated(0);
        assert_eq!(no_capacity.enqueue_many(0), &[]);
        assert_eq!(no_capacity.enqueue_one(), Err(Error::Exhausted));
        assert_eq!(no_capacity.contiguous_window(), 0);
    }
}