ring_buffer.rs
// Uncomment the #[must_use]s here once [RFC 1940] hits stable.
// [RFC 1940]: https://github.com/rust-lang/rust/issues/43302

use core::cmp;
use managed::ManagedSlice;

use {Error, Result};
use super::Resettable;

/// A ring buffer.
///
/// This ring buffer implementation provides many ways to interact with it:
///
/// * Enqueueing or dequeueing one element from the corresponding side of the buffer;
/// * Enqueueing or dequeueing a slice of elements from the corresponding side of the buffer;
/// * Accessing allocated and unallocated areas directly.
///
/// It is also zero-copy; all methods provide references into the buffer's storage.
/// Note that all references are mutable; it is considered more important to allow
/// in-place processing than to protect from accidental mutation.
///
/// This implementation is suitable for both simple uses such as a FIFO queue
/// of UDP packets, and advanced ones such as a TCP reassembly buffer.
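///
/// # Examples
///
/// A minimal usage sketch, assuming a `std` build where `Vec<T>` converts
/// into `ManagedSlice`:
///
/// ```ignore
/// let mut ring = RingBuffer::new(vec![0u8; 8]);
/// assert_eq!(ring.enqueue_slice(b"hello"), 5);
/// let mut out = [0u8; 8];
/// assert_eq!(ring.dequeue_slice(&mut out), 5);
/// assert_eq!(&out[..5], b"hello");
/// ```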
#[derive(Debug)]
pub struct RingBuffer<'a, T: 'a> {
    storage: ManagedSlice<'a, T>,
    read_at: usize,
    length: usize,
}
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Create a ring buffer with the given storage.
    ///
    /// The elements of `storage` are used as-is and are not reset during
    /// creation; call `reset` explicitly if every element must start out
    /// in a known state.
    pub fn new<S>(storage: S) -> RingBuffer<'a, T>
        where S: Into<ManagedSlice<'a, T>>,
    {
        RingBuffer {
            storage: storage.into(),
            read_at: 0,
            length: 0,
        }
    }

    /// Clear the ring buffer.
    pub fn clear(&mut self) {
        self.read_at = 0;
        self.length = 0;
    }

    /// Return the maximum number of elements in the ring buffer.
    pub fn capacity(&self) -> usize {
        self.storage.len()
    }

    /// Clear the ring buffer, and reset every element.
    pub fn reset(&mut self)
        where T: Resettable {
        self.clear();
        for elem in self.storage.iter_mut() {
            elem.reset();
        }
    }

    /// Return the current number of elements in the ring buffer.
    pub fn len(&self) -> usize {
        self.length
    }

    /// Return the number of elements that can be added to the ring buffer.
    pub fn window(&self) -> usize {
        self.capacity() - self.len()
    }

    /// Query whether the buffer is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Query whether the buffer is full.
    pub fn is_full(&self) -> bool {
        self.window() == 0
    }
}

/// This is the "discrete" ring buffer interface: it operates with single elements,
/// and boundary conditions (empty/full) are errors.
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Call `f` with a single buffer element, and enqueue the element if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is full.
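    ///
    /// # Examples
    ///
    /// A sketch of conditional enqueueing, assuming a `Vec`-backed buffer:
    ///
    /// ```ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 4]);
    /// // The element counts as enqueued only if the closure returns `Ok`.
    /// ring.enqueue_one_with(|slot| { *slot = 42; Ok(()) }).unwrap();
    /// assert_eq!(ring.len(), 1);
    /// ```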
    pub fn enqueue_one_with<'b, R, F>(&'b mut self, f: F) -> Result<R>
        where F: FnOnce(&'b mut T) -> Result<R> {
        if self.is_full() { return Err(Error::Exhausted) }

        let index = (self.read_at + self.length) % self.capacity();
        match f(&mut self.storage[index]) {
            Ok(result) => {
                self.length += 1;
                Ok(result)
            }
            Err(error) => Err(error)
        }
    }

    /// Enqueue a single element into the buffer, and return a reference to it,
    /// or return `Err(Error::Exhausted)` if the buffer is full.
    ///
    /// This function is a shortcut for `ring_buf.enqueue_one_with(Ok)`.
    pub fn enqueue_one<'b>(&'b mut self) -> Result<&'b mut T> {
        self.enqueue_one_with(Ok)
    }

    /// Call `f` with a single buffer element, and dequeue the element if `f`
    /// returns successfully, or return `Err(Error::Exhausted)` if the buffer is empty.
    pub fn dequeue_one_with<'b, R, F>(&'b mut self, f: F) -> Result<R>
        where F: FnOnce(&'b mut T) -> Result<R> {
        if self.is_empty() { return Err(Error::Exhausted) }

        let next_at = (self.read_at + 1) % self.capacity();
        match f(&mut self.storage[self.read_at]) {
            Ok(result) => {
                self.length -= 1;
                self.read_at = next_at;
                Ok(result)
            }
            Err(error) => Err(error)
        }
    }

    /// Dequeue an element from the buffer, and return a reference to it,
    /// or return `Err(Error::Exhausted)` if the buffer is empty.
    ///
    /// This function is a shortcut for `ring_buf.dequeue_one_with(Ok)`.
    pub fn dequeue_one(&mut self) -> Result<&mut T> {
        self.dequeue_one_with(Ok)
    }
}

/// This is the "continuous" ring buffer interface: it operates with element slices,
/// and boundary conditions (empty/full) simply result in empty slices.
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Call `f` with the largest contiguous slice of unallocated buffer elements,
    /// and enqueue the number of elements returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the size of the slice passed into it.
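    ///
    /// # Examples
    ///
    /// A sketch, assuming a `Vec`-backed buffer: write into the free slice and
    /// report how many elements were actually used.
    ///
    /// ```ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// let (written, ()) = ring.enqueue_many_with(|buf| {
    ///     buf[..3].copy_from_slice(b"abc");
    ///     (3, ())
    /// });
    /// assert_eq!(written, 3);
    /// ```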
    pub fn enqueue_many_with<'b, R, F>(&'b mut self, f: F) -> (usize, R)
        where F: FnOnce(&'b mut [T]) -> (usize, R) {
        let write_at = (self.read_at + self.length) % self.capacity();
        let max_size = cmp::min(self.window(), self.capacity() - write_at);
        let (size, result) = f(&mut self.storage[write_at..write_at + max_size]);
        assert!(size <= max_size);
        self.length += size;
        (size, result)
    }

    /// Enqueue a slice of elements up to the given size into the buffer,
    /// and return a reference to them.
    ///
    /// This function may return a slice smaller than the given size
    /// if the free space in the buffer is not contiguous.
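    ///
    /// # Examples
    ///
    /// A sketch, assuming a `Vec`-backed buffer: reserve space, then fill it
    /// in place.
    ///
    /// ```ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// ring.enqueue_many(3).copy_from_slice(b"abc");
    /// assert_eq!(ring.len(), 3);
    /// ```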
    // #[must_use]
    pub fn enqueue_many<'b>(&'b mut self, size: usize) -> &'b mut [T] {
        self.enqueue_many_with(|buf| {
            let size = cmp::min(size, buf.len());
            (size, &mut buf[..size])
        }).1
    }

    /// Enqueue as many elements from the given slice into the buffer as possible,
    /// and return the number of elements that could fit.
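    ///
    /// # Examples
    ///
    /// A sketch, assuming a `Vec`-backed buffer:
    ///
    /// ```ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 4]);
    /// // Only as many elements as there is free space are copied.
    /// assert_eq!(ring.enqueue_slice(b"abcdef"), 4);
    /// ```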
    // #[must_use]
    pub fn enqueue_slice(&mut self, data: &[T]) -> usize
        where T: Copy {
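        // The free space may wrap around the end of the storage, so the copy
        // is done in at most two contiguous runs.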
        let (size_1, data) = self.enqueue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            buf[..size].copy_from_slice(&data[..size]);
            (size, &data[size..])
        });
        let (size_2, ()) = self.enqueue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            buf[..size].copy_from_slice(&data[..size]);
            (size, ())
        });
        size_1 + size_2
    }

    /// Call `f` with the largest contiguous slice of allocated buffer elements,
    /// and dequeue the number of elements returned by `f`.
    ///
    /// # Panics
    /// This function panics if the number of elements returned by `f` is larger
    /// than the size of the slice passed into it.
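    ///
    /// # Examples
    ///
    /// A sketch, assuming a `Vec`-backed buffer: inspect the data and report
    /// how many elements were consumed.
    ///
    /// ```ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// ring.enqueue_slice(b"abc");
    /// let (read, ()) = ring.dequeue_many_with(|buf| {
    ///     assert_eq!(buf, b"abc");
    ///     (buf.len(), ())
    /// });
    /// assert_eq!(read, 3);
    /// ```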
    pub fn dequeue_many_with<'b, R, F>(&'b mut self, f: F) -> (usize, R)
        where F: FnOnce(&'b mut [T]) -> (usize, R) {
        let capacity = self.capacity();
        let max_size = cmp::min(self.len(), capacity - self.read_at);
        let (size, result) = f(&mut self.storage[self.read_at..self.read_at + max_size]);
        assert!(size <= max_size);
        self.read_at = (self.read_at + size) % capacity;
        self.length -= size;
        (size, result)
    }

    /// Dequeue a slice of elements up to the given size from the buffer,
    /// and return a reference to them.
    ///
    /// This function may return a slice smaller than the given size
    /// if the allocated space in the buffer is not contiguous.
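    ///
    /// # Examples
    ///
    /// A sketch, assuming a `Vec`-backed buffer:
    ///
    /// ```ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// ring.enqueue_slice(b"abc");
    /// // The request is clamped to the three allocated elements.
    /// assert_eq!(ring.dequeue_many(8), b"abc");
    /// ```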
    // #[must_use]
    pub fn dequeue_many<'b>(&'b mut self, size: usize) -> &'b mut [T] {
        self.dequeue_many_with(|buf| {
            let size = cmp::min(size, buf.len());
            (size, &mut buf[..size])
        }).1
    }

    /// Dequeue as many elements from the buffer into the given slice as possible,
    /// and return the number of elements that could fit.
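    ///
    /// # Examples
    ///
    /// A sketch, assuming a `Vec`-backed buffer:
    ///
    /// ```ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// ring.enqueue_slice(b"abc");
    /// let mut out = [0u8; 2];
    /// // Limited by the length of `out`.
    /// assert_eq!(ring.dequeue_slice(&mut out), 2);
    /// assert_eq!(&out, b"ab");
    /// ```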
    // #[must_use]
    pub fn dequeue_slice(&mut self, data: &mut [T]) -> usize
        where T: Copy {
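        // The allocated data may wrap around the end of the storage, so the
        // copy is done in at most two contiguous runs.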
        let (size_1, data) = self.dequeue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            data[..size].copy_from_slice(&buf[..size]);
            (size, &mut data[size..])
        });
        let (size_2, ()) = self.dequeue_many_with(|buf| {
            let size = cmp::min(buf.len(), data.len());
            data[..size].copy_from_slice(&buf[..size]);
            (size, ())
        });
        size_1 + size_2
    }
}

/// This is the "random access" ring buffer interface: it operates with element slices,
/// and allows access to elements of the buffer that are not adjacent to its head or tail.
impl<'a, T: 'a> RingBuffer<'a, T> {
    /// Return the largest contiguous slice of unallocated buffer elements starting
    /// at the given offset past the last allocated element, and up to the given size.
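    ///
    /// # Examples
    ///
    /// A sketch of out-of-order writing followed by a commit, assuming a
    /// `Vec`-backed buffer:
    ///
    /// ```ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// ring.get_unallocated(0, 4).copy_from_slice(b"abcd");
    /// // Nothing is enqueued until the elements are committed.
    /// assert_eq!(ring.len(), 0);
    /// ring.enqueue_unallocated(4);
    /// assert_eq!(ring.len(), 4);
    /// ```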
    // #[must_use]
    pub fn get_unallocated(&mut self, offset: usize, mut size: usize) -> &mut [T] {
        let start_at = (self.read_at + self.length + offset) % self.capacity();
        // We can't access past the end of unallocated data.
        if offset > self.window() { return &mut [] }
        // We can't enqueue more than there is free space.
        let clamped_window = self.window() - offset;
        if size > clamped_window { size = clamped_window }
        // We can't contiguously enqueue past the end of the storage.
        let until_end = self.capacity() - start_at;
        if size > until_end { size = until_end }
        &mut self.storage[start_at..start_at + size]
    }

    /// Write as many elements from the given slice into unallocated buffer elements
    /// starting at the given offset past the last allocated element, and return
    /// the number of elements written.
    // #[must_use]
    pub fn write_unallocated(&mut self, offset: usize, data: &[T]) -> usize
        where T: Copy {
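        // The target area may wrap around the end of the storage, so the write
        // is done in at most two contiguous runs.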
        let (size_1, offset, data) = {
            let slice = self.get_unallocated(offset, data.len());
            let slice_len = slice.len();
            slice.copy_from_slice(&data[..slice_len]);
            (slice_len, offset + slice_len, &data[slice_len..])
        };
        let size_2 = {
            let slice = self.get_unallocated(offset, data.len());
            let slice_len = slice.len();
            slice.copy_from_slice(&data[..slice_len]);
            slice_len
        };
        size_1 + size_2
    }

    /// Enqueue the given number of unallocated buffer elements.
    ///
    /// # Panics
    /// Panics if the number of elements given exceeds the number of unallocated elements.
    pub fn enqueue_unallocated(&mut self, count: usize) {
        assert!(count <= self.window());
        self.length += count;
    }

    /// Return the largest contiguous slice of allocated buffer elements starting
    /// at the given offset past the first allocated element, and up to the given size.
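    ///
    /// # Examples
    ///
    /// A sketch, assuming a `Vec`-backed buffer: peek at buffered data without
    /// dequeueing it.
    ///
    /// ```ignore
    /// let mut ring = RingBuffer::new(vec![0u8; 8]);
    /// ring.enqueue_slice(b"abcd");
    /// assert_eq!(ring.get_allocated(1, 2), b"bc");
    /// assert_eq!(ring.len(), 4); // nothing was dequeued
    /// ```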
    // #[must_use]
    pub fn get_allocated(&self, offset: usize, mut size: usize) -> &[T] {
        let start_at = (self.read_at + offset) % self.capacity();
        // We can't read past the end of the allocated data.
        if offset > self.length { return &[] }
        // We can't read more than we have allocated.
        let clamped_length = self.length - offset;
        if size > clamped_length { size = clamped_length }
        // We can't contiguously dequeue past the end of the storage.
        let until_end = self.capacity() - start_at;
        if size > until_end { size = until_end }
        &self.storage[start_at..start_at + size]
    }

    /// Read as many elements from allocated buffer elements into the given slice
    /// starting at the given offset past the first allocated element, and return
    /// the number of elements read.
    // #[must_use]
    pub fn read_allocated(&mut self, offset: usize, data: &mut [T]) -> usize
        where T: Copy {
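        // The source area may wrap around the end of the storage, so the read
        // is done in at most two contiguous runs.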
        let (size_1, offset, data) = {
            let slice = self.get_allocated(offset, data.len());
            data[..slice.len()].copy_from_slice(slice);
            (slice.len(), offset + slice.len(), &mut data[slice.len()..])
        };
        let size_2 = {
            let slice = self.get_allocated(offset, data.len());
            data[..slice.len()].copy_from_slice(slice);
            slice.len()
        };
        size_1 + size_2
    }

    /// Dequeue the given number of allocated buffer elements.
    ///
    /// # Panics
    /// Panics if the number of elements given exceeds the number of allocated elements.
    pub fn dequeue_allocated(&mut self, count: usize) {
        assert!(count <= self.len());
        self.length -= count;
        self.read_at = (self.read_at + count) % self.capacity();
    }
}

impl<'a, T: 'a> From<ManagedSlice<'a, T>> for RingBuffer<'a, T> {
    fn from(slice: ManagedSlice<'a, T>) -> RingBuffer<'a, T> {
        RingBuffer::new(slice)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_buffer_length_changes() {
        let mut ring = RingBuffer::new(vec![0; 2]);
        assert!(ring.is_empty());
        assert!(!ring.is_full());
        assert_eq!(ring.len(), 0);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 2);

        ring.length = 1;
        assert!(!ring.is_empty());
        assert!(!ring.is_full());
        assert_eq!(ring.len(), 1);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 1);

        ring.length = 2;
        assert!(!ring.is_empty());
        assert!(ring.is_full());
        assert_eq!(ring.len(), 2);
        assert_eq!(ring.capacity(), 2);
        assert_eq!(ring.window(), 0);
    }

    #[test]
    fn test_buffer_enqueue_dequeue_one_with() {
        let mut ring = RingBuffer::new(vec![0; 5]);
        assert_eq!(ring.dequeue_one_with(|_| unreachable!()) as Result<()>,
                   Err(Error::Exhausted));

        ring.enqueue_one_with(|e| Ok(e)).unwrap();
        assert!(!ring.is_empty());
        assert!(!ring.is_full());

        for i in 1..5 {
            ring.enqueue_one_with(|e| Ok(*e = i)).unwrap();
            assert!(!ring.is_empty());
        }
        assert!(ring.is_full());
        assert_eq!(ring.enqueue_one_with(|_| unreachable!()) as Result<()>,
                   Err(Error::Exhausted));

        for i in 0..5 {
            assert_eq!(ring.dequeue_one_with(|e| Ok(*e)).unwrap(), i);
            assert!(!ring.is_full());
        }
        assert_eq!(ring.dequeue_one_with(|_| unreachable!()) as Result<()>,
                   Err(Error::Exhausted));
        assert!(ring.is_empty());
    }

    #[test]
    fn test_buffer_enqueue_dequeue_one() {
        let mut ring = RingBuffer::new(vec![0; 5]);
        assert_eq!(ring.dequeue_one(), Err(Error::Exhausted));

        ring.enqueue_one().unwrap();
        assert!(!ring.is_empty());
        assert!(!ring.is_full());

        for i in 1..5 {
            *ring.enqueue_one().unwrap() = i;
            assert!(!ring.is_empty());
        }
        assert!(ring.is_full());
        assert_eq!(ring.enqueue_one(), Err(Error::Exhausted));

        for i in 0..5 {
            assert_eq!(*ring.dequeue_one().unwrap(), i);
            assert!(!ring.is_full());
        }
        assert_eq!(ring.dequeue_one(), Err(Error::Exhausted));
        assert!(ring.is_empty());
    }

    #[test]
    fn test_buffer_enqueue_many_with() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12);
            buf[0..2].copy_from_slice(b"ab");
            (2, true)
        }), (2, true));
        assert_eq!(ring.len(), 2);
        assert_eq!(&ring.storage[..], b"ab..........");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 2);
            buf[0..4].copy_from_slice(b"cdXX");
            (2, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"abcdXX......");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 4);
            buf[0..4].copy_from_slice(b"efgh");
            (4, ())
        });
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"....efgh....");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 12 - 8);
            buf[0..4].copy_from_slice(b"ijkl");
            (4, ())
        });
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"....efghijkl");

        ring.enqueue_many_with(|buf| {
            assert_eq!(buf.len(), 4);
            buf[0..4].copy_from_slice(b"abcd");
            (4, ())
        });
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcd....ijkl");
    }

    #[test]
    fn test_buffer_enqueue_many() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        ring.enqueue_many(8).copy_from_slice(b"abcdefgh");
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        ring.enqueue_many(8).copy_from_slice(b"ijkl");
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");
    }

    #[test]
    fn test_buffer_enqueue_slice() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.enqueue_slice(b"abcdefgh"), 8);
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdefgh....");

        for _ in 0..4 {
            *ring.dequeue_one().unwrap() = b'.';
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"....efgh....");

        assert_eq!(ring.enqueue_slice(b"ijklabcd"), 8);
        assert_eq!(ring.len(), 12);
        assert_eq!(&ring.storage[..], b"abcdefghijkl");
    }

    #[test]
    fn test_buffer_dequeue_many_with() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        assert_eq!(ring.dequeue_many_with(|buf| {
            assert_eq!(buf.len(), 12);
            assert_eq!(buf, b"abcdefghijkl");
            buf[..4].copy_from_slice(b"....");
            (4, true)
        }), (4, true));
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"....efghijkl");

        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"efghijkl");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"........ijkl");

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);
        assert_eq!(ring.len(), 8);

        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"ijkl");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        ring.dequeue_many_with(|buf| {
            assert_eq!(buf, b"abcd");
            buf[..4].copy_from_slice(b"....");
            (4, ())
        });
        assert_eq!(ring.len(), 0);
        assert_eq!(&ring.storage[..], b"............");
    }

    #[test]
    fn test_buffer_dequeue_many() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        {
            let buf = ring.dequeue_many(8);
            assert_eq!(buf, b"abcdefgh");
            buf.copy_from_slice(b"........");
        }
        assert_eq!(ring.len(), 4);
        assert_eq!(&ring.storage[..], b"........ijkl");

        {
            let buf = ring.dequeue_many(8);
            assert_eq!(buf, b"ijkl");
            buf.copy_from_slice(b"....");
        }
        assert_eq!(ring.len(), 0);
        assert_eq!(&ring.storage[..], b"............");
    }

    #[test]
    fn test_buffer_dequeue_slice() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        assert_eq!(ring.enqueue_slice(b"abcdefghijkl"), 12);

        {
            let mut buf = [0; 8];
            assert_eq!(ring.dequeue_slice(&mut buf[..]), 8);
            assert_eq!(&buf[..], b"abcdefgh");
            assert_eq!(ring.len(), 4);
        }

        assert_eq!(ring.enqueue_slice(b"abcd"), 4);

        {
            let mut buf = [0; 8];
            assert_eq!(ring.dequeue_slice(&mut buf[..]), 8);
            assert_eq!(&buf[..], b"ijklabcd");
            assert_eq!(ring.len(), 0);
        }
    }

    #[test]
    fn test_buffer_get_unallocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.get_unallocated(16, 4), b"");

        {
            let buf = ring.get_unallocated(0, 4);
            buf.copy_from_slice(b"abcd");
        }
        assert_eq!(&ring.storage[..], b"abcd........");

        ring.enqueue_many(4);
        assert_eq!(ring.len(), 4);

        {
            let buf = ring.get_unallocated(4, 8);
            buf.copy_from_slice(b"ijkl");
        }
        assert_eq!(&ring.storage[..], b"abcd....ijkl");

        ring.enqueue_many(8).copy_from_slice(b"EFGHIJKL");
        ring.dequeue_many(4).copy_from_slice(b"abcd");
        assert_eq!(ring.len(), 8);
        assert_eq!(&ring.storage[..], b"abcdEFGHIJKL");

        {
            let buf = ring.get_unallocated(0, 8);
            buf.copy_from_slice(b"ABCD");
        }
        assert_eq!(&ring.storage[..], b"ABCDEFGHIJKL");
    }

    #[test]
    fn test_buffer_write_unallocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        ring.enqueue_many(6).copy_from_slice(b"abcdef");
        ring.dequeue_many(6).copy_from_slice(b"ABCDEF");

        assert_eq!(ring.write_unallocated(0, b"ghi"), 3);
        assert_eq!(&ring.storage[..], b"ABCDEFghi...");

        assert_eq!(ring.write_unallocated(3, b"jklmno"), 6);
        assert_eq!(&ring.storage[..], b"mnoDEFghijkl");

        assert_eq!(ring.write_unallocated(9, b"pqrstu"), 3);
        assert_eq!(&ring.storage[..], b"mnopqrghijkl");
    }

    #[test]
    fn test_buffer_get_allocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);

        assert_eq!(ring.get_allocated(16, 4), b"");
        assert_eq!(ring.get_allocated(0, 4), b"");

        ring.enqueue_slice(b"abcd");
        assert_eq!(ring.get_allocated(0, 8), b"abcd");

        ring.enqueue_slice(b"efghijkl");
        ring.dequeue_many(4).copy_from_slice(b"....");
        assert_eq!(ring.get_allocated(4, 8), b"ijkl");

        ring.enqueue_slice(b"abcd");
        assert_eq!(ring.get_allocated(4, 8), b"ijkl");
    }

    #[test]
    fn test_buffer_read_allocated() {
        let mut ring = RingBuffer::new(vec![b'.'; 12]);
        ring.enqueue_many(12).copy_from_slice(b"abcdefghijkl");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(0, &mut data[..]), 6);
        assert_eq!(&data[..], b"abcdef");

        ring.dequeue_many(6).copy_from_slice(b"ABCDEF");
        ring.enqueue_many(3).copy_from_slice(b"mno");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(3, &mut data[..]), 6);
        assert_eq!(&data[..], b"jklmno");

        let mut data = [0; 6];
        assert_eq!(ring.read_allocated(6, &mut data[..]), 3);
        assert_eq!(&data[..], b"mno\x00\x00\x00");
    }
}