//! Memory bookkeeping.

use prelude::*;

use vec::Vec;

use core::{ptr, cmp, mem, intrinsics};

/// Canonicalize a BRK request.
///
/// Syscalls can be expensive, which is why we would rather acquire more memory than necessary
/// than make many syscalls, each acquiring a memory stub. Memory stubs are small blocks of
/// memory, which are essentially useless until merged with another block.
///
/// To avoid many syscalls and accumulating memory stubs, we BRK a little more memory than
/// necessary. This function calculates the memory to be BRK'd based on the necessary memory.
///
/// The return value is always greater than or equal to the argument.
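///
/// As a worked example with the constants defined in the function body below:
/// `canonicalize_brk(100)` gives `max(65536, 100 + min(2 * 100, 262144)) = 65536`, while
/// `canonicalize_brk(1_000_000)` gives `1_000_000 + 262_144 = 1_262_144`.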
#[inline]
fn canonicalize_brk(min: usize) -> usize {
    /// The BRK multiplier.
    ///
    /// The factor determining the linear dependence between the minimum segment, and the
    /// acquired segment.
    const BRK_MULTIPLIER: usize = 2;
    /// The minimum size to be BRK'd.
    const BRK_MIN: usize = 65536;
    /// The maximal amount of _extra_ bytes.
    const BRK_MAX_EXTRA: usize = 4 * 65536;

    let res = cmp::max(BRK_MIN, min.saturating_add(cmp::min(BRK_MULTIPLIER * min, BRK_MAX_EXTRA)));

    // Make some handy assertions.
    debug_assert!(res >= min, "Canonicalized BRK space is smaller than the one requested.");

    res
}

/// The default OOM handler.
///
/// This will simply abort the process.
fn default_oom_handler() -> ! {
    unsafe {
        intrinsics::abort();
    }
}

/// The memory bookkeeper.
///
/// This is the main component of ralloc. Its job is to keep track of the free blocks in a
/// structured manner, such that allocation, reallocation, and deallocation are all efficient.
/// Particularly, it keeps a list of blocks, commonly called the "block pool". This list is kept
/// sorted by the blocks' pointers. Entries in the block pool can be "empty", meaning that you
/// can overwrite the entry without breaking consistency.
///
/// Making use of only [`alloc`](#method.alloc), [`free`](#method.free), and
/// [`realloc`](#method.realloc) (and following their respective assumptions) guarantees that no
/// buffer overrun, arithmetic overflow, panic, or otherwise unexpected crash will happen.
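///
/// # Usage sketch
///
/// A minimal sketch of the intended call pattern (marked `notrust`, since the involved types
/// are internal to the crate):
///
/// ```notrust
/// let mut keeper = Bookkeeper::new();
/// // Allocate 4096 bytes, aligned to a 16 byte boundary.
/// let block = keeper.alloc(4096, 16);
/// // ... use the block ...
/// keeper.free(block);
/// ```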
pub struct Bookkeeper {
    /// The internal block pool.
    ///
    /// Guarantees
    /// ==========
    ///
    /// Certain guarantees are made:
    ///
    /// 1. The list is always sorted with respect to the blocks' pointers.
    /// 2. No two blocks are adjacent, except if the right block is empty.
    /// 3. There are no trailing empty blocks.
    ///
    /// These are invariants assuming that only the public methods are used.
    pool: Vec<Block>,
    /// The inner OOM handler.
    oom_handler: fn() -> !,
    /// The number of bytes currently allocated.
    #[cfg(feature = "debug_tools")]
    allocated: usize,
}

impl Bookkeeper {
    /// Create a new, empty block pool.
    ///
    /// This will make no allocations or BRKs.
    #[inline]
    #[cfg(feature = "debug_tools")]
    pub const fn new() -> Bookkeeper {
        Bookkeeper {
            pool: Vec::new(),
            oom_handler: default_oom_handler,
            allocated: 0,
        }
    }

    #[inline]
    #[cfg(not(feature = "debug_tools"))]
    pub const fn new() -> Bookkeeper {
        Bookkeeper {
            pool: Vec::new(),
            oom_handler: default_oom_handler,
        }
    }

    /// Allocate a chunk of memory.
    ///
    /// This function takes a size and an alignment. From these a fitting block is found, to
    /// which a pointer is returned. The block returned is guaranteed to be aligned to `align`.
    ///
    /// # Example
    ///
    /// We start with our initial segment.
    ///
    /// ```notrust
    ///    Address space
    ///   I---------------------------------I
    /// B
    /// l
    /// k
    /// s
    /// ```
    ///
    /// We then split it at the aligner, which is used for making sure that the pointer is
    /// aligned properly.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B    ^    I--------------------------I
    /// l   al
    /// k
    /// s
    /// ```
    ///
    /// We then use the remaining block, but leave the excessive space.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B         I--------I
    /// l         \_________________/
    /// k           our allocated block.
    /// s
    /// ```
    ///
    /// A block representing the marked area is then returned.
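    ///
    /// A sketch of the corresponding call (not a compiled doctest, since `Block` is internal):
    ///
    /// ```notrust
    /// // Request 256 bytes, aligned to a 32 byte boundary.
    /// let block = keeper.alloc(256, 32);
    /// // The debug assertions below uphold exactly these two postconditions.
    /// assert!(block.aligned_to(32));
    /// assert!(block.size() == 256);
    /// ```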
    pub fn alloc(&mut self, size: usize, align: usize) -> Block {
        // TODO: scan more intelligently.
        if let Some((n, b)) = self.pool.iter_mut().enumerate().filter_map(|(n, i)| {
            // Try to split at the aligner.
            i.align(align).and_then(|(a, b)| if b.size() >= size {
                // Override the old block.
                *i = a;
                Some((n, b))
            } else {
                None
            })
        }).next() {
            let (res, excessive) = b.split(size);
            // Mark the excessive space as free.
            self.free_ind(n, excessive);
            //    ^^^^ Important note to self: Do not replace the old block, it is already
            //    replaced by the alignment block. Better let `free_ind` handle that.

            // Check consistency.
            self.check();
            debug_assert!(res.aligned_to(align), "Alignment failed.");
            debug_assert!(res.size() == size, "Requested space does not match with the returned \
                          block.");

            self.leave(res)
        } else {
            // No fitting block found. Allocate a new block.
            let res = self.alloc_fresh(size, align);

            // "Leave" the allocator.
            self.leave(res)
        }
    }

    /// Free a memory block.
    ///
    /// After this has been called, no guarantees are made about the passed pointer. If it wants
    /// to, it could begin shooting laser beams.
    ///
    /// Freeing an invalid block will drop all future guarantees about this bookkeeper.
    ///
    /// # Example
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B         I--------I
    /// l         \_________________/
    /// k         the used block we want to deallocate.
    /// s
    /// ```
    ///
    /// If the blocks are adjacent, we merge them:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B I-----------------I
    /// l         I--------I
    /// k
    /// s
    /// ```
    ///
    /// This gives us:
    ///
    /// ```notrust
    ///    Address space
    ///   I------------------------I
    /// B         I--------I
    /// l
    /// k
    /// s
    /// ```
    ///
    /// And we're done. If it cannot be done, we insert the block, while keeping the list sorted.
    /// See [`insert`](#method.insert) for details.
    #[inline]
    pub fn free(&mut self, block: Block) {
        // "Enter" the allocator.
        let block = self.enter(block);

        let ind = self.find(&block);

        self.free_ind(ind, block);
    }

    /// Reallocate memory.
    ///
    /// If necessary (inplace reallocation is not possible or feasible) it will allocate a new
    /// buffer, fill it with the contents of the old buffer, and deallocate the replaced buffer.
    ///
    /// The following guarantees are made:
    ///
    /// 1. The returned block is valid and aligned to `align`.
    /// 2. The returned block contains the same data byte-for-byte as the original buffer.
    ///
    /// The data will be truncated if `new_size` is smaller than `block`'s size.
    ///
    /// Example
    /// =======
    ///
    /// We will first try to perform an in-place reallocation, and if that fails, we will use
    /// memmove.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B \~~~~~~~~~~~~~~~~~~~~~/
    /// l        needed
    /// k
    /// s
    /// ```
    ///
    /// We simply find the block next to our initial block. If this block is free and has
    /// sufficient size, we will simply merge it into our initial block, and leave the excessive
    /// space as free. If these conditions are not met, we have to allocate a new buffer, and
    /// then deallocate the old one, after which we use memmove to copy the data over to the
    /// newly allocated buffer.
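    ///
    /// A sketch of the call (not a compiled doctest):
    ///
    /// ```notrust
    /// // Grow a 64 byte allocation to 128 bytes; the contents are preserved byte-for-byte.
    /// let block = keeper.alloc(64, 8);
    /// let block = keeper.realloc(block, 128, 8);
    /// assert!(block.size() >= 128);
    /// ```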
    pub fn realloc(&mut self, block: Block, new_size: usize, align: usize) -> Block {
        // Find the index.
        let ind = self.find(&block);

        // "Enter" the allocator.
        let block = self.enter(block);

        // Try to do an inplace reallocation.
        match self.realloc_inplace_ind(ind, block, new_size) {
            Ok(block) => self.leave(block),
            Err(block) => {
                // Reallocation cannot be done inplace.

                // Allocate a new block with the same size.
                let mut res = self.alloc(new_size, align);

                // Copy the old data to the new location.
                block.copy_to(&mut res);

                // Free the old block. We recompute the index, since the pool may have shifted
                // during the allocation above.
                let ind = self.find(&block);
                self.free_ind(ind, block);

                // Check consistency.
                self.check();
                debug_assert!(res.aligned_to(align), "Alignment failed.");
                debug_assert!(res.size() >= new_size, "Requested space does not match with the \
                              returned block.");

                self.leave(res)
            },
        }
    }

    /// Extend/shrink the buffer inplace.
    ///
    /// This will try to extend the buffer without copying, if the new size is larger than the
    /// old one. If not, truncate the block and place it back to the pool.
    ///
    /// On failure, return `Err(Block)` with the old _intact_ block. Shrinking cannot fail.
    ///
    /// This shouldn't be used when the index of insertion is known, since this performs a
    /// binary search to find the block's index. When you know the index, use
    /// [`realloc_inplace_ind`](#method.realloc_inplace_ind).
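    ///
    /// A sketch of the fallback pattern a caller might use (hypothetical caller code):
    ///
    /// ```notrust
    /// // Try to resize in place; on `Err` the original block is handed back intact.
    /// let block = match keeper.realloc_inplace(block, 256) {
    ///     Ok(resized) => resized,
    ///     Err(intact) => keeper.realloc(intact, 256, align),
    /// };
    /// ```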
    #[inline]
    pub fn realloc_inplace(&mut self, block: Block, new_size: usize) -> Result<Block, Block> {
        let ind = self.find(&block);
        let res = self.realloc_inplace_ind(ind, block, new_size);

        // Check consistency.
        debug_assert!(res.as_ref().ok().map_or(true, |x| x.size() == new_size), "Requested space \
                      does not match with the returned block.");

        res
    }

    /// Allocate _fresh_ space.
    ///
    /// "Fresh" means that the space is allocated through a BRK call to the kernel.
    ///
    /// The returned pointer is guaranteed to be aligned to `align`.
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // BRK what you need.
        let (alignment_block, res, excessive) = self.brk(size, align);

        // Add it to the list. This will not change the order, since the pointer is higher than
        // all the previous blocks.
        self.push(alignment_block);

        // Push the excessive space to the end of the block pool.
        self.push(excessive);

        // Check consistency.
        self.check();

        res
    }

    /// Reallocate a block at a known index inplace.
    ///
    /// See [`realloc_inplace`](#method.realloc_inplace) for more information.
    fn realloc_inplace_ind(&mut self, ind: usize, mut block: Block, new_size: usize)
                           -> Result<Block, Block> {
        // Assertions...
        debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");

        if new_size <= block.size() {
            // Shrink the block.

            // Split the block in two segments, the main segment and the excessive segment.
            let (block, excessive) = block.split(new_size);
            // Free the excessive segment.
            self.free_ind(ind, excessive);

            // Make some assertions to avoid dumb bugs.
            debug_assert!(block.size() == new_size, "Block wasn't shrunk properly.");

            // Run a consistency check.
            self.check();

            return Ok(block);

            // We check if `ind` is the end of the array.
        } else if let Some(entry) = self.pool.get_mut(ind + 1) {
            // Note that we are sure that no segments in the array are adjacent (unless they
            // have size 0). This way we know that we will, at maximum, need one and only one
            // block for extending the current block.
            if entry.size() + block.size() >= new_size && block.merge_right(entry).is_ok() {
                // Merge succeeded.

                // Place the excessive block back.
                let (res, excessive) = block.split(new_size);
                *entry = excessive;
                // Block will still not be adjacent, due to `excessive` being guaranteed to not
                // be adjacent to the next block.

                // TODO, damn you borrowck
                // Run a consistency check.
                // self.check();

                // TODO, drop excessive space

                return Ok(res);
            }
        }

        Err(block)
    }

    /// Free a block placed at some index.
    ///
    /// This will at maximum insert one element.
    ///
    /// See [`free`](#method.free) for more information.
    #[inline]
    fn free_ind(&mut self, ind: usize, mut block: Block) {
        // Assertions...
        debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");

        // Try to merge left, and then right. Note that `ind` may be one-past-the-end (in
        // particular, when the pool is empty), in which case there is nothing to merge with and
        // we insert directly.
        if ind == self.pool.len() || {
            // To avoid double bound checking and other shenanigans, we declare a variable
            // holding our entry's pointer.
            let entry = &mut self.pool[ind];

            // Make some handy assertions.
            #[cfg(feature = "debug_tools")]
            assert!(entry != &mut block, "Double free.");
            debug_assert!(block.is_empty() || entry <= &mut block, "Block merged in the wrong \
                          direction.");

            entry.merge_right(&mut block).is_err()
        } && (ind == 0 || self.pool[ind - 1].merge_right(&mut block).is_err()) {
            // Since both merges failed, we will have to insert it in a normal manner.
            self.insert(ind, block);
        }

        // Check consistency.
        self.check();
    }

    /// Extend the data segment.
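    ///
    /// Returns the `(alignment_block, res, excessive)` triple, covering the BRK'd segment in
    /// address order:
    ///
    /// ```notrust
    /// I----------------I----------------I----------------I
    ///   alignment_block       res           excessive
    /// ```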
    #[inline]
    fn brk(&self, size: usize, align: usize) -> (Block, Block, Block) {
        // Calculate the canonical size (extra space is allocated to limit the number of system
        // calls).
        let brk_size = canonicalize_brk(size).checked_add(align).unwrap_or_else(|| self.oom());

        // Use SBRK to allocate an extra data segment. The alignment block is used as a
        // precursor for our allocated block, ensuring that it is properly aligned to the
        // requested value.
        let (alignment_block, rest) = Block::brk(brk_size)
            .unwrap_or_else(|_| self.oom())
            .align(align)
            .unwrap();

        // Split the block to leave the excessive space.
        let (res, excessive) = rest.split(size);

        // Make some assertions.
        debug_assert!(res.aligned_to(align), "Alignment failed.");
        debug_assert!(res.size() + alignment_block.size() + excessive.size() == brk_size,
                      "BRK memory leak.");

        (alignment_block, res, excessive)
    }

    /// Push to the block pool.
    ///
    /// This will append a block entry to the end of the block pool. Make sure that this entry
    /// has a value higher than any of the elements in the list, to keep it sorted.
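    ///
    /// For example, the blocks produced by [`brk`](#method.brk) always satisfy this, since BRK
    /// extends the data segment upwards.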
  394. #[inline]
  395. fn push(&mut self, mut block: Block) {
  396. // We will try to simply merge it with the last block.
  397. if let Some(x) = self.pool.last_mut() {
  398. if x.merge_right(&mut block).is_ok() {
  399. return;
  400. }
  401. } else if block.is_empty() { return; }
  402. // Merging failed. Note that trailing empty blocks are not allowed, hence the last block is
  403. // the only non-empty candidate which may be adjacent to `block`.
  404. // It failed, so we will need to add a new block to the end.
  405. let len = self.pool.len();
  406. // This is guaranteed not to overflow, since `len` is bounded by the address space, since
  407. // each entry represent at minimum one byte, meaning that `len` is bounded by the address
  408. // space.
  409. self.reserve(len + 1);
  410. let res = self.pool.push(block);
  411. // Make some assertions.
  412. debug_assert!(res.is_ok(), "Push failed (buffer filled).");
  413. self.check();
  414. }
    /// Reserve space for the block pool.
    ///
    /// This will extend the capacity to a number greater than or equal to `needed`, potentially
    /// reallocating the block pool.
    #[inline]
    fn reserve(&mut self, needed: usize) {
        if needed > self.pool.capacity() {
            // TODO allow BRK-free non-inplace reservations.
            // TODO Enable inplace reallocation in this position.

            // Reallocate the block pool.

            // Make a fresh allocation.
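            // Growth policy (describing the arithmetic below): reserve `needed` plus extra
            // headroom (the current capacity for small pools, or half the capacity plus 200
            // entries for large ones), plus three spare entries for the blocks pushed right
            // after the BRK.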
            let size = needed.saturating_add(
                cmp::min(self.pool.capacity(), 200 + self.pool.capacity() / 2)
                // We add:
                + 1 // block for the alignment block.
                + 1 // block for the freed vector.
                + 1 // block for the excessive space.
            ) * mem::size_of::<Block>();
            let (alignment_block, alloc, excessive) = self.brk(size, mem::align_of::<Block>());

            // Refill the pool.
            let old = self.pool.refill(alloc);

            // Push the alignment block (note that it is in fact at the end of the pool, due to
            // BRK _extending_ the segment).
            self.push(alignment_block);
            // The excessive space.
            self.push(excessive);

            // Free the old vector.
            self.free(old);

            // Check consistency.
            self.check();
        }
    }

    /// Perform a binary search to find the appropriate place where the block can be inserted,
    /// or is located.
    ///
    /// It is guaranteed that no block to the left of the returned index satisfies the above
    /// condition.
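    ///
    /// For example, if the binary search lands on the right side of a run of empty blocks, the
    /// returned index is moved left past all of them, so that an insertion may overwrite them.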
    #[inline]
    fn find(&self, block: &Block) -> usize {
        // TODO optimize this function.

        let ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };

        // Move left past any empty blocks.
        ind - self.pool.iter().take(ind).rev().take_while(|x| x.is_empty()).count()
    }

    /// Insert a block entry at some index.
    ///
    /// If the entry is non-empty, the subsequent entries will be pushed to the right, filling
    /// out the first empty gap. If all entries to the right are occupied, additional space will
    /// be reserved for the block pool.
    ///
    /// # Example
    ///
    /// We want to insert the block denoted by the tildes into our list. Perform a binary search
    /// to find where insertion is appropriate.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here            I--------I
    /// l                                I------------I
    /// k
    /// s                                                I---I
    ///          I~~~~~~~~~~I
    /// ```
    ///
    /// We keep pushing the blocks to the right to the next entry until an empty entry is
    /// reached:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here            I--------I <~ this one cannot move down, due to being blocked.
    /// l
    /// k                                I------------I <~ thus we have moved this one down.
    /// s                                                I---I
    ///          I~~~~~~~~~~I
    /// ```
    ///
    /// Repeating yields:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here
    /// l                   I--------I <~ this one cannot move down, due to being blocked.
    /// k                                I------------I <~ thus we have moved this one down.
    /// s                                                I---I
    ///          I~~~~~~~~~~I
    /// ```
    ///
    /// Now an empty space is left out, meaning that we can insert the block:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B        I----------I
    /// l                   I--------I
    /// k                                I------------I
    /// s                                                I---I
    /// ```
    ///
    /// The insertion is now completed.
    #[inline]
    fn insert(&mut self, ind: usize, block: Block) {
        // Bound check.
        assert!(self.pool.len() >= ind, "Insertion out of bounds.");

        // Some assertions...
        debug_assert!(ind == self.pool.len() || self.pool[ind].is_empty() ||
                      block <= self.pool[ind], "Inserting at {} will make the list unsorted.",
                      ind);
        debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");
        debug_assert!(!block.is_empty(), "Inserting an empty block.");

        // Find the next gap, i.e. the next empty entry, which can absorb the shift.
        if let Some((n, _)) = self.pool.iter().enumerate().skip(ind)
                                  .filter(|&(_, x)| x.is_empty()).next() {
            // The pool does not grow here: the entries `ind..n` are shifted one slot to the
            // right, overwriting the empty entry at `n`.
            unsafe {
                // Memmove the elements.
                ptr::copy(self.pool.get_unchecked(ind) as *const Block,
                          self.pool.get_unchecked_mut(ind + 1) as *mut Block, n - ind);

                // Set the element.
                *self.pool.get_unchecked_mut(ind) = block;
            }
        } else {
            self.push(block);
        }

        // Check consistency.
        self.check();
    }

    /// Call the OOM handler.
    ///
    /// This is used on out-of-memory errors, and will never return. Usually, it simply consists
    /// of aborting the process.
    fn oom(&self) -> ! {
        (self.oom_handler)()
    }

    /// Set the OOM handler.
    ///
    /// This is called when the process is out-of-memory.
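    ///
    /// A sketch of installing a custom handler (the handler name is hypothetical):
    ///
    /// ```notrust
    /// fn my_oom_handler() -> ! {
    ///     // Report the failure somehow, then halt.
    ///     loop {}
    /// }
    /// keeper.set_oom_handler(my_oom_handler);
    /// ```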
    #[inline]
    pub fn set_oom_handler(&mut self, handler: fn() -> !) {
        self.oom_handler = handler;
    }

    /// Leave the allocator.
    ///
    /// A block should be "registered" through this function when it leaves the allocator (e.g.,
    /// is returned), so that the current heap usage can be tracked and memory leaks detected.
    #[inline]
    fn leave(&mut self, block: Block) -> Block {
        // Update the number of bytes allocated.
        #[cfg(feature = "debug_tools")]
        {
            self.allocated += block.size();
        }

        block
    }

    /// Enter the allocator.
    ///
    /// A block should be "registered" through this function when it enters the allocator (e.g.,
    /// is given as an argument), so that the current heap usage can be tracked and memory leaks
    /// detected.
    #[inline]
    fn enter(&mut self, block: Block) -> Block {
        // Update the number of bytes allocated.
        #[cfg(feature = "debug_tools")]
        {
            self.allocated -= block.size();
        }

        block
    }

    /// No-op in release mode.
    #[cfg(not(debug_assertions))]
    #[inline]
    fn check(&self) {}

    /// Perform consistency checks.
    ///
    /// This will check for the following conditions:
    ///
    /// 1. The list is sorted.
    /// 2. No two blocks are adjacent (unless the right one is empty).
    #[cfg(debug_assertions)]
    fn check(&self) {
        if let Some(x) = self.pool.first() {
            let mut prev = x;
            for (n, i) in self.pool.iter().enumerate().skip(1) {
                // Check if sorted.
                assert!(i >= prev, "The block pool is not sorted at index {} ({:?} < {:?}).", n,
                        i, prev);
                // Make sure no blocks are adjacent.
                assert!(!prev.left_to(i) || i.is_empty(), "Adjacent blocks at index {} ({:?} and \
                        {:?}).", n, i, prev);

                // Set the variable tracking the previous block.
                prev = i;
            }
        }
    }

    /// Check for memory leaks.
    ///
    /// This will make sure that all the allocated blocks have been freed.
    #[cfg(feature = "debug_tools")]
    pub fn assert_no_leak(&self) {
        assert!(self.allocated == self.pool.capacity() * mem::size_of::<Block>(), "Not all blocks \
                freed. Total allocated space is {} ({} free blocks).", self.allocated,
                self.pool.len());
    }
}
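
// A minimal sanity test of `canonicalize_brk` (a sketch; it assumes the crate is built with
// the standard test harness available).
#[cfg(test)]
mod tests {
    use super::canonicalize_brk;

    #[test]
    fn brk_canonicalization_bounds() {
        // Small requests are rounded up to the minimum BRK size.
        assert_eq!(canonicalize_brk(100), 65536);
        // Large requests get at most `BRK_MAX_EXTRA = 262144` extra bytes.
        assert_eq!(canonicalize_brk(1_000_000), 1_262_144);
        // The result is never smaller than the request.
        assert!(canonicalize_brk(123_456) >= 123_456);
    }
}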