bookkeeper.rs

//! Memory bookkeeping.

use prelude::*;

use core::ops::Range;
use core::{ptr, mem, ops};

use shim::config;

/// Elements required _more_ than the length as capacity.
///
/// This represents how many extra elements are needed to conduct a `reserve` without the
/// stack overflowing, plus one (representing the new element):
///
/// 1. Aligner.
/// 2. Excessive space.
/// 3. The old buffer.
/// 4. The pushed or inserted block.
///
/// See assumption 4.
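///
/// # Example
///
/// A minimal sketch of the capacity invariant this constant guards; it is the same condition
/// asserted in `Bookkeeper::check` (here `pool` stands for the bookkeeper's internal block
/// pool):
///
/// ```ignore
/// // The pool must always be able to absorb `EXTRA_ELEMENTS` more blocks (pushed
/// // during reallocation) without reallocating its own buffer.
/// debug_assert!(pool.capacity() >= pool.len() + EXTRA_ELEMENTS);
/// ```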
pub const EXTRA_ELEMENTS: usize = 4;

#[cfg(feature = "alloc_id")]
use core::sync::atomic::{self, AtomicUsize};

/// The bookkeeper ID count.
///
/// This is atomically incremented whenever a new `Bookkeeper` is created.
#[cfg(feature = "alloc_id")]
static BOOKKEEPER_ID_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// The memory bookkeeper.
///
/// This stores data about the state of the allocator, and in particular, the free memory.
///
/// The actual functionality is provided by [`Allocator`](./trait.Allocator.html).
pub struct Bookkeeper {
    /// The internal block pool.
    ///
    /// Entries in the block pool can be "empty", meaning that you can overwrite the entry without
    /// breaking consistency.
    ///
    /// # Assumptions
    ///
    /// Certain assumptions are made:
    ///
    /// 1. The list is always sorted with respect to the blocks' pointers.
    /// 2. No two blocks, whether directly consecutive or delimited only by empty blocks, are
    ///    adjacent in memory, except if the right block is empty.
    /// 3. There are no trailing empty blocks.
    /// 4. The capacity is always `EXTRA_ELEMENTS` blocks more than the length (this is due to
    ///    reallocation pushing at maximum two elements, so we reserve two or more extra to allow
    ///    pushing one additional element without unbounded recursion).
    ///
    /// These are **not** invariants: if these assumptions are not held, the allocator will simply
    /// act strange (e.g. logic bugs), but not exhibit memory unsafety.
    pool: Vec<Block>,
    /// The total number of bytes in the pool.
    total_bytes: usize,
    /// Is this bookkeeper currently reserving?
    ///
    /// This is used to avoid unbounded metacircular reallocation (reservation).
    // TODO: Find a replacement for this "hack".
    reserving: bool,
    /// The allocator ID.
    ///
    /// This is simply to be able to distinguish allocators in the logs.
    #[cfg(feature = "alloc_id")]
    id: usize,
}
impl Bookkeeper {
    /// Create a new bookkeeper with some initial vector.
    pub fn new(vec: Vec<Block>) -> Bookkeeper {
        // Make sure the assumptions are satisfied.
        debug_assert!(vec.capacity() >= EXTRA_ELEMENTS,
                      "Not enough initial capacity of the vector.");
        debug_assert!(vec.is_empty(), "Initial vector isn't empty.");

        // TODO: Use field attributes on expressions when they are added to the language.
        #[cfg(feature = "alloc_id")]
        let res = Bookkeeper {
            pool: vec,
            total_bytes: 0,
            reserving: false,
            // Increment the ID counter to get a brand new ID.
            id: BOOKKEEPER_ID_COUNTER.fetch_add(1, atomic::Ordering::SeqCst),
        };
        #[cfg(not(feature = "alloc_id"))]
        let res = Bookkeeper {
            pool: vec,
            total_bytes: 0,
            reserving: false,
        };

        bk_log!(res, "Bookkeeper created.");
        res.check();

        res
    }
    /// Perform a binary search to find the appropriate place where the block can be inserted, or
    /// the place where it is located.
    ///
    /// It is guaranteed that no block left of the returned index satisfies the above condition.
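    ///
    /// # Example
    ///
    /// A hedged sketch of the postcondition (not a standalone test; `block` is hypothetical):
    ///
    /// ```ignore
    /// // After the backwards scan, the entry just left of the returned index (if
    /// // any) is non-empty, so a run of empty entries always resolves to its
    /// // first slot.
    /// let ind = self.find(&block);
    /// debug_assert!(ind == 0 || !self.pool[ind - 1].is_empty());
    /// ```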
    #[inline]
    fn find(&mut self, block: &Block) -> usize {
        // Logging.
        bk_log!(self, "Searching (exact) for {:?}.", block);

        let ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };
        let len = self.pool.len();

        // Move left.
        ind - self.pool.iter_mut()
            .rev()
            .skip(len - ind)
            .take_while(|x| x.is_empty())
            .count()
    }
    /// Perform a binary search to find the appropriate bound within which the block can be
    /// inserted, or where it is located.
    ///
    /// It is guaranteed that no block left of the returned bound satisfies the above condition.
    #[inline]
    fn find_bound(&mut self, block: &Block) -> Range<usize> {
        // Logging.
        bk_log!(self, "Searching (bounds) for {:?}.", block);

        let mut left_ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };
        let len = self.pool.len();

        // Move left.
        left_ind -= self.pool.iter_mut()
            .rev()
            .skip(len - left_ind)
            .take_while(|x| x.is_empty())
            .count();

        let mut right_ind = match self.pool.binary_search(&block.empty_right()) {
            Ok(x) | Err(x) => x,
        };

        // Move right.
        right_ind += self.pool.iter()
            .skip(right_ind)
            .take_while(|x| x.is_empty())
            .count();

        left_ind..right_ind
    }
    /// Go over every block in the allocator and call some function.
    ///
    /// Technically, this could be done through an iterator, but this more unidiomatic way is
    /// slightly faster in some cases.
    pub fn for_each<F: FnMut(Block)>(mut self, mut f: F) {
        // Logging.
        bk_log!(self, "Iterating over the blocks of the bookkeeper...");

        // Run over all the blocks in the pool.
        for i in self.pool.pop_iter() {
            f(i);
        }

        // Take the block holding the pool.
        f(Block::from(self.pool));
    }
    /// Pop the top block from the pool.
    pub fn pop(&mut self) -> Option<Block> {
        self.pool.pop().map(|res| {
            // Update the byte count.
            self.total_bytes -= res.size();

            // Check stuff, just in case.
            self.check();

            res
        })
    }

    /// Get the length of the pool.
    pub fn len(&self) -> usize {
        self.pool.len()
    }

    /// Get the total bytes of memory in the pool.
    pub fn total_bytes(&self) -> usize {
        self.total_bytes
    }
    /// Perform consistency checks.
    ///
    /// This will check for the following conditions:
    ///
    /// 1. The list is sorted.
    /// 2. No blocks are adjacent.
    ///
    /// This is a NO-OP in release mode.
    fn check(&self) {
        if cfg!(debug_assertions) {
            // Logging.
            bk_log!(self, "Checking...");

            // The total number of bytes.
            let mut total_bytes = 0;
            // Reverse iterator over the blocks.
            let mut it = self.pool.iter().enumerate().rev();

            // Check that the capacity is large enough.
            assert!(self.reserving || self.pool.len() + EXTRA_ELEMENTS <= self.pool.capacity(),
                    "The capacity should be at least {} more than the length of the pool.",
                    EXTRA_ELEMENTS);

            if let Some((_, x)) = it.next() {
                // The iterator is reversed, so this is the last block: make sure it isn't empty.
                assert!(!x.is_empty(), "The last block is empty.");

                total_bytes += x.size();

                let mut next = x;
                for (n, i) in it {
                    total_bytes += i.size();

                    // Check if sorted.
                    assert!(next >= i, "The block pool is not sorted at index {} ({:?} < {:?}).",
                            n, next, i);
                    // Make sure no blocks are adjacent.
                    assert!(!i.left_to(next) || i.is_empty(),
                            "Adjacent blocks at index {} ({:?} and {:?}).", n, i, next);
                    // Make sure an empty block has the same address as its right neighbor.
                    assert!(!i.is_empty() || i == next,
                            "Empty block not adjacent to right neighbor at index {} ({:?} and \
                            {:?}).", n, i, next);

                    // Set the variable tracking the previous block.
                    next = i;
                }

                // Check for trailing empty blocks.
                assert!(!self.pool.last().unwrap().is_empty(), "Trailing empty blocks.");
            }

            // Make sure the sum is maintained properly.
            assert!(total_bytes == self.total_bytes,
                    "The sum of the block sizes is not equal to the `total_bytes` field: \
                    {} ≠ {}.", total_bytes, self.total_bytes);
        }
    }
}
/// An allocator.
///
/// This provides the functionality of the memory bookkeeper, requiring only the provision of two
/// methods which define the "breaker" (fresh allocator). The core functionality is provided by
/// default methods, which generally aren't meant to be overwritten.
///
/// The reason why these methods aren't implemented directly on the bookkeeper is the distinction
/// between different forms of allocators (global, local, and so on): any newtype of
/// [`Bookkeeper`](./struct.Bookkeeper.html) can implement this trait.
///
/// # Guarantees vs. assumptions
///
/// Please note that whenever a guarantee is mentioned, it relies on all the overwritten methods
/// upholding the guarantees specified in the documentation.
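///
/// # Example
///
/// A minimal sketch of such a newtype (the `MyAllocator` type and the body of `alloc_fresh` are
/// hypothetical; a real implementation would call into its breaker, be it SBRK or the global
/// allocator):
///
/// ```ignore
/// struct MyAllocator(Bookkeeper);
///
/// impl ops::Deref for MyAllocator {
///     type Target = Bookkeeper;
///
///     fn deref(&self) -> &Bookkeeper {
///         &self.0
///     }
/// }
///
/// impl ops::DerefMut for MyAllocator {
///     fn deref_mut(&mut self) -> &mut Bookkeeper {
///         &mut self.0
///     }
/// }
///
/// impl Allocator for MyAllocator {
///     fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
///         // Obtain fresh, `align`-aligned memory from the breaker here.
///         unimplemented!()
///     }
/// }
/// ```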
pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
    /// Allocate _fresh_ space.
    ///
    /// "Fresh" means that the space is allocated through some breaker (be it SBRK or the global
    /// allocator).
    ///
    /// The returned pointer is assumed to be aligned to `align`. If this does not hold, all
    /// future guarantees are invalid.
    ///
    /// # Assumptions
    ///
    /// This is assumed to not modify the order. If some block `b` is associated with index `i`
    /// prior to the call of this function, it should be too after it.
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block;

    /// Called right before new memory is added to the pool.
    fn on_new_memory(&mut self) {}
    /// Allocate a chunk of memory.
    ///
    /// This function takes a size and an alignment. From these a fitting block is found, to which
    /// a pointer is returned. The block returned is guaranteed to be aligned to `align`.
    ///
    /// # Example
    ///
    /// We start with our initial segment.
    ///
    /// ```notrust
    ///    Address space
    ///   I---------------------------------I
    /// B
    /// l
    /// k
    /// s
    /// ```
    ///
    /// We then split it at the aligner, which is used for making sure that the pointer is aligned
    /// properly.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B    ^    I--------------------------I
    /// l  al
    /// k
    /// s
    /// ```
    ///
    /// We then use the remaining block, but leave the excessive space.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B                           I--------I
    /// l        \_________________/
    /// k        our allocated block.
    /// s
    /// ```
    ///
    /// A block representing the marked area is then returned.
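    ///
    /// A hedged usage sketch (the size and alignment are arbitrary):
    ///
    /// ```ignore
    /// // Allocate 4096 bytes aligned to 16 bytes, then hand the block back.
    /// let block = allocator.alloc(4096, 16);
    /// debug_assert!(block.aligned_to(16));
    /// debug_assert!(block.size() == 4096);
    /// allocator.free(block);
    /// ```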
    fn alloc(&mut self, size: usize, align: usize) -> Block {
        // Logging.
        bk_log!(self, "Allocating {} bytes with alignment {}.", size, align);

        if let Some((n, b)) = self.pool.iter_mut().enumerate().filter_map(|(n, i)| {
            if i.size() >= size {
                // Try to split at the aligner.
                i.align(align).and_then(|(mut a, mut b)| {
                    if b.size() >= size {
                        // Override the old block.
                        *i = a;
                        Some((n, b))
                    } else {
                        // Put the split block back together and place it back in its spot.
                        a.merge_right(&mut b).expect("Unable to merge block right.");
                        *i = a;
                        None
                    }
                })
            } else {
                None
            }
        }).next() {
            // Update the pool byte count.
            self.total_bytes -= b.size();

            if self.pool[n].is_empty() {
                // For the empty alignment invariant.
                let _ = self.remove_at(n);
            }

            // Split and mark the block uninitialized to the debugger.
            let (res, excessive) = b.mark_uninitialized().split(size);

            // There are many corner cases that make knowing where to insert it difficult,
            // so we search instead.
            self.free(excessive);

            // Check consistency.
            self.check();
            debug_assert!(res.aligned_to(align), "Alignment failed.");
            debug_assert!(res.size() == size, "Requested space does not match with the returned \
                          block.");

            res
        } else {
            // No fitting block found. Allocate a new block.
            self.alloc_external(size, align)
        }
    }
    /// Free a memory block.
    ///
    /// After this has been called, no guarantees are made about the passed pointer. If it wants
    /// to, it could begin shooting laser beams.
    ///
    /// Freeing an invalid block will drop all future guarantees about this bookkeeper.
    ///
    /// # Example
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B                           I--------I
    /// l        \_________________/
    /// k        the used block we want to deallocate.
    /// s
    /// ```
    ///
    /// If the blocks are adjacent, we merge them:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B        I-----------------I
    /// l                           I--------I
    /// k
    /// s
    /// ```
    ///
    /// This gives us:
    ///
    /// ```notrust
    ///    Address space
    ///   I------------------------I
    /// B                           I--------I
    /// l
    /// k
    /// s
    /// ```
    ///
    /// And we're done. If it cannot be done, we insert the block, while keeping the list sorted.
    /// See [`insert`](#method.insert) for details.
    #[inline]
    fn free(&mut self, block: Block) {
        // Just logging for the unlucky people debugging this shit. No problem.
        bk_log!(self, "Freeing {:?}...", block);

        // Binary search for the block.
        let bound = self.find_bound(&block);

        // Free the given block.
        self.free_bound(bound, block);
    }
    /// Reallocate memory.
    ///
    /// If necessary (inplace reallocation is not possible or feasible) it will allocate a new
    /// buffer, fill it with the contents of the old buffer, and deallocate the replaced buffer.
    ///
    /// The following guarantees are made:
    ///
    /// 1. The returned block is valid and aligned to `align`.
    /// 2. The returned block contains the same data byte-for-byte as the original buffer.
    ///
    /// The data will be truncated if `new_size` is smaller than `block`'s size.
    ///
    /// # Example
    ///
    /// We will first try to perform an in-place reallocation, and if that fails, we will use
    /// memmove.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B        \~~~~~~~~~~~~~~~~~~~~~/
    /// l                needed
    /// k
    /// s
    /// ```
    ///
    /// We simply find the block next to our initial block. If this block is free and has
    /// sufficient size, we will simply merge it into our initial block, and leave the excessive
    /// space as free. If these conditions are not met, we have to allocate a new buffer, and then
    /// deallocate the old one, after which we use memmove to copy the data over to the newly
    /// allocated buffer.
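    ///
    /// A hedged usage sketch (the sizes are arbitrary):
    ///
    /// ```ignore
    /// let block = allocator.alloc(64, 8);
    /// // Grow the allocation. The first 64 bytes are preserved byte-for-byte,
    /// // whether the reallocation happened inplace or through a copy.
    /// let block = allocator.realloc(block, 128, 8);
    /// debug_assert!(block.size() >= 128);
    /// allocator.free(block);
    /// ```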
    fn realloc(&mut self, block: Block, new_size: usize, align: usize) -> Block {
        // Find the index bound.
        let ind = self.find_bound(&block);

        // Logging.
        bk_log!(self;ind, "Reallocating {:?} to size {} with align {}...", block, new_size, align);

        // Try to do an inplace reallocation.
        match self.realloc_inplace_bound(ind, block, new_size) {
            Ok(block) => block,
            Err(block) => {
                // Reallocation cannot be done inplace.

                // Allocate a new block with the same size.
                let mut res = self.alloc(new_size, align);

                // Copy the old data to the new location.
                block.copy_to(&mut res);

                // Free the old block.
                // Allocation may have moved the insertion point, so we search again.
                self.free(block);

                // Check consistency.
                self.check();
                debug_assert!(res.aligned_to(align), "Alignment failed.");
                debug_assert!(res.size() >= new_size, "Requested space does not match with the \
                              returned block.");

                res
            },
        }
    }
    /// Extend/shrink the buffer inplace.
    ///
    /// This will try to extend the buffer without copying, if the new size is larger than the old
    /// one. If not, truncate the block and place it back into the pool.
    ///
    /// On failure, return `Err(Block)` with the old _intact_ block. Shrinking cannot fail.
    ///
    /// This shouldn't be used when the index of insertion is known, since this performs a binary
    /// search to find the block's index. When you know the index, use
    /// [`realloc_inplace_bound`](#method.realloc_inplace_bound) instead.
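    ///
    /// A hedged sketch of handling the fallible case (the `use_block` helper is hypothetical):
    ///
    /// ```ignore
    /// match allocator.realloc_inplace(block, 32) {
    ///     // The block was resized without moving any data.
    ///     Ok(resized) => use_block(resized),
    ///     // Extension wasn't possible; the original block is returned intact, so
    ///     // the caller can fall back to a copying `realloc`.
    ///     Err(intact) => use_block(allocator.realloc(intact, 32, 8)),
    /// }
    /// ```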
    #[inline]
    fn realloc_inplace(&mut self, block: Block, new_size: usize) -> Result<Block, Block> {
        // Logging.
        bk_log!(self, "Reallocating {:?} inplace to {}...", block, new_size);

        // Find the bounds of the given block.
        let bound = self.find_bound(&block);

        // Go for it!
        let res = self.realloc_inplace_bound(bound, block, new_size);

        // Check consistency.
        debug_assert!(res.as_ref().ok().map_or(true, |x| x.size() == new_size),
                      "Requested space does not match with the returned block.");

        res
    }
    /// Reallocate a block inplace on a known index bound.
    ///
    /// See [`realloc_inplace`](#method.realloc_inplace) for more information.
    fn realloc_inplace_bound(&mut self, ind: Range<usize>, mut block: Block,
                             new_size: usize) -> Result<Block, Block> {
        // Logging.
        bk_log!(self;ind, "Try inplace reallocating {:?} to size {}.", block, new_size);

        // Assertions...
        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
                      index.");

        if new_size <= block.size() {
            // Shrink the block.
            bk_log!(self;ind, "Shrinking {:?}.", block);

            // Split the block into two segments, the main segment and the excessive segment.
            let (block, excessive) = block.split(new_size);
            // Free the excessive segment.
            self.free_bound(ind, excessive);

            // Make some assertions to avoid dumb bugs.
            debug_assert!(block.size() == new_size, "Block wasn't shrunk properly.");

            // Run a consistency check.
            self.check();

            return Ok(block);
        } else {
            // We check if `ind` is the end of the array.
            let mut mergable = false;
            if let Some(entry) = self.pool.get_mut(ind.end) {
                mergable = entry.size() + block.size() >= new_size && block.left_to(entry);
            }

            // Note that we are sure that no segments in the array are adjacent (unless they have
            // size 0). This way we know that we will, at maximum, need one and only one block for
            // extending the current block.
            if mergable {
                // Logging...
                bk_log!(self;ind, "Merging {:?} to the right.", block);

                // We'll merge it with the block at the end of the range.
                block.merge_right(&mut self.remove_at(ind.end))
                    .expect("Unable to merge block right, to the end of the range.");

                // Merge succeeded. Place the excessive block back.
                let (res, excessive) = block.split(new_size);
                // `remove_at` may have shortened the vector.
                if ind.start == self.pool.len() {
                    self.push(excessive);
                } else if !excessive.is_empty() {
                    self.pool[ind.start] = excessive;
                }

                // Block will still not be adjacent, due to `excessive` being guaranteed to not be
                // adjacent to the next block.

                // Run a consistency check.
                self.check();

                return Ok(res);
            }
        }

        Err(block)
    }
    /// Free a block placed in some index bound.
    ///
    /// This will at maximum insert one element.
    ///
    /// See [`free`](#method.free) for more information.
    #[inline]
    fn free_bound(&mut self, ind: Range<usize>, mut block: Block) {
        // Logging.
        bk_log!(self;ind, "Freeing {:?}.", block);

        // Short circuit in case of an empty block.
        if block.is_empty() { return; }

        // When compiled with `security`, we zero this block.
        block.sec_zero();

        if ind.start == self.pool.len() {
            self.push(block);
            return;
        }

        // Assertions...
        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
                      index.");

        // Try to merge it with the block to the right.
        if ind.end < self.pool.len() && block.left_to(&self.pool[ind.end]) {
            // Merge the block with the rightmost block in the range.
            block.merge_right(&mut self.remove_at(ind.end))
                .expect("Unable to merge block right to the block at the end of the range");

            // The merging succeeded. We proceed to try to close in the possible gap.
            if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
                // Check consistency.
                self.check();

                return;
            }
        // Dammit, let's try to merge left.
        } else if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
            // Check consistency.
            self.check();

            return;
        }

        // Well, it failed, so we insert it the old-fashioned way.
        self.insert(ind.start, block);

        // Check consistency.
        self.check();
    }
    /// Allocate external ("fresh") space.
    ///
    /// "Fresh" means that the space is allocated through the breaker.
    ///
    /// The returned pointer is guaranteed to be aligned to `align`.
    fn alloc_external(&mut self, size: usize, align: usize) -> Block {
        // Logging.
        bk_log!(self, "Fresh allocation of size {} with alignment {}.", size, align);

        // Break it to me!
        let res = self.alloc_fresh(size, align);

        // Check consistency.
        self.check();

        res
    }
    /// Push an element without reserving.
    // TODO: Make `push` and `free` one.
    fn push(&mut self, block: Block) {
        // Logging.
        bk_log!(self;self.pool.len(), "Pushing {:?}.", block);

        // Mark the block free.
        let mut block = block.mark_free();

        // Short-circuit in case of an empty block.
        if !block.is_empty() {
            // Trigger the new memory event handler.
            self.on_new_memory();

            // Update the pool byte count.
            self.total_bytes += block.size();

            // Some assertions...
            debug_assert!(self.pool.is_empty() || &block > self.pool.last().unwrap(),
                          "Pushing will make the list unsorted.");

            // We will try to simply merge it with the last block.
            if let Some(x) = self.pool.last_mut() {
                if x.merge_right(&mut block).is_ok() {
                    return;
                }
            }

            // Reserve space and free the old buffer.
            if let Some(x) = unborrow!(self.reserve(self.pool.len() + 1)) {
                // Note that we do not set the count down because this isn't setting back our
                // pushed block.
                self.free(x);
            }

            // Try again to merge with the last block, on the off chance that reserve pushed
            // something we can merge with. This has actually happened in testing.
            if let Some(x) = self.pool.last_mut() {
                if x.merge_right(&mut block).is_ok() {
                    return;
                }
            }

            // Merging failed. Note that trailing empty blocks are not allowed, hence the last
            // block is the only non-empty candidate which may be adjacent to `block`.

            // Check again that pushing is correct.
            if self.pool.is_empty() || &block > self.pool.last().unwrap() {
                // We push.
                let res = self.pool.push(block);

                // Make some assertions.
                debug_assert!(res.is_ok(), "Push failed (buffer full).");
            } else {
                // `free` handles the count, so we set it back.
                // TODO: Find a better way to do so.
                self.total_bytes -= block.size();

                // We can't push because reserve changed the end of the pool.
                self.free(block);
            }
        }

        // Check consistency.
        self.check();
    }
    /// Reserve some number of elements, and return the old buffer's block.
    ///
    /// # Assumptions
    ///
    /// This is assumed to not modify the order. If some block `b` is associated with index `i`
    /// prior to the call of this function, it should be too after it.
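    ///
    /// A hedged sketch of the calling convention (this mirrors how `push` uses it):
    ///
    /// ```ignore
    /// // `reserve` may swap in a freshly allocated buffer, in which case the old
    /// // buffer's block is returned and must be handed back to the pool.
    /// if let Some(old_buf) = self.reserve(self.pool.len() + 1) {
    ///     self.free(old_buf);
    /// }
    /// ```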
    fn reserve(&mut self, min_cap: usize) -> Option<Block> {
        // Logging.
        bk_log!(self;min_cap, "Reserving {}.", min_cap);

        if !self.reserving && (self.pool.capacity() < self.pool.len() + EXTRA_ELEMENTS
                               || self.pool.capacity() < min_cap + EXTRA_ELEMENTS) {
            // Reserve a little extra for performance reasons.
            // TODO: This should be moved to some new method.
            let new_cap = min_cap + EXTRA_ELEMENTS + config::extra_fresh(min_cap);

            // Catch 'em all.
            debug_assert!(new_cap > self.pool.capacity(), "Reserve shrinks?!");

            // Make sure no unbounded reallocation happens.
            self.reserving = true;

            // Break it to me!
            let new_buf = self.alloc_external(new_cap * mem::size_of::<Block>(),
                                              mem::align_of::<Block>());

            // Go back to the original state.
            self.reserving = false;

            // Check consistency.
            self.check();

            Some(self.pool.refill(new_buf))
        } else {
            None
        }
    }
    /// Insert a block entry at some index.
    ///
    /// If the space is non-empty, the elements will be pushed to the right, filling out the empty
    /// gaps to the right (the shifting step itself is sketched at the end of this section).
    ///
    /// # Warning
    ///
    /// This might in fact break the order.
    ///
    /// # Panics
    ///
    /// Panics when `ind` is greater than the block pool's length.
    ///
    /// # Example
    ///
    /// We want to insert the block denoted by the tildes into our list. Perform a binary search
    /// to find where insertion is appropriate.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here              I--------I
    /// l                                  I------------I
    /// k
    /// s                                                  I---I
    ///           I~~~~~~~~~~I
    /// ```
    ///
    /// We keep pushing the blocks to the right to the next entry until an empty entry is reached:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here              I--------I <~ this one cannot move down, due to being blocked.
    /// l
    /// k                                  I------------I <~ thus we have moved this one down.
    /// s                                                  I---I
    ///           I~~~~~~~~~~I
    /// ```
    ///
    /// Repeating yields:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here
    /// l                     I--------I <~ this one cannot move down, due to being blocked.
    /// k                                  I------------I <~ thus we have moved this one down.
    /// s                                                  I---I
    ///           I~~~~~~~~~~I
    /// ```
    ///
    /// Now an empty space is left out, meaning that we can insert the block:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B         I----------I
    /// l                     I--------I
    /// k                                  I------------I
    /// s                                                  I---I
    /// ```
    ///
    /// The insertion is now completed.
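    ///
    /// A hedged sketch of the shifting step in isolation (`ind`, `gap`, and `block` are as in
    /// the body below; not compilable on its own):
    ///
    /// ```ignore
    /// // Shift `pool[ind..gap]` one slot to the right so slot `ind` can be
    /// // overwritten; `gap` is the first empty entry at or after `ind`.
    /// ptr::copy(self.pool.get_unchecked(ind) as *const Block,
    ///           self.pool.get_unchecked_mut(ind + 1) as *mut Block,
    ///           gap - ind);
    /// ptr::write(self.pool.get_unchecked_mut(ind), block.mark_free());
    /// ```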
    #[inline]
    fn insert(&mut self, ind: usize, block: Block) {
        // Logging.
        bk_log!(self;ind, "Inserting block {:?}...", block);

        // Bound check.
        assert!(self.pool.len() >= ind, "Insertion out of bounds.");

        // Some assertions...
        debug_assert!(self.pool.len() <= ind || block <= self.pool[ind],
                      "Inserting at {} will make the list unsorted.", ind);
        debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");
        debug_assert!(!block.is_empty(), "Inserting an empty block.");

        // Trigger the new memory event handler.
        self.on_new_memory();

        // Find the next gap, where a used block was.
        let gap = self.pool
            .iter()
            .enumerate()
            // We only check _after_ the index.
            .skip(ind)
            // Until the block is empty.
            .filter(|&(_, x)| x.is_empty())
            .next()
            .map(|(n, _)| n);

        // Log the operation.
        bk_log!(self;ind, "Shifting the blocks up to the gap at {} one block to the right.",
                gap.unwrap_or_else(|| self.pool.len()));

        // The old vector's buffer.
        let mut old_buf = None;

        unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).

            // Memmove the elements to make a gap for the new block.
            ptr::copy(self.pool.get_unchecked(ind) as *const Block,
                      self.pool.get_unchecked_mut(ind + 1) as *mut Block,
                      // The gap defaults to the end of the pool.
                      gap.unwrap_or_else(|| {
                          // We will only extend the length if we were unable to fit it into the
                          // current length.

                          // Loooooooging...
                          bk_log!(self;ind, "Block pool not long enough for shift. Extending.");

                          // Reserve space. This does not break order, due to the assumption that
                          // `reserve` never breaks order.
                          old_buf = unborrow!(self.reserve(self.pool.len() + 1));

                          // We will move a block into reserved memory but outside of the vec's
                          // bounds. For that reason, we push an uninitialized element to extend
                          // the length, which will be assigned in the memcpy.
                          let res = self.pool.push(mem::uninitialized());

                          // Just some assertions...
                          debug_assert!(res.is_ok(), "Push failed (buffer full).");

                          self.pool.len() - 1
                      }) - ind);

            // Update the pool byte count.
            self.total_bytes += block.size();

            // Mark it free and set the element.
            ptr::write(self.pool.get_unchecked_mut(ind), block.mark_free());
        }

        // Free the old buffer, if it exists.
        if let Some(block) = old_buf {
            self.free(block);
        }

        // Check consistency.
        self.check();
    }
    /// Remove a block.
    fn remove_at(&mut self, ind: usize) -> Block {
        // Logging.
        bk_log!(self;ind, "Removing block at {}.", ind);

        let res = if ind + 1 == self.pool.len() {
            let block = self.pool[ind].pop();
            // Make sure there are no trailing empty blocks.
            let new_len = self.pool.len()
                - self.pool.iter().rev().take_while(|x| x.is_empty()).count();

            // Truncate the vector.
            self.pool.truncate(new_len);

            block
        } else {
            // Calculate the upper and lower bound.
            let empty = self.pool[ind + 1].empty_left();
            let empty2 = empty.empty_left();

            // Replace the block at `ind` with the left empty block from `ind + 1`.
            let block = mem::replace(&mut self.pool[ind], empty);

            // Iterate over the pool from `ind` and down, and set the empty blocks to the empty
            // of our block.
            let skip = self.pool.len() - ind;
            for place in self.pool.iter_mut().rev().skip(skip).take_while(|x| x.is_empty()) {
                // Empty the blocks.
                *place = empty2.empty_left();
            }

            block
        };

        // Update the pool byte count.
        self.total_bytes -= res.size();

        // Check consistency.
        self.check();

        // Mark the block uninitialized to the debugger.
        res.mark_uninitialized()
    }
}