bookkeeper.rs

//! Memory bookkeeping.

use prelude::*;

use core::ops::Range;
use core::{mem, ops, ptr};
use shim::config;

/// Elements required _more_ than the length as capacity.
///
/// This represents how many extra elements are needed to conduct a `reserve` without the
/// stack overflowing, plus one (representing the new element):
///
/// 1. Aligner.
/// 2. Excessive space.
/// 3. The old buffer.
/// 4. The pushed or inserted block.
///
/// See assumption 4 on the block pool, and the sketch below.
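///
/// A minimal sketch of the headroom rule, using plain numbers rather than the crate's types:
/// with `EXTRA_ELEMENTS = 4`, a pool of length `n` must sit in a buffer of capacity at least
/// `n + 4`, so a single push during a `reserve` cannot overflow the buffer.
///
/// ```
/// const EXTRA_ELEMENTS: usize = 4;
/// let len = 10;
/// let capacity = 14;
/// // Worst case during a `reserve`: aligner + excessive space + old buffer + new block.
/// assert!(capacity >= len + EXTRA_ELEMENTS);
/// ```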
pub const EXTRA_ELEMENTS: usize = 4;

#[cfg(feature = "alloc_id")]
use core::sync::atomic::{self, AtomicUsize};

/// The bookkeeper ID count.
///
/// This is atomically incremented whenever a new `Bookkeeper` is created.
#[cfg(feature = "alloc_id")]
static BOOKKEEPER_ID_COUNTER: AtomicUsize = AtomicUsize::new(0);

/// The memory bookkeeper.
///
/// This stores data about the state of the allocator, and in particular, the free memory.
///
/// The actual functionality is provided by [`Allocator`](./trait.Allocator.html).
pub struct Bookkeeper {
    /// The internal block pool.
    ///
    /// Entries in the block pool can be "empty", meaning that you can overwrite the entry without
    /// breaking consistency.
    ///
    /// # Assumptions
    ///
    /// Certain assumptions are made:
    ///
    /// 1. The list is always sorted with respect to the blocks' pointers.
    /// 2. No two consecutive blocks (even if delimited by empty blocks) are adjacent, except if
    ///    the right block is empty.
    /// 3. There are no trailing empty blocks.
    /// 4. The capacity is always `EXTRA_ELEMENTS` blocks more than the length (this is due to
    ///    reallocation pushing at maximum two elements, so we reserve two or more extra to allow
    ///    pushing one additional element without unbounded recursion).
    ///
    /// These are **not** invariants: if these assumptions are not held, the allocator will simply
    /// act strange (e.g. logic bugs), but not exhibit memory unsafety.
    pool: Vec<Block>,
    /// The total number of bytes in the pool.
    total_bytes: usize,
    /// Is this bookkeeper currently reserving?
    ///
    /// This is used to avoid unbounded metacircular reallocation (reservation).
    // TODO: Find a replacement for this "hack".
    reserving: bool,
    /// The allocator ID.
    ///
    /// This is simply to be able to distinguish allocators in the locks.
    #[cfg(feature = "alloc_id")]
    id: usize,
}

impl Bookkeeper {
    /// Create a new bookkeeper with some initial vector.
    pub fn new(vec: Vec<Block>) -> Bookkeeper {
        // Make sure the assumptions are satisfied.
        debug_assert!(
            vec.capacity() >= EXTRA_ELEMENTS,
            "Not enough initial capacity of the vector."
        );
        debug_assert!(vec.is_empty(), "Initial vector isn't empty.");

        // TODO: When added, use expr field attributes.
        #[cfg(feature = "alloc_id")]
        let res = Bookkeeper {
            pool: vec,
            total_bytes: 0,
            reserving: false,
            // Increment the ID counter to get a brand new ID.
            id: BOOKKEEPER_ID_COUNTER.fetch_add(1, atomic::Ordering::SeqCst),
        };
        #[cfg(not(feature = "alloc_id"))]
        let res = Bookkeeper {
            pool: vec,
            total_bytes: 0,
            reserving: false,
        };

        bk_log!(res, "Bookkeeper created.");
        res.check();

        res
    }

    /// Perform a binary search to find the appropriate place where the block can be inserted, or
    /// where it is already located.
    ///
    /// It is guaranteed that no block left of the returned index satisfies the above condition.
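    ///
    /// A sketch of the strategy on a simplified pool, where `None` stands in for an empty block
    /// (hypothetical types; the real pool stores `Block`s ordered by pointer):
    ///
    /// ```
    /// // Sorted pool; `None` marks an empty entry that may be overwritten.
    /// let pool: Vec<Option<u32>> = vec![Some(1), None, None, Some(7), Some(9)];
    /// let target = 7;
    /// // Find the insertion point (the real code uses a binary search).
    /// let ind = pool.iter().position(|x| x.map_or(false, |v| v >= target)).unwrap();
    /// // Then move left over empty entries, so they stay overwritable.
    /// let ind = ind - pool[..ind].iter().rev().take_while(|x| x.is_none()).count();
    /// assert_eq!(ind, 1);
    /// ```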
    #[inline]
    fn find(&mut self, block: &Block) -> usize {
        // Logging.
        bk_log!(self, "Searching (exact) for {:?}.", block);

        let ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };
        let len = self.pool.len();

        // Move left.
        ind - self
            .pool
            .iter_mut()
            .rev()
            .skip(len - ind)
            .take_while(|x| x.is_empty())
            .count()
    }

    /// Perform a binary search to find the appropriate bound where the block can be inserted, or
    /// where it is already located.
    ///
    /// It is guaranteed that no block left of the returned bound satisfies the above condition.
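    ///
    /// Unlike [`find`](#method.find), this returns the whole run of overwritable entries around
    /// the block. A sketch on a simplified pool (hypothetical types; `None` marks an empty
    /// entry):
    ///
    /// ```
    /// let pool: Vec<Option<u32>> = vec![Some(1), None, Some(7), None, None, Some(9)];
    /// let ind = 2; // Where `7` sits after the binary searches.
    /// let left = ind - pool[..ind].iter().rev().take_while(|x| x.is_none()).count();
    /// let right = ind + 1 + pool[ind + 1..].iter().take_while(|x| x.is_none()).count();
    /// // The bound spans the empty neighbors on both sides.
    /// assert_eq!(left..right, 1..5);
    /// ```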
    #[inline]
    fn find_bound(&mut self, block: &Block) -> Range<usize> {
        // Logging.
        bk_log!(self, "Searching (bounds) for {:?}.", block);

        let mut left_ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };
        let len = self.pool.len();

        // Move left.
        left_ind -= self
            .pool
            .iter_mut()
            .rev()
            .skip(len - left_ind)
            .take_while(|x| x.is_empty())
            .count();

        let mut right_ind = match self.pool.binary_search(&block.empty_right()) {
            Ok(x) | Err(x) => x,
        };

        // Move right.
        right_ind += self
            .pool
            .iter()
            .skip(right_ind)
            .take_while(|x| x.is_empty())
            .count();

        left_ind..right_ind
    }

    /// Go over every block in the allocator and call some function.
    ///
    /// Technically, this could be done through an iterator, but this more unidiomatic way is
    /// slightly faster in some cases.
    pub fn for_each<F: FnMut(Block)>(mut self, mut f: F) {
        // Logging.
        bk_log!(self, "Iterating over the blocks of the bookkeeper...");

        // Run over all the blocks in the pool.
        for i in self.pool.pop_iter() {
            f(i);
        }

        // Take the block holding the pool.
        f(Block::from(self.pool));
    }

    /// Remove a block.
    fn remove_at(&mut self, ind: usize) -> Block {
        // Logging.
        bk_log!(self;ind, "Removing block at {}.", ind);

        let res = if ind + 1 == self.pool.len() {
            let block = self.pool[ind].pop();
            // Make sure there are no trailing empty blocks.
            let new_len =
                self.pool.len() - self.pool.iter().rev().take_while(|x| x.is_empty()).count();

            // Truncate the vector.
            self.pool.truncate(new_len);

            block
        } else {
            // Calculate the upper and lower bound.
            let empty = self.pool[ind + 1].empty_left();
            let empty2 = empty.empty_left();

            // Replace the block at `ind` with the left empty block from `ind + 1`.
            let block = mem::replace(&mut self.pool[ind], empty);

            // Iterate over the pool from `ind` and downwards, resetting the empty blocks to the
            // empty left of our block.
            let skip = self.pool.len() - ind;
            for place in self
                .pool
                .iter_mut()
                .rev()
                .skip(skip)
                .take_while(|x| x.is_empty())
            {
                // Empty the blocks.
                *place = empty2.empty_left();
            }

            block
        };

        // Update the pool byte count.
        self.total_bytes -= res.size();

        // Check consistency.
        self.check();

        // Mark the block uninitialized to the debugger.
        res.mark_uninitialized()
    }

    /// Pop the top block from the pool.
    pub fn pop(&mut self) -> Option<Block> {
        let len = self.pool.len();
        if len == 0 {
            None
        } else {
            Some(self.remove_at(len - 1))
        }
    }

    /// Get the length of the pool.
    pub fn len(&self) -> usize {
        self.pool.len()
    }

    /// Get the total bytes of memory in the pool.
    pub fn total_bytes(&self) -> usize {
        self.total_bytes
    }

    /// Perform consistency checks.
    ///
    /// This will check for the following conditions:
    ///
    /// 1. The list is sorted.
    /// 2. No blocks are adjacent.
    ///
    /// This is a NOOP in release mode.
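    ///
    /// A sketch of the two conditions on a simplified pool of ranges (the real code checks
    /// `Block`s and also verifies the byte count and the capacity headroom):
    ///
    /// ```
    /// let pool = [0..4, 4..4, 6..9];
    /// // 1. The list is sorted.
    /// assert!(pool.windows(2).all(|w| w[0].start <= w[1].start));
    /// // 2. Adjacent blocks must involve an empty one; non-empty neighbors would have merged.
    /// assert!(pool
    ///     .windows(2)
    ///     .all(|w| w[0].end != w[1].start || w[0].len() == 0 || w[1].len() == 0));
    /// ```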
    fn check(&self) {
        if cfg!(debug_assertions) {
            // Logging.
            bk_log!(self, "Checking...");

            // The total number of bytes.
            let mut total_bytes = 0;
            // Reverse iterator over the blocks.
            let mut it = self.pool.iter().enumerate().rev();

            // Check that the capacity is large enough.
            assert!(
                self.reserving || self.pool.len() + EXTRA_ELEMENTS <= self.pool.capacity(),
                "The capacity should be at least {} more than the length of the pool.",
                EXTRA_ELEMENTS
            );

            if let Some((_, x)) = it.next() {
                // The iterator is reversed, so this is the last block: make sure there are no
                // trailing empty blocks.
                assert!(!x.is_empty(), "The trailing block is empty.");

                total_bytes += x.size();

                let mut next = x;
                for (n, i) in it {
                    total_bytes += i.size();

                    // Check if sorted.
                    assert!(
                        next >= i,
                        "The block pool is not sorted at index {} ({:?} < {:?}).",
                        n,
                        next,
                        i
                    );
                    // Make sure no blocks are adjacent.
                    assert!(
                        !i.left_to(next) || i.is_empty(),
                        "Adjacent blocks at index {} ({:?} and {:?})",
                        n,
                        i,
                        next
                    );
                    // Make sure an empty block has the same address as its right neighbor.
                    assert!(
                        !i.is_empty() || i == next,
                        "Empty block not adjacent to right neighbor at index {} ({:?} and {:?})",
                        n,
                        i,
                        next
                    );

                    // Set the variable tracking the previous block.
                    next = i;
                }

                // Check for trailing empty blocks.
                assert!(
                    !self.pool.last().unwrap().is_empty(),
                    "Trailing empty blocks."
                );
            }

            // Make sure the sum is maintained properly.
            assert!(
                total_bytes == self.total_bytes,
                "The sum is not equal to the 'total_bytes' field: {} ≠ {}.",
                total_bytes,
                self.total_bytes
            );
        }
    }
}

/// An allocator.
///
/// This provides the functionality of the memory bookkeeper, requiring only the provision of two
/// methods, defining the "breaker" (fresh allocator). The core functionality is provided by
/// default methods, which aren't generally meant to be overwritten.
///
/// The reason why these methods aren't implemented directly on the bookkeeper is the distinction
/// between different forms of allocators (global, local, and so on): any newtype of
/// [`Bookkeeper`](./struct.Bookkeeper.html) can implement this trait.
///
/// # Guarantees vs. assumptions
///
/// Please note that whenever a guarantee is mentioned, it relies on all the overwritten methods
/// upholding the guarantees specified in the documentation.
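///
/// A sketch of the intended wiring (`LocalAllocator` is a hypothetical name; the snippet is
/// `ignore`d since it depends on the surrounding crate):
///
/// ```ignore
/// struct LocalAllocator(Bookkeeper);
///
/// impl ops::Deref for LocalAllocator {
///     type Target = Bookkeeper;
///
///     fn deref(&self) -> &Bookkeeper {
///         &self.0
///     }
/// }
///
/// impl ops::DerefMut for LocalAllocator {
///     fn deref_mut(&mut self) -> &mut Bookkeeper {
///         &mut self.0
///     }
/// }
///
/// impl Allocator for LocalAllocator {
///     // Only the breaker must be provided; the rest are default methods.
///     fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
///         // Get fresh memory from e.g. SBRK or the global allocator.
///         unimplemented!()
///     }
/// }
/// ```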
pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
    /// Allocate _fresh_ space.
    ///
    /// "Fresh" means that the space is allocated through some breaker (be it SBRK or the global
    /// allocator).
    ///
    /// The returned pointer is assumed to be aligned to `align`. If this is not held, all future
    /// guarantees are invalid.
    ///
    /// # Assumptions
    ///
    /// This is assumed to not modify the order. If some block `b` is associated with index `i`
    /// prior to a call of this function, it should be too after it.
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block;

    /// Called right before new memory is added to the pool.
    fn on_new_memory(&mut self) {}

    /// Allocate a chunk of memory.
    ///
    /// This function takes a size and an alignment. From these a fitting block is found, to which
    /// a pointer is returned. The block returned is guaranteed to be aligned to `align`.
    ///
    /// # Example
    ///
    /// We start with our initial segment.
    ///
    /// ```notrust
    ///    Address space
    ///   I---------------------------------I
    /// B
    /// l
    /// k
    /// s
    /// ```
    ///
    /// We then split it at the aligner, which is used for making sure that the pointer is
    /// aligned properly.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B   ^    I--------------------------I
    /// l  al
    /// k
    /// s
    /// ```
    ///
    /// We then use the remaining block, but leave the excessive space.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B                           I--------I
    /// l        \_________________/
    /// k          our allocated block.
    /// s
    /// ```
    ///
    /// A block representing the marked area is then returned.
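    ///
    /// The aligner arithmetic in isolation, as a sketch with plain integers:
    ///
    /// ```
    /// let (start, align) = (13usize, 8);
    /// // Bytes wasted to move `start` up to the next `align`-aligned address.
    /// let aligner = (align - start % align) % align;
    /// assert_eq!(aligner, 3);
    /// assert_eq!((start + aligner) % align, 0);
    /// ```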
    fn alloc(&mut self, size: usize, align: usize) -> Block {
        // Logging.
        bk_log!(self, "Allocating {} bytes with alignment {}.", size, align);

        if let Some((n, b)) = self
            .pool
            .iter_mut()
            .enumerate()
            .filter_map(|(n, i)| {
                if i.size() >= size {
                    // Try to split at the aligner.
                    i.align(align).and_then(|(mut a, mut b)| {
                        if b.size() >= size {
                            // Override the old block.
                            *i = a;
                            Some((n, b))
                        } else {
                            // Put the split block back together and place it back in its spot.
                            a.merge_right(&mut b).expect("Unable to merge block right.");
                            *i = a;
                            None
                        }
                    })
                } else {
                    None
                }
            })
            .next()
        {
            // Update the pool byte count.
            self.total_bytes -= b.size();

            if self.pool[n].is_empty() {
                // For the empty alignment invariant.
                let _ = self.remove_at(n);
            }

            // Split and mark the block uninitialized to the debugger.
            let (res, excessive) = b.mark_uninitialized().split(size);

            // There are many corner cases that make knowing where to insert it difficult,
            // so we search instead.
            self.free(excessive);

            // Check consistency.
            self.check();
            debug_assert!(res.aligned_to(align), "Alignment failed.");
            debug_assert!(
                res.size() == size,
                "Requested space does not match with the returned block."
            );

            res
        } else {
            // No fitting block found. Allocate a new block.
            self.alloc_external(size, align)
        }
    }

    /// Free a memory block.
    ///
    /// After this has been called, no guarantees are made about the passed pointer. If it wants
    /// to, it could begin shooting laser beams.
    ///
    /// Freeing an invalid block will drop all future guarantees about this bookkeeper.
    ///
    /// # Example
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B                           I--------I
    /// l        \_________________/
    /// k          the used block we want to deallocate.
    /// s
    /// ```
    ///
    /// If the blocks are adjacent, we merge them:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B        I-----------------I
    /// l                           I--------I
    /// k
    /// s
    /// ```
    ///
    /// This gives us:
    ///
    /// ```notrust
    ///    Address space
    ///   I------------------------I
    /// B                           I--------I
    /// l
    /// k
    /// s
    /// ```
    ///
    /// And we're done. If it cannot be done, we insert the block, while keeping the list sorted.
    /// See [`insert`](#method.insert) for details.
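    ///
    /// The merge itself, sketched with a `Range` standing in for a `Block`:
    ///
    /// ```
    /// let (mut a, b) = (0..8, 8..20);
    /// // Adjacent blocks merge; `merge_right` does this for `Block`s in the real code.
    /// if a.end == b.start {
    ///     a.end = b.end;
    /// }
    /// assert_eq!(a, 0..20);
    /// ```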
    #[inline]
    fn free(&mut self, block: Block) {
        // Just logging for the unlucky people debugging this shit. No problem.
        bk_log!(self, "Freeing {:?}...", block);

        // Binary search for the block.
        let bound = self.find_bound(&block);

        // Free the given block.
        self.free_bound(bound, block);
    }

    /// Reallocate memory.
    ///
    /// If necessary (inplace reallocation is not possible or feasible), it will allocate a new
    /// buffer, fill it with the contents of the old buffer, and deallocate the replaced buffer.
    ///
    /// The following guarantees are made:
    ///
    /// 1. The returned block is valid and aligned to `align`.
    /// 2. The returned block contains the same data byte-for-byte as the original buffer.
    ///
    /// The data will be truncated if `new_size` is smaller than `block`'s size.
    ///
    /// # Example
    ///
    /// We will first try to perform an in-place reallocation, and if that fails, we will use
    /// memmove.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B \~~~~~~~~~~~~~~~~~~~~~/
    /// l          needed
    /// k
    /// s
    /// ```
    ///
    /// We simply find the block next to our initial block. If this block is free and has
    /// sufficient size, we will simply merge it into our initial block, and leave the excessive
    /// space as free. If these conditions are not met, we have to allocate a new list, and then
    /// deallocate the old one, after which we use memmove to copy the data over to the newly
    /// allocated list.
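    ///
    /// The fallback path, sketched with plain buffers (the real code works on raw blocks):
    ///
    /// ```
    /// let old: Vec<u8> = vec![1, 2, 3];
    /// let new_size = 5;
    /// // Allocate a new buffer, copy the old contents over, and free the old buffer.
    /// let mut new = vec![0u8; new_size];
    /// new[..old.len()].copy_from_slice(&old); // `memmove` in the real code.
    /// drop(old);                              // `free` in the real code.
    /// assert_eq!(new, vec![1, 2, 3, 0, 0]);
    /// ```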
    fn realloc(&mut self, block: Block, new_size: usize, align: usize) -> Block {
        // Find the index bound.
        let ind = self.find_bound(&block);

        // Logging.
        bk_log!(self;ind, "Reallocating {:?} to size {} with align {}...", block, new_size, align);

        // Try to do an inplace reallocation.
        match self.realloc_inplace_bound(ind, block, new_size) {
            Ok(block) => block,
            Err(block) => {
                // Reallocation cannot be done inplace.

                // Allocate a new block with the same size.
                let mut res = self.alloc(new_size, align);

                // Copy the old data to the new location.
                block.copy_to(&mut res);

                // Free the old block.
                // Allocation may have moved insertion, so we search again.
                self.free(block);

                // Check consistency.
                self.check();
                debug_assert!(res.aligned_to(align), "Alignment failed.");
                debug_assert!(
                    res.size() >= new_size,
                    "Requested space does not match with the returned block."
                );

                res
            }
        }
    }

    /// Extend/shrink the buffer inplace.
    ///
    /// This will try to extend the buffer without copying, if the new size is larger than the
    /// old one. If not, truncate the block and place it back in the pool.
    ///
    /// On failure, return `Err(Block)` with the old _intact_ block. Shrinking cannot fail.
    ///
    /// This shouldn't be used when the index of insertion is known, since this performs a binary
    /// search to find the block's index. When you know the index, use
    /// [`realloc_inplace_bound`](#method.realloc_inplace_bound).
    #[inline]
    fn realloc_inplace(&mut self, block: Block, new_size: usize) -> Result<Block, Block> {
        // Logging.
        bk_log!(self, "Reallocating {:?} inplace to {}...", block, new_size);

        // Find the bounds of the given block.
        let bound = self.find_bound(&block);

        // Go for it!
        let res = self.realloc_inplace_bound(bound, block, new_size);

        // Check consistency.
        debug_assert!(
            res.as_ref().ok().map_or(true, |x| x.size() == new_size),
            "Requested space does not match with the returned block."
        );

        res
    }

    /// Reallocate a block inplace on a known index bound.
    ///
    /// See [`realloc_inplace`](#method.realloc_inplace) for more information.
    fn realloc_inplace_bound(
        &mut self,
        ind: Range<usize>,
        mut block: Block,
        new_size: usize,
    ) -> Result<Block, Block> {
        // Logging.
        bk_log!(self;ind, "Try inplace reallocating {:?} to size {}.", block, new_size);

        // Assertions...
        debug_assert!(
            self.find(&block) == ind.start,
            "Block is not inserted at the appropriate index."
        );

        if new_size <= block.size() {
            // Shrink the block.
            bk_log!(self;ind, "Shrinking {:?}.", block);

            // Split the block in two segments, the main segment and the excessive segment.
            let (block, excessive) = block.split(new_size);
            // Free the excessive segment.
            self.free_bound(ind, excessive);

            // Make some assertions to avoid dumb bugs.
            debug_assert!(block.size() == new_size, "Block wasn't shrunk properly.");

            // Run a consistency check.
            self.check();

            return Ok(block);
        } else {
            // We check if `ind.end` is past the end of the array, or if the next block can
            // absorb the extension.
            let mut mergable = false;
            if let Some(entry) = self.pool.get_mut(ind.end) {
                mergable = entry.size() + block.size() >= new_size && block.left_to(entry);
            }

            // Note that we are sure that no segments in the array are adjacent (unless they have
            // size 0). This way we know that we will, at maximum, need one and only one block
            // for extending the current block.
            if mergable {
                // Logging...
                bk_log!(self;ind, "Merging {:?} to the right.", block);

                // We'll merge it with the block at the end of the range.
                block
                    .merge_right(&mut self.remove_at(ind.end))
                    .expect("Unable to merge block right, to the end of the range.");
                // Merge succeeded.

                // Place the excessive block back.
                let (res, excessive) = block.split(new_size);
                // `remove_at` may have shortened the vector.
                if ind.start == self.pool.len() {
                    self.push(excessive);
                } else if !excessive.is_empty() {
                    self.pool[ind.start] = excessive;
                }
                // Block will still not be adjacent, due to `excessive` being guaranteed to not
                // be adjacent to the next block.

                // Run a consistency check.
                self.check();

                return Ok(res);
            }
        }

        Err(block)
    }

    /// Free a block placed in some index bound.
    ///
    /// This will at maximum insert one element.
    ///
    /// See [`free`](#method.free) for more information.
    #[inline]
    fn free_bound(&mut self, ind: Range<usize>, mut block: Block) {
        // Logging.
        bk_log!(self;ind, "Freeing {:?}.", block);

        // Short circuit in case of empty block.
        if block.is_empty() {
            return;
        }

        // When compiled with `security`, we zero this block.
        block.sec_zero();

        if ind.start == self.pool.len() {
            self.push(block);
            return;
        }

        // Assertions...
        debug_assert!(
            self.find(&block) == ind.start,
            "Block is not inserted at the appropriate index."
        );

        // Try to merge it with the block to the right.
        if ind.end < self.pool.len() && block.left_to(&self.pool[ind.end]) {
            // Merge the block with the rightmost block in the range.
            block
                .merge_right(&mut self.remove_at(ind.end))
                .expect("Unable to merge block right to the block at the end of the range");

            // The merging succeeded. We proceed to try to close in the possible gap.
            let size = block.size();
            if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
                self.total_bytes += size;
            }

            // Check consistency.
            self.check();

            return;
        // Dammit, let's try to merge left.
        } else if ind.start != 0 && self.pool[ind.start - 1].left_to(&block) {
            let size = block.size();
            if self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
                self.total_bytes += size;
            }

            // Check consistency.
            self.check();

            return;
        }

        // Well, it failed, so we insert it the old-fashioned way.
        self.insert(ind.start, block);

        // Check consistency.
        self.check();
    }

    /// Allocate external ("fresh") space.
    ///
    /// "Fresh" means that the space is allocated through the breaker.
    ///
    /// The returned pointer is guaranteed to be aligned to `align`.
    fn alloc_external(&mut self, size: usize, align: usize) -> Block {
        // Logging.
        bk_log!(
            self,
            "Fresh allocation of size {} with alignment {}.",
            size,
            align
        );

        // Break it to me!
        let res = self.alloc_fresh(size, align);

        // Check consistency.
        self.check();

        res
    }

    /// Push an element without reserving.
    // TODO: Make `push` and `free` one.
    fn push(&mut self, block: Block) {
        // Logging.
        bk_log!(self;self.pool.len(), "Pushing {:?}.", block);

        // Mark the block free.
        let mut block = block.mark_free();

        // Short-circuit in case of an empty block.
        if !block.is_empty() {
            // Trigger the new memory event handler.
            //self.on_new_memory();

            // Update the pool byte count.
            self.total_bytes += block.size();

            // Some assertions...
            debug_assert!(
                self.pool.is_empty() || &block > self.pool.last().unwrap(),
                "Pushing will make the list unsorted."
            );

            // We will try to simply merge it with the last block.
            if let Some(x) = self.pool.last_mut() {
                if x.merge_right(&mut block).is_ok() {
                    return;
                }
            }

            // Reserve space and free the old buffer.
            if let Some(x) = unborrow!(self.reserve(self.pool.len() + 1)) {
                // Note that we do not set the count down because this isn't setting back our
                // pushed block.
                self.free(x);
            }

            // Try again to merge with the last block, on the off chance reserve pushed something
            // we can merge with. This has actually happened in testing.
            if let Some(x) = self.pool.last_mut() {
                if x.merge_right(&mut block).is_ok() {
                    return;
                }
            }
            // Merging failed. Note that trailing empty blocks are not allowed, hence the last
            // block is the only non-empty candidate which may be adjacent to `block`.

            // Check again that pushing is correct.
            if self.pool.is_empty() || &block > self.pool.last().unwrap() {
                // We push.
                let res = self.pool.push(block);

                // Make some assertions.
                debug_assert!(res.is_ok(), "Push failed (buffer full).");
            } else {
                // `free` handles the count, so we set it back.
                // TODO: Find a better way to do so.
                self.total_bytes -= block.size();

                // Can't push because reserve changed the end of the pool.
                self.free(block);
            }
        }

        // Check consistency.
        self.check();
    }

    /// Reserve some number of elements, and return the old buffer's block.
    ///
    /// # Assumptions
    ///
    /// This is assumed to not modify the order. If some block `b` is associated with index `i`
    /// prior to a call of this function, it should be too after it.
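    ///
    /// A sketch of the reentrancy guard (the `reserving` flag) in isolation, with hypothetical
    /// names:
    ///
    /// ```
    /// struct Pool {
    ///     reserving: bool,
    /// }
    ///
    /// impl Pool {
    ///     fn reserve(&mut self) {
    ///         // Already reserving: don't recurse into another reservation.
    ///         if self.reserving {
    ///             return;
    ///         }
    ///         self.reserving = true;
    ///         // ... allocate a bigger buffer, which may itself end up calling `reserve` ...
    ///         self.reserving = false;
    ///     }
    /// }
    ///
    /// Pool { reserving: false }.reserve();
    /// ```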
    fn reserve(&mut self, min_cap: usize) -> Option<Block> {
        // Logging.
        bk_log!(self;min_cap, "Reserving {}.", min_cap);

        if !self.reserving
            && (self.pool.capacity() < self.pool.len() + EXTRA_ELEMENTS
                || self.pool.capacity() < min_cap + EXTRA_ELEMENTS)
        {
            // Reserve a little extra for performance reasons.
            // TODO: This should be moved to some new method.
            let new_cap = min_cap + (EXTRA_ELEMENTS * 2) + config::extra_fresh(min_cap);

            // Catch 'em all.
            debug_assert!(new_cap > self.pool.capacity(), "Reserve shrinks?!");

            // Make sure no unbounded reallocation happens.
            self.reserving = true;

            // Break it to me!
            let new_buf =
                self.alloc_external(new_cap * mem::size_of::<Block>(), mem::align_of::<Block>());

            // Go back to the original state.
            self.reserving = false;

            let old_buf = self.pool.refill(new_buf);

            // Check consistency.
            self.check();

            Some(old_buf)
        } else {
            None
        }
    }

    /// Insert a block entry at some index.
    ///
    /// If the space is non-empty, the elements will be pushed filling out the empty gaps to the
    /// right.
    ///
    /// # Warning
    ///
    /// This might in fact break the order.
    ///
    /// # Panics
    ///
    /// Panics when `ind` is greater than the block pool's length.
    ///
    /// # Example
    ///
    /// We want to insert the block denoted by the tildes into our list. Perform a binary search
    /// to find where insertion is appropriate.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here              I--------I
    /// l                                I------------I
    /// k
    /// s                                                  I---I
    ///            I~~~~~~~~~~I
    /// ```
    ///
    /// We keep pushing the blocks to the right to the next entry until an empty entry is
    /// reached:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here              I--------I <~ this one cannot move down, due to being blocked.
    /// l
    /// k                                I------------I <~ thus we have moved this one down.
    /// s                                                  I---I
    ///            I~~~~~~~~~~I
    /// ```
    ///
    /// Repeating yields:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here
    /// l                     I--------I <~ this one cannot move down, due to being blocked.
    /// k                                I------------I <~ thus we have moved this one down.
    /// s                                                  I---I
    ///            I~~~~~~~~~~I
    /// ```
    ///
    /// Now an empty space is left out, meaning that we can insert the block:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B        I----------I
    /// l                     I--------I
    /// k                                I------------I
    /// s                                                  I---I
    /// ```
    ///
    /// The insertion is now completed.
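    ///
    /// The shift itself, sketched with a plain vector where `0` marks the empty entry (the real
    /// code memmoves `Block`s, and reserves extra space first if no gap exists):
    ///
    /// ```
    /// let mut pool = vec![1, 4, 9, 0, 12]; // `0` is the first gap at or after `ind`.
    /// let (ind, block) = (1, 3);
    /// let gap = pool.iter().skip(ind).position(|&x| x == 0).map(|n| n + ind);
    /// let end = gap.unwrap_or(pool.len() - 1);
    /// // Shift everything in `ind..end` one step right, consuming the gap...
    /// pool.copy_within(ind..end, ind + 1);
    /// // ...and write the new block into the hole left at `ind`.
    /// pool[ind] = block;
    /// assert_eq!(pool, vec![1, 3, 4, 9, 12]);
    /// ```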
    #[inline]
    fn insert(&mut self, ind: usize, block: Block) {
        // Logging.
        bk_log!(self;ind, "Inserting block {:?}...", block);

        // Bound check.
        assert!(self.pool.len() >= ind, "Insertion out of bounds.");

        // Some assertions...
        debug_assert!(
            self.pool.len() <= ind || block <= self.pool[ind],
            "Inserting at {} will make the list unsorted.",
            ind
        );
        debug_assert!(
            self.find(&block) == ind,
            "Block is not inserted at the appropriate index."
        );
        debug_assert!(!block.is_empty(), "Inserting an empty block.");

        // Trigger the new memory event handler.
        //self.on_new_memory();

        // Find the next gap, where a used block was.
        let gap = self
            .pool
            .iter()
            .enumerate()
            // We only check _after_ the index.
            .skip(ind)
            // Until the block is empty.
            .filter(|&(_, x)| x.is_empty())
            .next()
            .map(|(n, _)| n);

        // Log the operation.
        bk_log!(self;ind, "Moving all blocks from {} one block to the right.",
                gap.unwrap_or_else(|| self.pool.len()));

        // The old vector's buffer.
        let mut old_buf = None;

        unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).

            // The gap defaults to the end of the pool.
            let movcount = gap.unwrap_or_else(|| {
                // We will only extend the length if we were unable to fit it into the current
                // length.

                // Loooooooging...
                bk_log!(self;ind, "Block pool not long enough for shift. Extending.");
                bk_log!(self;ind, "copy {} blocks from {:p} to {:p}", self.pool.len() - 1 - ind, self.pool.get_unchecked(ind) as *const Block, self.pool.get_unchecked(ind + 1) as *const Block);

                // Reserve space. This does not break order, due to the assumption that
                // `reserve` never breaks order.
                old_buf = unborrow!(self.reserve(self.pool.len() + 1));

                // We will move a block into reserved memory but outside of the vec's bounds.
                // For that reason, we push an uninitialized element to extend the length, which
                // will be assigned in the memcpy.
                let res = self.pool.push(mem::zeroed());

                // Just some assertions...
                debug_assert!(res.is_ok(), "Push failed (buffer full).");

                bk_log!(self;ind, "copy {} blocks from {:p} to {:p}", self.pool.len() - 1 - ind, self.pool.get_unchecked(ind) as *const Block, self.pool.get_unchecked(ind + 1) as *const Block);

                self.pool.len() - 1
            }) - ind;

            // Memmove the elements to make a gap to the new block.
            ptr::copy(
                self.pool.get_unchecked(ind) as *const Block,
                self.pool.get_unchecked_mut(ind + 1) as *mut Block,
                movcount,
            );

            // Update the pool byte count.
            self.total_bytes += block.size();

            // Mark it free and set the element.
            ptr::write(self.pool.get_unchecked_mut(ind), block.mark_free());
        }

        // Free the old buffer, if it exists.
        if let Some(block) = old_buf {
            self.free(block);
        }

        // Check consistency.
        self.check();
    }
}