bookkeeper.rs
//! Memory bookkeeping.

use prelude::*;

use brk;
use vec::Vec;

use core::marker::PhantomData;
use core::ops::Range;
use core::{ptr, cmp, mem};

/// The memory bookkeeper.
///
/// This is the main component of ralloc. Its job is to keep track of the free blocks in a
/// structured manner, such that allocation, reallocation, and deallocation are all efficient.
/// Particularly, it keeps a list of blocks, commonly called the "block pool". This list is kept
/// sorted by the blocks' pointers. Entries in the block pool can be "empty", meaning that you can
/// overwrite the entry without breaking consistency.
///
/// Making use of only [`alloc`](#method.alloc), [`free`](#method.free), and
/// [`realloc`](#method.realloc) (and following their respective assumptions) guarantees that no
/// buffer overrun, arithmetic overflow, panic, or otherwise unexpected crash will happen.
pub struct Bookkeeper<B> {
    /// The internal block pool.
    ///
    /// # Guarantees
    ///
    /// Certain guarantees are made:
    ///
    /// 1. The list is always sorted with respect to the blocks' pointers.
    /// 2. No two blocks, whether consecutive in the list or delimited only by empty blocks, are
    ///    adjacent in memory, except if the right block is empty.
    /// 3. There are no trailing empty blocks.
    ///
    /// These are invariants assuming that only the public methods are used.
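    ///
    /// For illustration, a pool satisfying these invariants could look like the sketch below
    /// (the addresses are hypothetical, chosen only for this example):
    ///
    /// ```notrust
    /// [0x1000..0x1400, 0x1800..0x1800 (empty), 0x1800..0x2000]
    /// ```
    ///
    /// Note how the empty entry shares its address with its right neighbor, and how no two
    /// non-empty blocks touch.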
    pool: Vec<Block>,
    /// The number of bytes currently allocated.
    #[cfg(feature = "debug_tools")]
    allocated: usize,
    /// The "breaker", i.e. the fresh allocator.
    ///
    /// Its job is to acquire new memory through some external source (e.g. BRK or the global
    /// allocator).
    breaker: PhantomData<B>,
}

impl<B: Breaker> Bookkeeper<B> {
    /// Create a new, empty block pool.
    ///
    /// This will make no allocations or BRKs.
    #[inline]
    #[cfg(feature = "debug_tools")]
    pub const fn new() -> Bookkeeper<B> {
        Bookkeeper {
            pool: Vec::new(),
            allocated: 0,
            breaker: PhantomData,
        }
    }

    #[inline]
    #[cfg(not(feature = "debug_tools"))]
    pub const fn new() -> Bookkeeper<B> {
        Bookkeeper {
            pool: Vec::new(),
            breaker: PhantomData,
        }
    }

    /// Allocate a chunk of memory.
    ///
    /// This function takes a size and an alignment. From these a fitting block is found, to which
    /// a pointer is returned. The block returned is guaranteed to be aligned to `align`.
    ///
    /// # Example
    ///
    /// We start with our initial segment.
    ///
    /// ```notrust
    ///    Address space
    ///   I---------------------------------I
    /// B
    /// l
    /// k
    /// s
    /// ```
    ///
    /// We then split it at the aligner, which is used for making sure that the pointer is
    /// aligned properly.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B         ^    I--------------------------I
    /// l         al
    /// k
    /// s
    /// ```
    ///
    /// We then use the remaining block, but leave the excessive space.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B           I--------I
    /// l           \________/
    /// k      our allocated block.
    /// s
    /// ```
    ///
    /// A block representing the marked area is then returned.
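    ///
    /// # Usage sketch
    ///
    /// A minimal, hypothetical call (the `bookkeeper` value and the concrete numbers are
    /// assumptions for illustration, not part of the API beyond the guarantees stated above):
    ///
    /// ```notrust
    /// // Request 4096 bytes aligned to a 16-byte boundary.
    /// let block = bookkeeper.alloc(4096, 16);
    ///
    /// // The guarantees above imply:
    /// debug_assert!(block.size() == 4096);
    /// debug_assert!(block.aligned_to(16));
    /// ```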
    pub fn alloc(&mut self, size: usize, align: usize) -> Block {
        // TODO: scan more intelligently.

        // Logging.
        log!(self.pool, "Allocating {} bytes with alignment {}.", size, align);

        if let Some((n, b)) = self.pool.iter_mut().enumerate().filter_map(|(n, i)| {
            if i.size() >= size {
                // Try to split at the aligner.
                i.align(align).and_then(|(mut a, mut b)| {
                    if b.size() >= size {
                        // Override the old block.
                        *i = a;
                        Some((n, b))
                    } else {
                        // Put the split block back together and place it back in its spot.
                        a.merge_right(&mut b).unwrap();
                        *i = a;
                        None
                    }
                })
            } else {
                None
            }
        }).next() {
            if self.pool[n].is_empty() {
                // For the empty alignment invariant.
                let _ = self.remove_at(n);
            }

            let (res, excessive) = b.split(size);

            // Mark the excessive space as free.
            // There are many corner cases that make knowing where to insert it difficult,
            // so we search instead.
            let bound = self.find_bound(&excessive);
            self.free_bound(bound, excessive);

            // Check consistency.
            self.check();
            debug_assert!(res.aligned_to(align), "Alignment failed.");
            debug_assert!(res.size() == size, "Requested space does not match with the returned \
                          block.");

            self.leave(res)
        } else {
            // No fitting block found. Allocate a new block.
            let res = self.alloc_fresh(size, align);

            // "Leave" the allocator.
            self.leave(res)
        }
    }

    /// Free a memory block.
    ///
    /// After this has been called, no guarantees are made about the passed pointer. If it wants
    /// to, it could begin shooting laser beams.
    ///
    /// Freeing an invalid block will drop all future guarantees about this bookkeeper.
    ///
    /// # Example
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B           I--------I
    /// l           \________/
    /// k    the used block we want to deallocate.
    /// s
    /// ```
    ///
    /// If the blocks are adjacent, we merge them:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B    I-----------------I
    /// l                        I--------I
    /// k
    /// s
    /// ```
    ///
    /// This gives us:
    ///
    /// ```notrust
    ///    Address space
    ///   I------------------------I
    /// B                            I--------I
    /// l
    /// k
    /// s
    /// ```
    ///
    /// And we're done. If the merge cannot be done, we insert the block, while keeping the list
    /// sorted. See [`insert`](#method.insert) for details.
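    ///
    /// # Usage sketch
    ///
    /// A hypothetical round trip (the names are illustrative only):
    ///
    /// ```notrust
    /// let block = bookkeeper.alloc(128, 8);
    /// // ... use the block ...
    /// bookkeeper.free(block);
    /// // `block` must not be used from this point on.
    /// ```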
    #[inline]
    pub fn free(&mut self, block: Block) {
        // Just logging for the unlucky people debugging this shit. No problem.
        log!(self.pool, "Freeing {:?}...", block);

        // "Enter" the allocator.
        let block = self.enter(block);
        self.reserve_more(1);

        // Binary search for the block.
        let bound = self.find_bound(&block);
        // Free the given block.
        self.free_bound(bound, block);
    }

    /// Reallocate memory.
    ///
    /// If necessary (inplace reallocation is not possible or feasible) it will allocate a new
    /// buffer, fill it with the contents of the old buffer, and deallocate the replaced buffer.
    ///
    /// The following guarantees are made:
    ///
    /// 1. The returned block is valid and aligned to `align`.
    /// 2. The returned block contains the same data byte-for-byte as the original buffer.
    ///
    /// The data will be truncated if `new_size` is smaller than `block`'s size.
    ///
    /// # Example
    ///
    /// We will first try to perform an in-place reallocation, and if that fails, we will use
    /// memmove.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B  \~~~~~~~~~~~~~~~~~~~~~/
    /// l         needed
    /// k
    /// s
    /// ```
    ///
    /// We simply find the block next to our initial block. If this block is free and has
    /// sufficient size, we will simply merge it into our initial block, and leave the excessive
    /// space as free. If these conditions are not met, we have to allocate a new buffer, and then
    /// deallocate the old one, after which we use memmove to copy the data over to the newly
    /// allocated buffer.
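    ///
    /// # Usage sketch
    ///
    /// A hypothetical grow-then-shrink sequence (illustrative only):
    ///
    /// ```notrust
    /// let block = bookkeeper.alloc(64, 8);
    /// // Grow; the contents of the first 64 bytes are preserved.
    /// let block = bookkeeper.realloc(block, 256, 8);
    /// // Shrink; the data is truncated to the first 32 bytes.
    /// let block = bookkeeper.realloc(block, 32, 8);
    /// ```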
    pub fn realloc(&mut self, block: Block, new_size: usize, align: usize) -> Block {
        // Find the index bound.
        let ind = self.find_bound(&block);

        // Logging.
        log!(self.pool;ind, "Reallocating {:?} to size {} with align {}...", block, new_size, align);

        // "Enter" the allocator.
        let block = self.enter(block);

        // Try to do an inplace reallocation.
        match self.realloc_inplace_bound(ind, block, new_size) {
            Ok(block) => self.leave(block),
            Err(block) => {
                // Reallocation cannot be done inplace.

                // Allocate a new block with the requested size.
                let mut res = self.alloc(new_size, align);

                // Copy the old data to the new location.
                block.copy_to(&mut res);

                // Free the old block.
                // Allocation may have moved the insertion point, so we search again.
                let bound = self.find_bound(&block);
                self.free_bound(bound, block);

                // Check consistency.
                self.check();
                debug_assert!(res.aligned_to(align), "Alignment failed.");
                debug_assert!(res.size() >= new_size, "Requested space does not match with the \
                              returned block.");

                // Leave the allocator.
                self.leave(res)
            },
        }
    }

    /// Extend/shrink the buffer inplace.
    ///
    /// This will try to extend the buffer without copying, if the new size is larger than the old
    /// one. If not, truncate the block and place it back in the pool.
    ///
    /// On failure, return `Err(Block)` with the old _intact_ block. Shrinking cannot fail.
    ///
    /// This shouldn't be used when the index of insertion is known, since this performs a binary
    /// search to find the block's index. When you know the index, use
    /// [`realloc_inplace_bound`](#method.realloc_inplace_bound).
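    ///
    /// # Usage sketch
    ///
    /// Since the old block is handed back on failure, a caller can fall back to a copying
    /// reallocation, mirroring what [`realloc`](#method.realloc) does internally (hypothetical
    /// names, for illustration):
    ///
    /// ```notrust
    /// match bookkeeper.realloc_inplace(block, new_size) {
    ///     // Extended or truncated without copying.
    ///     Ok(block) => block,
    ///     // The old block is intact; fall back to allocate-copy-free.
    ///     Err(block) => bookkeeper.realloc(block, new_size, align),
    /// }
    /// ```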
    #[inline]
    pub fn realloc_inplace(&mut self, block: Block, new_size: usize) -> Result<Block, Block> {
        // Logging.
        log!(self.pool, "Reallocating {:?} inplace to {}...", block, new_size);

        // Find the bounds of the given block.
        let bound = self.find_bound(&block);

        // Go for it!
        let res = self.realloc_inplace_bound(bound, block, new_size);

        // Check consistency.
        debug_assert!(res.as_ref().ok().map_or(true, |x| x.size() == new_size), "Requested space \
                      does not match with the returned block.");

        res
    }

    /// Reallocate a block inplace, on a known index bound.
    ///
    /// See [`realloc_inplace`](#method.realloc_inplace) for more information.
    fn realloc_inplace_bound(&mut self, ind: Range<usize>, mut block: Block, new_size: usize) -> Result<Block, Block> {
        // Logging.
        log!(self.pool;ind, "Try inplace reallocating {:?} to size {}.", block, new_size);

        // Assertions...
        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
                      index.");

        if new_size <= block.size() {
            // Shrink the block.
            log!(self.pool;ind, "Shrinking {:?}.", block);

            // Split the block in two segments, the main segment and the excessive segment.
            let (block, excessive) = block.split(new_size);
            // Free the excessive segment.
            self.free_bound(ind, excessive);

            // Make some assertions to avoid dumb bugs.
            debug_assert!(block.size() == new_size, "Block wasn't shrunk properly.");

            // Run a consistency check.
            self.check();

            return Ok(block);
        } else {
            // Check the entry at the end of the bound (if any) for mergability.
            let mut mergable = false;
            if let Some(entry) = self.pool.get_mut(ind.end) {
                mergable = entry.size() + block.size() >= new_size && block.left_to(entry);
            }

            // Note that we are sure that no segments in the array are adjacent (unless they have
            // size 0). This way we know that we will, at maximum, need one and only one block for
            // extending the current block.
            if mergable {
                // Logging...
                log!(self.pool;ind, "Merging {:?} to the right.", block);

                // We'll merge it with the block at the end of the range.
                block.merge_right(&mut self.remove_at(ind.end)).unwrap();
                // Merge succeeded.

                // Place the excessive block back.
                let (res, excessive) = block.split(new_size);
                // `remove_at` may have shortened the vector.
                if ind.start == self.pool.len() {
                    self.push_no_reserve(excessive);
                } else if !excessive.is_empty() {
                    self.pool[ind.start] = excessive;
                }
                // The blocks will still not be adjacent, due to `excessive` being guaranteed to
                // not be adjacent to the next block.

                // Run a consistency check.
                self.check();

                // TODO: drop excessive space.
                return Ok(res);
            }
        }

        Err(block)
    }

    /// Free a block placed in some index bound.
    ///
    /// This will at maximum insert one element.
    ///
    /// See [`free`](#method.free) for more information.
    #[inline]
    fn free_bound(&mut self, ind: Range<usize>, mut block: Block) {
        // Logging.
        log!(self.pool;ind, "Freeing {:?}.", block);

        // Short circuit in case of an empty block.
        if block.is_empty() { return; }

        // When compiled with `security`, we zero this block.
        block.sec_zero();

        if ind.start == self.pool.len() {
            self.push_no_reserve(block);
            return;
        }

        // Assertions...
        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
                      index.");

        // Try to merge it with the block to the right.
        if ind.end < self.pool.len() && block.left_to(&self.pool[ind.end]) {
            block.merge_right(&mut self.remove_at(ind.end)).unwrap();
            // The merging succeeded. We proceed to try to close in the possible gap.
            if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
                self.check();
                return;
            }
        // Dammit, let's try to merge left.
        } else if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
            // Check consistency.
            self.check();
            return;
        }

        // Well, it failed, so we insert it the old-fashioned way.
        self.insert(ind.start, block);

        // Check consistency.
        self.check();
    }

    /// Allocate _fresh_ space.
    ///
    /// "Fresh" means that the space is allocated through the breaker.
    ///
    /// The returned pointer is guaranteed to be aligned to `align`.
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Logging.
        log!(self.pool, "Fresh allocation of size {} with alignment {}.", size, align);

        // Break it to me!
        let res = B::alloc_fresh(self, size, align);

        // Check consistency.
        self.check();

        res
    }

    /// Push two blocks to the block pool.
    ///
    /// This will append the blocks to the end of the block pool (and merge if possible). Make
    /// sure that these blocks have addresses higher than any of the elements in the list, to
    /// keep it sorted.
    ///
    /// The blocks are pushed linearly, so if they are adjacent in memory, they remain adjacent
    /// in the pool.
    #[inline]
    fn double_push(&mut self, block_a: Block, block_b: Block) {
        // Logging.
        log!(self.pool;self.pool.len(), "Pushing {:?} and {:?}.", block_a, block_b);

        // Catch stupid bug...
        debug_assert!(block_a <= block_b, "The first pushed block is not lower or equal to the second.");

        // Reserve extra elements.
        self.reserve_more(2);

        self.push_no_reserve(block_a);
        self.push_no_reserve(block_b);
    }

    /// Push an element without reserving.
    fn push_no_reserve(&mut self, mut block: Block) {
        // Logging.
        log!(self.pool;self.pool.len(), "Pushing {:?}.", block);

        // Short-circuit in case of an empty block.
        if !block.is_empty() {
            // We will try to simply merge it with the last block.
            if let Some(x) = self.pool.last_mut() {
                if x.merge_right(&mut block).is_ok() {
                    return;
                }
            }
            // Merging failed. Note that trailing empty blocks are not allowed, hence the last
            // block is the only non-empty candidate which may be adjacent to `block`.

            // We push.
            let res = self.pool.push(block);

            // Make some assertions.
            debug_assert!(res.is_ok(), "Push failed (buffer full).");
        }

        self.check();
    }

    /// Perform a binary search to find the appropriate place where the block can be inserted, or
    /// is already located.
    ///
    /// It is guaranteed that no block to the left of the returned index satisfies the above
    /// condition.
    #[inline]
    fn find(&mut self, block: &Block) -> usize {
        // TODO: optimize this function.
        // Logging.
        log!(self.pool, "Searching (exact) for {:?}.", block);

        let ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };
        let len = self.pool.len();

        // Move left, past empty blocks.
        ind - self.pool.iter_mut()
            .rev()
            .skip(len - ind)
            .take_while(|x| x.is_empty())
            .count()
    }

    /// Perform a binary search to find the appropriate bound where the block can be inserted, or
    /// is already located.
    ///
    /// It is guaranteed that no block to the left of the returned bound satisfies the above
    /// condition.
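    ///
    /// As an illustrative sketch (hypothetical pool): with a pool of
    /// `[0x1000..0x1400, 0x1800..0x1800 (empty), 0x1800..0x1800 (empty), 0x1800..0x2000]`,
    /// the bound returned for a block starting at `0x1800` covers the run of empty entries
    /// around the insertion point, so that `free_bound` and friends may overwrite them without
    /// breaking sortedness.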
    #[inline]
    fn find_bound(&mut self, block: &Block) -> Range<usize> {
        // TODO: optimize this function.
        // Logging.
        log!(self.pool, "Searching (bounds) for {:?}.", block);

        let mut left_ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };
        let len = self.pool.len();

        // Move left, past empty blocks.
        left_ind -= self.pool.iter_mut()
            .rev()
            .skip(len - left_ind)
            .take_while(|x| x.is_empty())
            .count();

        let mut right_ind = match self.pool.binary_search(&block.empty_right()) {
            Ok(x) | Err(x) => x,
        };

        // Move right, past empty blocks.
        right_ind += self.pool.iter()
            .skip(right_ind)
            .take_while(|x| x.is_empty())
            .count();

        left_ind..right_ind
    }

    /// Insert a block entry at some index.
    ///
    /// If the space is non-empty, the elements will be pushed to the right, filling out the
    /// empty gaps. If all places to the right are occupied, it will reserve additional space for
    /// the block pool.
    ///
    /// # Panics
    ///
    /// Panics when `ind` is greater than the block pool's length.
    ///
    /// # Example
    ///
    /// We want to insert the block denoted by the tildes into our list. Perform a binary search
    /// to find where insertion is appropriate.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B  < here                      I--------I
    /// l                                              I------------I
    /// k
    /// s                                                             I---I
    ///          I~~~~~~~~~~I
    /// ```
    ///
    /// We keep pushing the blocks to the right to the next entry until an empty entry is
    /// reached:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B  < here                      I--------I <~ this one cannot move down, due to being blocked.
    /// l
    /// k                                              I------------I <~ thus we have moved this one down.
    /// s                                                             I---I
    ///          I~~~~~~~~~~I
    /// ```
    ///
    /// Repeating yields:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B  < here
    /// l                              I--------I <~ this one cannot move down, due to being blocked.
    /// k                                              I------------I <~ thus we have moved this one down.
    /// s                                                             I---I
    ///          I~~~~~~~~~~I
    /// ```
    ///
    /// Now an empty space is left out, meaning that we can insert the block:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B        I----------I
    /// l                              I--------I
    /// k                                              I------------I
    /// s                                                             I---I
    /// ```
    ///
    /// The insertion is now completed.
    #[inline]
    fn insert(&mut self, ind: usize, block: Block) {
        // Logging.
        log!(self.pool;ind, "Inserting block {:?}...", block);

        // Bound check.
        assert!(self.pool.len() >= ind, "Insertion out of bounds.");

        // Some assertions...
        debug_assert!(self.pool.len() <= ind || block <= self.pool[ind], "Inserting at {} will \
                      make the list unsorted.", ind);
        debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");
        debug_assert!(!block.is_empty(), "Inserting an empty block.");

        // Find the next empty entry, which can be overwritten.
        let n = {
            // The element we search for.
            let elem = self.pool
                .iter()
                .skip(ind)
                .enumerate()
                .filter(|&(_, x)| x.is_empty())
                .next()
                .map(|(n, _)| n);

            elem.unwrap_or_else(|| {
                // Reserve capacity.
                self.reserve_more(1);

                // We default to the end of the pool.
                self.pool.len() - ind
            })
        };

        // Log the operation.
        log!(self.pool;ind, "Moving {} blocks to the right.", n);

        unsafe {
            // TODO: clean this mess up.
            if ind + n == self.pool.len() {
                // We will move a block into reserved memory but outside of the vec's bounds. For
                // that reason, we push an uninitialized element to extend the length, which will
                // be assigned in the memcpy.
                let res = self.pool.push(mem::uninitialized());

                // Just some assertions...
                debug_assert!(res.is_ok(), "Push failed (buffer full).");
            }

            // Memmove the elements.
            ptr::copy(self.pool.get_unchecked(ind) as *const Block,
                      self.pool.get_unchecked_mut(ind + 1) as *mut Block, n);

            // Set the element.
            *self.pool.get_unchecked_mut(ind) = block;
        }

        // Check consistency.
        self.check();
    }

    /// Remove a block.
    fn remove_at(&mut self, ind: usize) -> Block {
        // Logging.
        log!(self.pool;ind, "Removing block.");

        if ind == self.pool.len() - 1 {
            let res = self.pool[ind].pop();

            // Make sure there are no trailing empty blocks.
            let new_len = self.pool.len() - self.pool.iter().rev().take_while(|x| x.is_empty()).count();

            // Truncate the vector.
            self.pool.truncate(new_len);

            res
        } else {
            // Calculate the empty blocks that will replace the removed entry.
            let empty = self.pool[ind + 1].empty_left();
            let empty2 = empty.empty_left();

            // Replace the block at `ind` with the left empty block from `ind + 1`.
            let res = mem::replace(&mut self.pool[ind], empty);

            // Iterate over the pool from `ind` and down.
            let skip = self.pool.len() - ind;
            for place in self.pool.iter_mut().rev().skip(skip).take_while(|x| x.is_empty()) {
                // Empty the blocks.
                *place = empty2.empty_left();
            }

            res
        }
    }

    /// Reserve space for the block pool.
    ///
    /// This will ensure the capacity is at least `extra` greater than the current length,
    /// potentially reallocating the block pool.
    fn reserve_more(&mut self, extra: usize) {
        // Logging.
        log!(self.pool;self.pool.len(), "Reserving {} past {}, currently has capacity {}.", extra,
             self.pool.len(), self.pool.capacity());

        let needed = self.pool.len() + extra;
        if needed > self.pool.capacity() {
            B::realloc_pool(self, needed);

            // Check consistency.
            self.check();
        }
    }

    /// Leave the allocator.
    ///
    /// A block should be "registered" through this function when it leaves the allocator (e.g.,
    /// is returned). This is used to keep track of the current heap usage and find memory leaks.
    #[inline]
    fn leave(&mut self, block: Block) -> Block {
        // Update the number of bytes allocated.
        #[cfg(feature = "debug_tools")]
        {
            self.allocated += block.size();
        }

        block
    }

    /// Enter the allocator.
    ///
    /// A block should be "registered" through this function when it enters the allocator (e.g.,
    /// is given as an argument). This is used to keep track of the current heap usage and find
    /// memory leaks.
    #[inline]
    fn enter(&mut self, block: Block) -> Block {
        // Update the number of bytes allocated.
        #[cfg(feature = "debug_tools")]
        {
            self.allocated -= block.size();
        }

        block
    }

    /// Perform consistency checks.
    ///
    /// This will check for the following conditions:
    ///
    /// 1. The list is sorted.
    /// 2. No blocks are adjacent.
    ///
    /// This is a NOOP in release mode.
    fn check(&self) {
        if cfg!(debug_assertions) {
            // Logging.
            log!(self.pool, "Checking...");

            // Reverse iterator over the blocks.
            let mut it = self.pool.iter().enumerate().rev();
            if let Some((_, x)) = it.next() {
                // Make sure there are no trailing empty blocks.
                assert!(!x.is_empty());

                let mut next = x;
                for (n, i) in it {
                    // Check if sorted.
                    assert!(next >= i, "The block pool is not sorted at index {} ({:?} < {:?}).",
                            n, next, i);
                    // Make sure no blocks are adjacent.
                    assert!(!i.left_to(next) || i.is_empty(), "Adjacent blocks at index {} ({:?} \
                            and {:?})", n, i, next);
                    // Make sure an empty block has the same address as its right neighbor.
                    assert!(!i.is_empty() || i == next, "Empty block not adjacent to right \
                            neighbor at index {} ({:?} and {:?})", n, i, next);

                    // Set the variable tracking the previous block.
                    next = i;
                }

                // Check for trailing empty blocks.
                assert!(!self.pool.last().unwrap().is_empty(), "Trailing empty blocks.");
            }

            // Logging...
            log!(self.pool, "Check OK!");
        }
    }

    /// Attach this allocator to the current thread.
    ///
    /// This will make sure this allocator's data is freed to the global allocator when the
    /// thread exits.
    pub unsafe fn attach(&mut self) {
        fn dtor<B: Breaker>(ptr: *mut Bookkeeper<B>) {
            // Read the bookkeeper out of its slot; the thread is exiting, so this is its last
            // use.
            let mut alloc = unsafe { ptr::read(ptr) };

            // Lock the global allocator.
            let mut global_alloc = allocator::GLOBAL_ALLOCATOR.lock();

            // TODO: we know this is sorted, so we could abuse that fact for faster insertion in
            // the global allocator.

            // Free everything in the allocator.
            while let Some(i) = alloc.pool.pop() {
                global_alloc.free(i);
            }

            // Gotta' make sure no memleaks are here; the pool's own buffer should be the only
            // allocation still accounted for.
            #[cfg(feature = "debug_tools")]
            alloc.assert_no_leak();

            // Deallocate the vector itself.
            global_alloc.free(Block::from(alloc.pool));
        }

        sys::register_thread_destructor(self as *mut Bookkeeper<B>, dtor::<B>).unwrap();
    }

    /// Check for memory leaks.
    ///
    /// This will make sure that all the allocated blocks have been freed.
    #[cfg(feature = "debug_tools")]
    fn assert_no_leak(&self) {
        assert!(self.allocated == self.pool.capacity() * mem::size_of::<Block>(), "Not all blocks \
                freed. Total allocated space is {} ({} free blocks).", self.allocated,
                self.pool.len());
    }
}

trait Breaker: Sized {
    /// Allocate _fresh_ space.
    ///
    /// "Fresh" means that the space is allocated through the breaker.
    ///
    /// The returned pointer is guaranteed to be aligned to `align`.
    fn alloc_fresh(bk: &mut Bookkeeper<Self>, size: usize, align: usize) -> Block;
    /// Reallocate the block pool to have capacity for at least `needed` blocks.
    fn realloc_pool(bk: &mut Bookkeeper<Self>, needed: usize);
}

/// SBRK fresh allocator.
///
/// This will extend the data segment whenever new memory is needed. Since this includes leaving
/// userspace, this shouldn't be used when other allocators are available (i.e. the bookkeeper is
/// local).
struct Sbrk;

impl Breaker for Sbrk {
    #[inline]
    fn alloc_fresh(bk: &mut Bookkeeper<Sbrk>, size: usize, align: usize) -> Block {
        // Obtain what you need.
        let (alignment_block, res, excessive) = brk::get(size, align);

        // Add it to the list. This will not change the order, since the pointer is higher than
        // all the previous blocks.
        bk.double_push(alignment_block, excessive);

        res
    }

    #[inline]
    fn realloc_pool(bk: &mut Bookkeeper<Sbrk>, needed: usize) {
        // TODO: allow BRK-free non-inplace reservations.
        // TODO: enable inplace reallocation in this position.

        // Reallocate the block pool.

        // Make a fresh allocation.
        let size = (needed +
            cmp::min(bk.pool.capacity(), 200 + bk.pool.capacity() / 2)
            // We add:
            + 1 // block for the alignment block.
            + 1 // block for the freed vector.
            + 1 // block for the excessive space.
        ) * mem::size_of::<Block>();
        let (alignment_block, alloc, excessive) = brk::get(size, mem::align_of::<Block>());

        // Refill the pool.
        let old = bk.pool.refill(alloc);

        // Double push the alignment block and the excessive space linearly (note that it is in
        // fact at the end of the pool, due to BRK _extending_ the segment).
        bk.double_push(alignment_block, excessive);

        // Free the old vector.
        bk.free(old);
    }
}

/// Allocate fresh memory from the global allocator.
struct GlobalAllocator;

impl Breaker for GlobalAllocator {
    #[inline]
    fn alloc_fresh(bk: &mut Bookkeeper<GlobalAllocator>, size: usize, align: usize) -> Block {
        /// Canonicalize the requested space.
        ///
        /// We request excessive space from the upstream allocator to avoid repeated requests and
        /// lock contention.
        #[inline]
        fn canonicalize_space(min: usize) -> usize {
            // TODO: tweak this.

            // To avoid having mega-allocations allocate way too much space, we have a maximal
            // extra space limit.
            if min > 8192 { min } else {
                // To avoid paying for short-living or little-allocating threads, we have no
                // minimum. Instead we multiply.
                min * 4
                // This won't overflow due to the condition of this branch.
            }
        }
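
        // For instance, `canonicalize_space(100)` yields 400 (the request is quadrupled), while
        // `canonicalize_space(10000)` yields 10000 unchanged, since it exceeds the 8192-byte
        // threshold.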
        // Get the block from the global allocator.
        let (res, excessive) = allocator::GLOBAL_ALLOCATOR.lock()
            .alloc(canonicalize_space(size), align)
            .split(size);

        // Free the excessive space to the current allocator. Note that you cannot simply push
        // (which is the case for SBRK), due to the block not necessarily being above all the
        // other blocks in the pool. For this reason, we let `free` handle the search and so on.
        bk.free(excessive);

        res
    }

    #[inline]
    fn realloc_pool(bk: &mut Bookkeeper<GlobalAllocator>, needed: usize) {
        // TODO: allow BRK-free non-inplace reservations.
        // TODO: enable inplace reallocation in this position.

        // Reallocate the block pool.

        // Make a fresh allocation.
        let size = (needed +
            cmp::min(bk.pool.capacity(), 200 + bk.pool.capacity() / 2)
            // We add:
            + 1 // block for the alignment block.
            + 1 // block for the freed vector.
            + 1 // block for the excessive space.
        ) * mem::size_of::<Block>();
        let (alignment_block, alloc, excessive) = brk::get(size, mem::align_of::<Block>());

        // Refill the pool.
        let old = bk.pool.refill(alloc);

        // Double push the alignment block and the excessive space linearly (note that it is in
        // fact at the end of the pool, due to BRK _extending_ the segment).
        bk.double_push(alignment_block, excessive);

        // Free the old vector.
        bk.free(old);
    }
}