// block.rs — memory-block primitives for the allocator.
  1. //! Memory blocks.
  2. //!
  3. //! Blocks are the main unit for the memory bookkeeping. A block is a simple construct with a
  4. //! `Pointer` pointer and a size. Occupied (non-free) blocks are represented by a zero-sized block.
  5. // TODO: Check the allow(cast_possible_wrap)s again.
  6. use prelude::*;
  7. use core::{ptr, cmp, mem, fmt};
/// A contiguous memory block.
///
/// This provides a number of guarantees,
///
/// 1. The buffer is valid for the block's lifetime, but not necessarily initialized.
/// 2. The Block "owns" the inner data.
/// 3. There is no interior mutability. Mutation requires either mutable access or ownership over
///    the block.
/// 4. The buffer is not aliased. That is, it does not overlap with other blocks and is not
///    aliased in any way.
///
/// All this is enforced through the type system. These invariants can only be broken through
/// unsafe code.
///
/// Accessing it through an immutable reference does not break these guarantees. That is, you are
/// not able to read/mutate without acquiring a _mutable_ reference.
#[must_use]
pub struct Block {
    /// The size of this block, in bytes.
    size: usize,
    /// The pointer to the start of this block.
    ptr: Pointer<u8>,
}
impl Block {
    /// Construct a block from its raw parts (pointer and size).
    ///
    /// # Safety
    ///
    /// The caller must guarantee that the `size` bytes starting at `ptr` are valid for the
    /// block's lifetime and are not owned or aliased by any other `Block`, upholding the
    /// invariants listed in the type documentation.
    #[inline]
    pub unsafe fn from_raw_parts(ptr: Pointer<u8>, size: usize) -> Block {
        Block {
            size: size,
            ptr: ptr,
        }
    }

    /// Create an empty block starting at `ptr`.
    #[inline]
    pub fn empty(ptr: Pointer<u8>) -> Block {
        Block {
            size: 0,
            // This won't alias `ptr`, since the block is empty (it covers zero bytes).
            ptr: ptr,
        }
    }

    /// Create an empty block representing the left edge of this block.
    #[inline]
    pub fn empty_left(&self) -> Block {
        Block::empty(self.ptr.clone())
    }

    /// Create an empty block representing the right edge of this block.
    #[inline]
    pub fn empty_right(&self) -> Block {
        Block {
            size: 0,
            ptr: unsafe {
                // LAST AUDIT: 2016-08-21 (Ticki).

                // By the invariants of this type (the end is addressable), this conversion
                // isn't overflowing.
                self.ptr.clone().offset(self.size as isize)
            },
        }
    }

    /// Merge this block with a block to the right.
    ///
    /// This will simply extend the block, adding the size of the block, and then set the size to
    /// zero. The return value is `Ok(())` on success, and `Err(())` on failure (e.g., the blocks
    /// are not adjacent).
    ///
    /// If you merge with a zero sized block, it will succeed, even if they are not adjacent.
    #[inline]
    pub fn merge_right(&mut self, block: &mut Block) -> Result<(), ()> {
        if block.is_empty() {
            // An empty block covers no memory, so "absorbing" it is a no-op.
            Ok(())
        } else if self.left_to(block) {
            // Since the end of `block` is bounded by the address space, adding them cannot
            // overflow.
            self.size += block.pop().size;
            // We pop it to make sure it isn't aliased.
            Ok(())
        } else { Err(()) }
    }

    /// Is this block empty/free?
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    /// Get the size of the block.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Is this block aligned to `align`?
    ///
    /// NOTE(review): assumes `align` is non-zero — a zero value would divide by zero here.
    /// Confirm against callers.
    #[inline]
    pub fn aligned_to(&self, align: usize) -> bool {
        self.ptr.get() as usize % align == 0
    }

    /// memcpy the block to another pointer.
    ///
    /// # Panics
    ///
    /// This will panic if the target block is smaller than the source.
    #[inline]
    pub fn copy_to(&self, block: &mut Block) {
        log!(INTERNAL, "Copying {:?} to {:?}", *self, *block);

        // Bound check.
        assert!(self.size <= block.size, "Block too small.");

        unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).

            // From the invariants of `Block`, this copy is well-defined (blocks never alias,
            // so the nonoverlapping requirement holds).
            ptr::copy_nonoverlapping(self.ptr.get(), block.ptr.get(), self.size);
        }
    }

    /// Volatile zero this memory if the `security` feature is set.
    pub fn sec_zero(&mut self) {
        use core::intrinsics;

        if cfg!(feature = "security") {
            log!(INTERNAL, "Zeroing {:?}", *self);

            unsafe {
                // LAST AUDIT: 2016-08-21 (Ticki).

                // Since the memory of the block is inaccessible (read-wise), zeroing it is
                // fully safe. The volatile write keeps the zeroing from being optimized out.
                intrinsics::volatile_set_memory(self.ptr.get(), 0, self.size);
            }
        }
    }

    /// "Pop" this block.
    ///
    /// This marks it as free, and returns the old value.
    #[inline]
    pub fn pop(&mut self) -> Block {
        // Swap `self` for an empty block at the same address, handing the old (sized) block
        // back to the caller so only one sized block ever owns the memory.
        unborrow!(mem::replace(self, Block::empty(self.ptr.clone())))
    }

    /// Is this block placed left to the given other block?
    #[inline]
    pub fn left_to(&self, to: &Block) -> bool {
        // This won't overflow due to the end being bounded by the address space.
        self.size + self.ptr.get() as usize == to.ptr.get() as usize
    }

    /// Split the block at some position.
    ///
    /// # Panics
    ///
    /// Panics if `pos` is out of bound.
    #[inline]
    pub fn split(self, pos: usize) -> (Block, Block) {
        assert!(pos <= self.size, "Split {} out of bound (size is {})!", pos, self.size);

        (
            Block {
                size: pos,
                ptr: self.ptr.clone(),
            },
            Block {
                size: self.size - pos,
                ptr: unsafe {
                    // LAST AUDIT: 2016-08-21 (Ticki).

                    // This won't overflow due to the assertion above, ensuring that it is
                    // bounded by the address space. See the `split_at_mut` source from
                    // libcore.
                    self.ptr.offset(pos as isize)
                },
            }
        )
    }

    /// Split this block, such that the second block is aligned to `align`.
    ///
    /// Returns `None` (leaving `self` untouched) if the padding required to reach `align`
    /// does not fit within this block.
    ///
    /// NOTE(review): assumes `align` is non-zero — a zero value would divide by zero below.
    /// Confirm against callers.
    #[inline]
    pub fn align(&mut self, align: usize) -> Option<(Block, Block)> {
        // Logging.
        log!(INTERNAL, "Padding {:?} to align {}", self, align);

        // TODO: This functions suffers from external fragmentation. Leaving bigger segments
        // might increase performance.

        // Calculate the aligner, which defines the smallest size required as precursor to
        // align the block to `align`.
        let aligner = (align - self.ptr.get() as usize % align) % align;
        //                                                      ^^^^^^^
        // To avoid wasting space on the case where the block is already aligned, we calculate
        // it modulo `align`.

        // Bound check.
        if aligner < self.size {
            // Invalidate the old block.
            let old = self.pop();

            Some((
                Block {
                    size: aligner,
                    ptr: old.ptr.clone(),
                },
                Block {
                    size: old.size - aligner,
                    ptr: unsafe {
                        // LAST AUDIT: 2016-08-21 (Ticki).

                        // The aligner is bounded by the size, which itself is bounded by the
                        // address space. Therefore, this conversion cannot overflow.
                        old.ptr.offset(aligner as isize)
                    },
                }
            ))
        } else {
            // Logging.
            log!(INTERNAL, "Unable to align block.");

            None
        }
    }

    /// Mark this block free to the debugger.
    ///
    /// The debugger might do things like memleak and use-after-free checks. This method
    /// informs the debugger that this block is freed.
    #[inline]
    pub fn mark_free(self) -> Block {
        #[cfg(feature = "debugger")]
        ::shim::debug::mark_free(*self.ptr as *const u8, self.size);

        self
    }

    /// Mark this block uninitialized to the debugger.
    ///
    /// To detect use-after-free, the allocator needs to mark freed memory as uninitialized so
    /// that later reads of it can be flagged.
    #[inline]
    pub fn mark_uninitialized(self) -> Block {
        #[cfg(feature = "debugger")]
        ::shim::debug::mark_unintialized(*self.ptr as *const u8, self.size);

        self
    }
}
  226. impl From<Block> for Pointer<u8> {
  227. fn from(from: Block) -> Pointer<u8> {
  228. from.ptr
  229. }
  230. }
  231. /// Compare the blocks address.
  232. impl PartialOrd for Block {
  233. #[inline]
  234. fn partial_cmp(&self, other: &Block) -> Option<cmp::Ordering> {
  235. self.ptr.get().partial_cmp(&other.ptr.get())
  236. }
  237. }
  238. /// Compare the blocks address.
  239. impl Ord for Block {
  240. #[inline]
  241. fn cmp(&self, other: &Block) -> cmp::Ordering {
  242. self.ptr.get().cmp(&other.ptr.get())
  243. }
  244. }
  245. impl cmp::PartialEq for Block {
  246. #[inline]
  247. fn eq(&self, other: &Block) -> bool {
  248. self.ptr.get() == other.ptr.get()
  249. }
  250. }
  251. impl cmp::Eq for Block {}
  252. impl fmt::Debug for Block {
  253. fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
  254. write!(f, "0x{:x}[{}]", self.ptr.get() as usize, self.size)
  255. }
  256. }
  257. #[cfg(test)]
  258. mod test {
  259. use prelude::*;
  260. #[test]
  261. fn test_array() {
  262. let arr = b"Lorem ipsum dolor sit amet";
  263. let block = unsafe {
  264. Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
  265. };
  266. // Test split.
  267. let (mut lorem, mut rest) = block.split(5);
  268. assert_eq!(lorem.size(), 5);
  269. assert_eq!(lorem.size() + rest.size(), arr.len());
  270. assert!(lorem < rest);
  271. assert_eq!(lorem, lorem);
  272. assert!(!rest.is_empty());
  273. assert!(lorem.align(2).unwrap().1.aligned_to(2));
  274. assert!(rest.align(15).unwrap().1.aligned_to(15));
  275. assert_eq!(Pointer::from(lorem).get() as usize + 5, Pointer::from(rest).get() as usize);
  276. }
  277. #[test]
  278. fn test_merge() {
  279. let arr = b"Lorem ipsum dolor sit amet";
  280. let block = unsafe {
  281. Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
  282. };
  283. let (mut lorem, mut rest) = block.split(5);
  284. lorem.merge_right(&mut rest).unwrap();
  285. let mut tmp = rest.split(0).0;
  286. assert!(tmp.is_empty());
  287. lorem.split(2).0.merge_right(&mut tmp).unwrap();
  288. }
  289. #[test]
  290. #[should_panic]
  291. fn test_oob() {
  292. let arr = b"lorem";
  293. let block = unsafe {
  294. Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
  295. };
  296. // Test OOB.
  297. block.split(6);
  298. }
  299. #[test]
  300. fn test_mutate() {
  301. let mut arr = [0u8, 2, 0, 0, 255, 255];
  302. let block = unsafe {
  303. Block::from_raw_parts(Pointer::new(&mut arr[0] as *mut u8), 6)
  304. };
  305. let (a, mut b) = block.split(2);
  306. a.copy_to(&mut b);
  307. assert_eq!(arr, [0, 2, 0, 2, 255, 255]);
  308. }
  309. #[test]
  310. fn test_empty_lr() {
  311. let arr = b"Lorem ipsum dolor sit amet";
  312. let block = unsafe {
  313. Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
  314. };
  315. assert!(block.empty_left().is_empty());
  316. assert!(block.empty_right().is_empty());
  317. assert_eq!(Pointer::from(block.empty_left()).get() as *const u8, arr.as_ptr());
  318. assert_eq!(block.empty_right(), block.split(arr.len()).1);
  319. }
  320. }