block.rs 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382
  1. //! Memory blocks.
  2. //!
  3. //! Blocks are the main unit for the memory bookkeeping. A block is a simple construct with a
  4. //! `Pointer` pointer and a size. Occupied (non-free) blocks are represented by a zero-sized block.
  5. // TODO: Check the allow(cast_possible_wrap)s again.
  6. use prelude::*;
  7. use core::{ptr, cmp, mem, fmt};
/// A contiguous memory block.
///
/// This provides a number of guarantees,
///
/// 1. The buffer is valid for the block's lifetime, but not necessarily initialized.
/// 2. The Block "owns" the inner data.
/// 3. There is no interior mutability. Mutation requires either mutable access or ownership over
///    the block.
/// 4. The buffer is not aliased. That is, it does not overlap with other blocks and is not
///    aliased in any way.
///
/// All this is enforced through the type system. These invariants can only be broken through
/// unsafe code.
///
/// Accessing it through an immutable reference does not break these guarantees. That is, you are
/// not able to read/mutate without acquiring a _mutable_ reference.
#[must_use]
pub struct Block {
    /// The size of this block, in bytes.
    ///
    /// A size of zero marks the block as empty/free (see `is_empty`); occupied blocks are
    /// represented by replacing them with a zero-sized block (see `pop`).
    size: usize,
    /// The pointer to the start of this block.
    ptr: Pointer<u8>,
}
impl Block {
    /// Construct a block from its raw parts (pointer and size).
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `ptr` is valid for `size` bytes and that the buffer is not
    /// aliased by any other block, upholding the invariants listed on `Block`.
    #[inline]
    pub unsafe fn from_raw_parts(ptr: Pointer<u8>, size: usize) -> Block {
        Block {
            size: size,
            ptr: ptr,
        }
    }

    /// Create an empty block starting at `ptr`.
    #[inline]
    pub fn empty(ptr: Pointer<u8>) -> Block {
        Block {
            size: 0,
            // This won't alias `ptr`, since the block is empty (it covers no bytes).
            ptr: ptr,
        }
    }

    /// Create an empty block representing the left edge of this block.
    #[inline]
    pub fn empty_left(&self) -> Block {
        Block::empty(self.ptr.clone())
    }

    /// Create an empty block representing the right edge of this block.
    #[inline]
    #[allow(cast_possible_wrap)]
    pub fn empty_right(&self) -> Block {
        Block {
            size: 0,
            ptr: unsafe {
                // LAST AUDIT: 2016-08-21 (Ticki).
                // By the invariants of this type (the end is addressable), this conversion isn't
                // overflowing.
                self.ptr.clone().offset(self.size as isize)
            },
        }
    }

    /// Merge this block with a block to the right.
    ///
    /// This will simply extend the block, adding the size of the block, and then set the size to
    /// zero. The return value is `Ok(())` on success, and `Err(())` on failure (e.g., the blocks
    /// are not adjacent).
    ///
    /// If you merge with a zero sized block, it will succeed, even if they are not adjacent.
    #[inline]
    pub fn merge_right(&mut self, block: &mut Block) -> Result<(), ()> {
        if block.is_empty() {
            // An empty block contributes no bytes, so merging is trivially successful.
            Ok(())
        } else if self.left_to(block) {
            // Since the end of `block` is bounded by the address space, adding them cannot
            // overflow.
            self.size += block.pop().size;
            // We pop it to make sure it isn't aliased.
            Ok(())
        } else { Err(()) }
    }

    /// Is this block empty/free?
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    /// Get the size of the block, in bytes.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Is this block aligned to `align`?
    ///
    /// That is, is the start address a multiple of `align`?
    #[inline]
    pub fn aligned_to(&self, align: usize) -> bool {
        *self.ptr as usize % align == 0
    }

    /// memcpy the block to another pointer.
    ///
    /// # Panics
    ///
    /// This will panic if the target block is smaller than the source.
    #[inline]
    pub fn copy_to(&self, block: &mut Block) {
        log!(INTERNAL, "Copying {:?} to {:?}", *self, *block);

        // Bound check.
        assert!(self.size <= block.size, "Block too small.");

        unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).
            // From the invariants of `Block` (the buffers are unaliased), this copy is
            // well-defined.
            ptr::copy_nonoverlapping(*self.ptr, *block.ptr, self.size);
        }
    }

    /// Volatile zero this memory if the `security` feature is set.
    ///
    /// The volatile write keeps the compiler from eliding the zeroing as a dead store.
    pub fn sec_zero(&mut self) {
        use core::intrinsics;

        if cfg!(feature = "security") {
            log!(INTERNAL, "Zeroing {:?}", *self);

            unsafe {
                // LAST AUDIT: 2016-08-21 (Ticki).
                // Since the memory of the block is inaccessible (read-wise), zeroing it is fully
                // safe.
                intrinsics::volatile_set_memory(*self.ptr, 0, self.size);
            }
        }
    }

    /// "Pop" this block.
    ///
    /// This marks it as free, and returns the old value. `self` is left as an empty block at the
    /// same address, so no aliasing copy of the buffer survives.
    #[inline]
    pub fn pop(&mut self) -> Block {
        unborrow!(mem::replace(self, Block::empty(self.ptr.clone())))
    }

    /// Is this block placed left to the given other block?
    ///
    /// That is, does this block end exactly where `to` begins?
    #[inline]
    pub fn left_to(&self, to: &Block) -> bool {
        // This won't overflow due to the end being bounded by the address space.
        self.size + *self.ptr as usize == *to.ptr as usize
    }

    /// Split the block at some position.
    ///
    /// The first returned block covers `pos` bytes, the second the remaining `size - pos`.
    ///
    /// # Panics
    ///
    /// Panics if `pos` is out of bound.
    #[inline]
    #[allow(cast_possible_wrap)]
    pub fn split(self, pos: usize) -> (Block, Block) {
        assert!(pos <= self.size, "Split {} out of bound (size is {})!", pos, self.size);

        (
            Block {
                size: pos,
                ptr: self.ptr.clone(),
            },
            Block {
                size: self.size - pos,
                ptr: unsafe {
                    // LAST AUDIT: 2016-08-21 (Ticki).
                    // This won't overflow due to the assertion above, ensuring that it is bounded
                    // by the address space. See the `split_at_mut` source from libcore.
                    self.ptr.offset(pos as isize)
                },
            }
        )
    }

    /// Split this block, such that the second block is aligned to `align`.
    ///
    /// Returns an `None` holding the intact block if `align` is out of bounds.
    #[inline]
    #[allow(cast_possible_wrap)]
    pub fn align(&mut self, align: usize) -> Option<(Block, Block)> {
        // Logging.
        log!(INTERNAL, "Padding {:?} to align {}", self, align);

        // TODO: This function suffers from external fragmentation. Leaving bigger segments might
        // increase performance.

        // Calculate the aligner, which defines the smallest size required as precursor to align
        // the block to `align`. To avoid wasting space on the case where the block is already
        // aligned, we calculate it modulo `align` (hence the trailing `% align`).
        let aligner = (align - *self.ptr as usize % align) % align;

        // Bound check.
        if aligner < self.size {
            // Invalidate the old block, so no aliasing copy remains while we carve it up.
            let old = self.pop();

            Some((
                Block {
                    size: aligner,
                    ptr: old.ptr.clone(),
                },
                Block {
                    size: old.size - aligner,
                    ptr: unsafe {
                        // LAST AUDIT: 2016-08-21 (Ticki).
                        // The aligner is bounded by the size, which itself is bounded by the
                        // address space. Therefore, this conversion cannot overflow.
                        old.ptr.offset(aligner as isize)
                    },
                }
            ))
        } else {
            // Logging.
            log!(INTERNAL, "Unable to align block.");

            None
        }
    }

    /// Mark this block free to the debugger.
    ///
    /// The debugger might do things like memleak and use-after-free checks. This method informs
    /// the debugger that this block is freed.
    #[inline]
    pub fn mark_free(self) -> Block {
        // Only compiled in when the debugger shim is enabled.
        #[cfg(feature = "debugger")]
        ::shim::debug::mark_free(*self.ptr as *const u8, self.size);

        self
    }

    /// Mark this block uninitialized to the debugger.
    ///
    /// To detect use-after-free, the allocator needs to mark freed memory as uninitialized in the
    /// debugger, so subsequent reads of it can be flagged.
    #[inline]
    pub fn mark_uninitialized(self) -> Block {
        // NOTE(review): `mark_unintialized` (sic) — spelling presumably mirrors the shim
        // function's own name; confirm against `shim::debug` before renaming either side.
        #[cfg(feature = "debugger")]
        ::shim::debug::mark_unintialized(*self.ptr as *const u8, self.size);

        self
    }
}
  229. impl From<Block> for Pointer<u8> {
  230. fn from(from: Block) -> Pointer<u8> {
  231. from.ptr
  232. }
  233. }
  234. /// Compare the blocks address.
  235. impl PartialOrd for Block {
  236. #[inline]
  237. fn partial_cmp(&self, other: &Block) -> Option<cmp::Ordering> {
  238. self.ptr.partial_cmp(&other.ptr)
  239. }
  240. }
  241. /// Compare the blocks address.
  242. impl Ord for Block {
  243. #[inline]
  244. fn cmp(&self, other: &Block) -> cmp::Ordering {
  245. self.ptr.cmp(&other.ptr)
  246. }
  247. }
  248. impl cmp::PartialEq for Block {
  249. #[inline]
  250. fn eq(&self, other: &Block) -> bool {
  251. *self.ptr == *other.ptr
  252. }
  253. }
  254. impl cmp::Eq for Block {}
  255. impl fmt::Debug for Block {
  256. fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
  257. write!(f, "0x{:x}[{}]", *self.ptr as usize, self.size)
  258. }
  259. }
  260. #[cfg(test)]
  261. mod test {
  262. use prelude::*;
  263. #[test]
  264. fn test_array() {
  265. let arr = b"Lorem ipsum dolor sit amet";
  266. let block = unsafe {
  267. Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
  268. };
  269. // Test split.
  270. let (mut lorem, mut rest) = block.split(5);
  271. assert_eq!(lorem.size(), 5);
  272. assert_eq!(lorem.size() + rest.size(), arr.len());
  273. assert!(lorem < rest);
  274. assert_eq!(lorem, lorem);
  275. assert!(!rest.is_empty());
  276. assert!(lorem.align(2).unwrap().1.aligned_to(2));
  277. assert!(rest.align(15).unwrap().1.aligned_to(15));
  278. assert_eq!(*Pointer::from(lorem) as usize + 5, *Pointer::from(rest) as usize);
  279. }
  280. #[test]
  281. fn test_merge() {
  282. let arr = b"Lorem ipsum dolor sit amet";
  283. let block = unsafe {
  284. Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
  285. };
  286. let (mut lorem, mut rest) = block.split(5);
  287. lorem.merge_right(&mut rest).unwrap();
  288. let mut tmp = rest.split(0).0;
  289. assert!(tmp.is_empty());
  290. lorem.split(2).0.merge_right(&mut tmp).unwrap();
  291. }
  292. #[test]
  293. #[should_panic]
  294. fn test_oob() {
  295. let arr = b"lorem";
  296. let block = unsafe {
  297. Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
  298. };
  299. // Test OOB.
  300. block.split(6);
  301. }
  302. #[test]
  303. fn test_mutate() {
  304. let mut arr = [0u8, 2, 0, 0, 255, 255];
  305. let block = unsafe {
  306. Block::from_raw_parts(Pointer::new(&mut arr[0] as *mut u8), 6)
  307. };
  308. let (a, mut b) = block.split(2);
  309. a.copy_to(&mut b);
  310. assert_eq!(arr, [0, 2, 0, 2, 255, 255]);
  311. }
  312. #[test]
  313. fn test_empty_lr() {
  314. let arr = b"Lorem ipsum dolor sit amet";
  315. let block = unsafe {
  316. Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
  317. };
  318. assert!(block.empty_left().is_empty());
  319. assert!(block.empty_right().is_empty());
  320. assert_eq!(*Pointer::from(block.empty_left()) as *const u8, arr.as_ptr());
  321. assert_eq!(block.empty_right(), block.split(arr.len()).1);
  322. }
  323. }