// block.rs
  1. //! Memory blocks.
  2. //!
  3. //! Blocks are the main unit for the memory bookkeeping. A block is a simple
  4. //! construct with a `Pointer` pointer and a size. Occupied (non-free) blocks
  5. //! are represented by a zero-sized block.
  6. // TODO: Check the allow(cast_possible_wrap)s again.
  7. use prelude::*;
  8. use core::{cmp, fmt, mem, ptr};
  9. /// A contiguous memory block.
  10. ///
  11. /// This provides a number of guarantees,
  12. ///
  13. /// 1. The buffer is valid for the block's lifetime, but not necessarily
  14. /// initialized. 2. The Block "owns" the inner data.
  15. /// 3. There is no interior mutability. Mutation requires either mutable access
  16. /// or ownership over the block.
  17. /// 4. The buffer is not aliased. That is, it do not overlap with other blocks
  18. /// or is aliased in any way.
  19. ///
  20. /// All this is enforced through the type system. These invariants can only be
  21. /// broken through unsafe code.
  22. ///
  23. /// Accessing it through an immutable reference does not break these
  24. /// guarantees. That is, you are not able to read/mutate without acquiring a
  25. /// _mutable_ reference.
  26. #[must_use]
  27. pub struct Block {
  28. /// The size of this block, in bytes.
  29. size: usize,
  30. /// The pointer to the start of this block.
  31. ptr: Pointer<u8>,
  32. }
  33. impl Block {
  34. /// Construct a block from its raw parts (pointer and size).
  35. #[inline]
  36. pub unsafe fn from_raw_parts(ptr: Pointer<u8>, size: usize) -> Block {
  37. Block {
  38. size: size,
  39. ptr: ptr,
  40. }
  41. }
  42. /// Create an empty block starting at `ptr`.
  43. #[inline]
  44. pub fn empty(ptr: Pointer<u8>) -> Block {
  45. Block {
  46. size: 0,
  47. // This won't alias `ptr`, since the block is empty.
  48. ptr: ptr,
  49. }
  50. }
  51. /// Create an empty block representing the left edge of this block
  52. #[inline]
  53. pub fn empty_left(&self) -> Block {
  54. Block::empty(self.ptr.clone())
  55. }
  56. /// Create an empty block representing the right edge of this block
  57. #[inline]
  58. pub fn empty_right(&self) -> Block {
  59. Block {
  60. size: 0,
  61. ptr: unsafe {
  62. // LAST AUDIT: 2016-08-21 (Ticki).
  63. // By the invariants of this type (the end is addressable),
  64. // this conversion isn't overflowing.
  65. self.ptr.clone().offset(self.size as isize)
  66. },
  67. }
  68. }
  69. /// Merge this block with a block to the right.
  70. ///
  71. /// This will simply extend the block, adding the size of the block, and
  72. /// then set the size to zero. The return value is `Ok(())` on success,
  73. /// and `Err(())` on failure (e.g., the blocks are not adjacent).
  74. ///
  75. /// If you merge with a zero sized block, it will succeed, even if they are
  76. /// not adjacent.
  77. #[inline]
  78. pub fn merge_right(&mut self, block: &mut Block) -> Result<(), ()> {
  79. if block.is_empty() {
  80. Ok(())
  81. } else if self.left_to(block) {
  82. // Since the end of `block` is bounded by the address space, adding
  83. // them cannot overflow.
  84. self.size += block.pop().size;
  85. // We pop it to make sure it isn't aliased.
  86. Ok(())
  87. } else {
  88. Err(())
  89. }
  90. }
  91. /// Is this block empty/free?
  92. #[inline]
  93. pub fn is_empty(&self) -> bool {
  94. self.size == 0
  95. }
  96. /// Get the size of the block.
  97. pub fn size(&self) -> usize {
  98. self.size
  99. }
  100. /// Is this block aligned to `align`?
  101. #[inline]
  102. pub fn aligned_to(&self, align: usize) -> bool {
  103. self.ptr.get() as usize % align == 0
  104. }
  105. /// memcpy the block to another pointer.
  106. ///
  107. /// # Panics
  108. ///
  109. /// This will panic if the target block is smaller than the source.
  110. #[inline]
  111. pub fn copy_to(&self, block: &mut Block) {
  112. log!(INTERNAL, "Copying {:?} to {:?}", *self, *block);
  113. // Bound check.
  114. assert!(self.size <= block.size, "Block too small.");
  115. unsafe {
  116. // LAST AUDIT: 2016-08-21 (Ticki).
  117. // From the invariants of `Block`, this copy is well-defined.
  118. ptr::copy_nonoverlapping(
  119. self.ptr.get(),
  120. block.ptr.get(),
  121. self.size,
  122. );
  123. }
  124. }
  125. /// Volatile zero this memory if the `security` feature is set.
  126. pub fn sec_zero(&mut self) {
  127. use core::intrinsics;
  128. if cfg!(feature = "security") {
  129. log!(INTERNAL, "Zeroing {:?}", *self);
  130. unsafe {
  131. // LAST AUDIT: 2016-08-21 (Ticki).
  132. // Since the memory of the block is inaccessible (read-wise),
  133. // zeroing it is fully safe.
  134. intrinsics::volatile_set_memory(self.ptr.get(), 0, self.size);
  135. }
  136. }
  137. }
  138. /// "Pop" this block.
  139. ///
  140. /// This marks it as free, and returns the old value.
  141. #[inline]
  142. pub fn pop(&mut self) -> Block {
  143. unborrow!(mem::replace(self, Block::empty(self.ptr.clone())))
  144. }
  145. /// Is this block placed left to the given other block?
  146. #[inline]
  147. pub fn left_to(&self, to: &Block) -> bool {
  148. // This won't overflow due to the end being bounded by the address
  149. // space.
  150. self.size + self.ptr.get() as usize == to.ptr.get() as usize
  151. }
  152. /// Split the block at some position.
  153. ///
  154. /// # Panics
  155. ///
  156. /// Panics if `pos` is out of bound.
  157. #[inline]
  158. pub fn split(self, pos: usize) -> (Block, Block) {
  159. assert!(
  160. pos <= self.size,
  161. "Split {} out of bound (size is {})!",
  162. pos,
  163. self.size
  164. );
  165. (
  166. Block {
  167. size: pos,
  168. ptr: self.ptr.clone(),
  169. },
  170. Block {
  171. size: self.size - pos,
  172. ptr: unsafe {
  173. // LAST AUDIT: 2016-08-21 (Ticki).
  174. // This won't overflow due to the assertion above, ensuring
  175. // that it is bounded by the address
  176. // space. See the `split_at_mut` source from libcore.
  177. self.ptr.offset(pos as isize)
  178. },
  179. },
  180. )
  181. }
  182. /// Split this block, such that the second block is aligned to `align`.
  183. ///
  184. /// Returns an `None` holding the intact block if `align` is out of bounds.
  185. #[inline]
  186. pub fn align(&mut self, align: usize) -> Option<(Block, Block)> {
  187. // Logging.
  188. log!(INTERNAL, "Padding {:?} to align {}", self, align);
  189. // TODO: This functions suffers from external fragmentation. Leaving
  190. // bigger segments might increase performance.
  191. // Calculate the aligner, which defines the smallest size required as
  192. // precursor to align the block to `align`.
  193. let aligner = (align - self.ptr.get() as usize % align) % align;
  194. // ^^^^^^^^
  195. // To avoid wasting space on the case where the block is already
  196. // aligned, we calculate it modulo `align`.
  197. // Bound check.
  198. if aligner < self.size {
  199. // Invalidate the old block.
  200. let old = self.pop();
  201. Some((
  202. Block {
  203. size: aligner,
  204. ptr: old.ptr.clone(),
  205. },
  206. Block {
  207. size: old.size - aligner,
  208. ptr: unsafe {
  209. // LAST AUDIT: 2016-08-21 (Ticki).
  210. // The aligner is bounded by the size, which itself is
  211. // bounded by the address space.
  212. // Therefore, this conversion cannot overflow.
  213. old.ptr.offset(aligner as isize)
  214. },
  215. },
  216. ))
  217. } else {
  218. // Logging.
  219. log!(INTERNAL, "Unable to align block.");
  220. None
  221. }
  222. }
  223. /// Mark this block free to the debugger.
  224. ///
  225. /// The debugger might do things like memleak and use-after-free checks.
  226. /// This methods informs the debugger that this block is freed.
  227. #[inline]
  228. pub fn mark_free(self) -> Block {
  229. #[cfg(feature = "debugger")]
  230. ::shim::debug::mark_free(*self.ptr as *const u8, self.size);
  231. self
  232. }
  233. /// Mark this block uninitialized to the debugger.
  234. ///
  235. /// To detect use-after-free, the allocator need to mark
  236. #[inline]
  237. pub fn mark_uninitialized(self) -> Block {
  238. #[cfg(feature = "debugger")]
  239. ::shim::debug::mark_unintialized(*self.ptr as *const u8, self.size);
  240. self
  241. }
  242. }
  243. impl From<Block> for Pointer<u8> {
  244. fn from(from: Block) -> Pointer<u8> {
  245. from.ptr
  246. }
  247. }
  248. /// Compare the blocks address.
  249. impl PartialOrd for Block {
  250. #[inline]
  251. fn partial_cmp(&self, other: &Block) -> Option<cmp::Ordering> {
  252. self.ptr.get().partial_cmp(&other.ptr.get())
  253. }
  254. }
  255. /// Compare the blocks address.
  256. impl Ord for Block {
  257. #[inline]
  258. fn cmp(&self, other: &Block) -> cmp::Ordering {
  259. self.ptr.get().cmp(&other.ptr.get())
  260. }
  261. }
  262. impl cmp::PartialEq for Block {
  263. #[inline]
  264. fn eq(&self, other: &Block) -> bool {
  265. self.ptr.get() == other.ptr.get()
  266. }
  267. }
  268. impl cmp::Eq for Block {}
  269. impl fmt::Debug for Block {
  270. fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
  271. write!(f, "0x{:x}[{}]", self.ptr.get() as usize, self.size)
  272. }
  273. }
  274. #[cfg(test)]
  275. mod test {
  276. use prelude::*;
  277. #[test]
  278. fn test_array() {
  279. let arr = b"Lorem ipsum dolor sit amet";
  280. let block = unsafe {
  281. Block::from_raw_parts(
  282. Pointer::new(arr.as_ptr() as *mut u8),
  283. arr.len(),
  284. )
  285. };
  286. // Test split.
  287. let (mut lorem, mut rest) = block.split(5);
  288. assert_eq!(lorem.size(), 5);
  289. assert_eq!(lorem.size() + rest.size(), arr.len());
  290. assert!(lorem < rest);
  291. assert_eq!(lorem, lorem);
  292. assert!(!rest.is_empty());
  293. assert!(lorem.align(2).unwrap().1.aligned_to(2));
  294. assert!(rest.align(15).unwrap().1.aligned_to(15));
  295. assert_eq!(
  296. Pointer::from(lorem).get() as usize + 5,
  297. Pointer::from(rest).get() as usize
  298. );
  299. }
  300. #[test]
  301. fn test_merge() {
  302. let arr = b"Lorem ipsum dolor sit amet";
  303. let block = unsafe {
  304. Block::from_raw_parts(
  305. Pointer::new(arr.as_ptr() as *mut u8),
  306. arr.len(),
  307. )
  308. };
  309. let (mut lorem, mut rest) = block.split(5);
  310. lorem.merge_right(&mut rest).unwrap();
  311. let mut tmp = rest.split(0).0;
  312. assert!(tmp.is_empty());
  313. lorem.split(2).0.merge_right(&mut tmp).unwrap();
  314. }
  315. #[test]
  316. #[should_panic]
  317. fn test_oob() {
  318. let arr = b"lorem";
  319. let block = unsafe {
  320. Block::from_raw_parts(
  321. Pointer::new(arr.as_ptr() as *mut u8),
  322. arr.len(),
  323. )
  324. };
  325. // Test OOB.
  326. block.split(6);
  327. }
  328. #[test]
  329. fn test_mutate() {
  330. let mut arr = [0u8, 2, 0, 0, 255, 255];
  331. let block = unsafe {
  332. Block::from_raw_parts(Pointer::new(&mut arr[0] as *mut u8), 6)
  333. };
  334. let (a, mut b) = block.split(2);
  335. a.copy_to(&mut b);
  336. assert_eq!(arr, [0, 2, 0, 2, 255, 255]);
  337. }
  338. #[test]
  339. fn test_empty_lr() {
  340. let arr = b"Lorem ipsum dolor sit amet";
  341. let block = unsafe {
  342. Block::from_raw_parts(
  343. Pointer::new(arr.as_ptr() as *mut u8),
  344. arr.len(),
  345. )
  346. };
  347. assert!(block.empty_left().is_empty());
  348. assert!(block.empty_right().is_empty());
  349. assert_eq!(
  350. Pointer::from(block.empty_left()).get() as *const u8,
  351. arr.as_ptr()
  352. );
  353. assert_eq!(block.empty_right(), block.split(arr.len()).1);
  354. }
  355. }