|
@@ -8,7 +8,10 @@ use sys;
|
|
|
|
|
|
use core::mem::{align_of, size_of};
|
|
|
use core::ptr::Unique;
|
|
|
-use core::{ops, ptr, slice, cmp, fmt};
|
|
|
+use core::{ops, ptr, slice, cmp};
|
|
|
+
|
|
|
+#[cfg(debug_assertions)]
|
|
|
+use core::fmt;
|
|
|
|
|
|
/// An address representing an "empty" or non-allocated value on the heap.
|
|
|
const EMPTY_HEAP: *mut u8 = 0x1 as *mut _;
|
|
@@ -17,14 +20,14 @@ const EMPTY_HEAP: *mut u8 = 0x1 as *mut _;
|
|
|
///
|
|
|
/// This is the main primitive in ralloc. Its job is to keep track of the free blocks in a
|
|
|
/// structured manner, such that allocation, reallocation, and deallocation are all efficient.
|
|
|
-/// Parituclarly, it keeps a list of blocks, commonly called the "block list". This list is kept.
|
|
|
-/// Entries in the block list can be "empty", meaning that you can overwrite the entry without
|
|
|
+/// Particularly, it keeps a list of blocks, commonly called the "block vector". This list is kept.
|
|
|
+/// Entries in the block vector can be "empty", meaning that you can overwrite the entry without
|
|
|
/// breaking consistency.
|
|
|
///
|
|
|
-/// For details about the internals, see [`BlockList`](./struct.BlockList.html) (requires the docs
|
|
|
+/// For details about the internals, see [`BlockVec`](./struct.BlockVec.html) (requires the docs
|
|
|
/// to be rendered with private item exposed).
|
|
|
pub struct Bookkeeper {
|
|
|
- /// The internal block list.
|
|
|
+ /// The internal block vector.
|
|
|
///
|
|
|
/// Guarantees
|
|
|
/// ==========
|
|
@@ -34,7 +37,7 @@ pub struct Bookkeeper {
|
|
|
/// 1. The list is always sorted with respect to the block's pointers.
|
|
|
/// 2. No two blocks overlap.
|
|
|
/// 3. No two free blocks are adjacent.
|
|
|
- block_list: BlockList,
|
|
|
+ inner: BlockVec,
|
|
|
}
|
|
|
|
|
|
impl Bookkeeper {
|
|
@@ -43,7 +46,7 @@ impl Bookkeeper {
|
|
|
/// No allocations or BRKs are done.
|
|
|
pub fn new() -> Bookkeeper {
|
|
|
Bookkeeper {
|
|
|
- block_list: BlockList::new(),
|
|
|
+ inner: BlockVec::new(),
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -57,7 +60,7 @@ impl Bookkeeper {
|
|
|
/// bound is undefined behavior.
|
|
|
/// 3. It is a valid, unique, non-null pointer, until `free` is called again.
|
|
|
pub fn alloc(&mut self, size: usize, align: usize) -> Unique<u8> {
|
|
|
- self.block_list.alloc(size, align)
|
|
|
+ self.inner.alloc(size, align)
|
|
|
}
|
|
|
|
|
|
/// Reallocate memory.
|
|
@@ -71,7 +74,7 @@ impl Bookkeeper {
|
|
|
/// original buffer.
|
|
|
/// 3. Reading and writing up to the bound, `new_size`, is valid.
|
|
|
pub fn realloc(&mut self, block: Block, new_size: usize, align: usize) -> Unique<u8> {
|
|
|
- self.block_list.realloc(block, new_size, align)
|
|
|
+ self.inner.realloc(block, new_size, align)
|
|
|
}
|
|
|
|
|
|
/// Free a memory block.
|
|
@@ -81,7 +84,7 @@ impl Bookkeeper {
|
|
|
///
|
|
|
/// Freeing an invalid block will drop all future guarantees about this bookkeeper.
|
|
|
pub fn free(&mut self, block: Block) {
|
|
|
- self.block_list.free(block)
|
|
|
+ self.inner.free(block)
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -107,38 +110,47 @@ fn canonicalize_brk(size: usize) -> usize {
|
|
|
const BRK_MIN: usize = 200;
|
|
|
const BRK_MIN_EXTRA: usize = 10000; // TODO tune this?
|
|
|
|
|
|
- cmp::max(BRK_MIN, size.saturating_add(cmp::min(BRK_MULTIPLIER * size, BRK_MIN_EXTRA)))
|
|
|
+ let res = cmp::max(BRK_MIN, size.saturating_add(cmp::min(BRK_MULTIPLIER * size, BRK_MIN_EXTRA)));
|
|
|
+
|
|
|
+ debug_assert!(res >= size, "Canonicalized BRK space is smaller than the one requested.");
|
|
|
+
|
|
|
+ res
|
|
|
}
|
|
|
|
|
|
-/// A block list.
|
|
|
+/// A block vector.
|
|
|
///
|
|
|
/// This primitive is used for keeping track of the free blocks.
|
|
|
///
|
|
|
/// Only making use of only [`alloc`](#method.alloc), [`free`](#method.free),
|
|
|
/// [`realloc`](#method.realloc) (and following their respective assumptions) guarantee that no
|
|
|
/// buffer overrun, segfault, arithmetic overflow, or otherwise unexpected crash.
|
|
|
-struct BlockList {
|
|
|
- /// The capacity of the block list.
|
|
|
+struct BlockVec {
|
|
|
+ /// The capacity of the block vector.
|
|
|
cap: usize,
|
|
|
- /// The length of the block list.
|
|
|
+ /// The length of the block vector.
|
|
|
len: usize,
|
|
|
- /// The pointer to the first element in the block list.
|
|
|
+ /// The segment end.
|
|
|
+ ///
|
|
|
+ /// This points to the end of the heap.
|
|
|
+ seg_end: Unique<u8>,
|
|
|
+ /// The pointer to the first element in the block vector.
|
|
|
ptr: Unique<Block>,
|
|
|
}
|
|
|
|
|
|
-impl BlockList {
|
|
|
- /// Create a new, empty block list.
|
|
|
+impl BlockVec {
|
|
|
+ /// Create a new, empty block vector.
|
|
|
///
|
|
|
/// This will make no allocations or BRKs.
|
|
|
- fn new() -> BlockList {
|
|
|
- BlockList {
|
|
|
+ fn new() -> BlockVec {
|
|
|
+ BlockVec {
|
|
|
cap: 0,
|
|
|
len: 0,
|
|
|
+ seg_end: unsafe { Unique::new(EMPTY_HEAP as *mut _) },
|
|
|
ptr: unsafe { Unique::new(EMPTY_HEAP as *mut _) },
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- /// Initialize the block list.
|
|
|
+ /// Initialize the block vector.
|
|
|
///
|
|
|
/// This will do some basic initial allocation, and a bunch of other things as well. It is
|
|
|
/// necessary to avoid meta-circular dependency.
|
|
@@ -149,23 +161,51 @@ impl BlockList {
|
|
|
/// The initial capacity.
|
|
|
const INITIAL_CAPACITY: usize = 16;
|
|
|
|
|
|
- let reserve = INITIAL_CAPACITY * size_of::<Block>();
|
|
|
- let brk = unsafe {
|
|
|
- sys::inc_brk(reserve + align_of::<Block>()).unwrap_or_else(|x| x.handle())
|
|
|
+ let size = INITIAL_CAPACITY * size_of::<Block>() + align_of::<Block>();
|
|
|
+ // Use SYSBRK to allocate extra data segment.
|
|
|
+ let ptr = unsafe {
|
|
|
+ sys::inc_brk(size).unwrap_or_else(|x| x.handle())
|
|
|
};
|
|
|
- let aligner = aligner(*brk as *const _, align_of::<Block>());
|
|
|
|
|
|
- self.cap = reserve;
|
|
|
- self.ptr = unsafe {
|
|
|
- Unique::new((*brk as usize + aligner) as *mut _)
|
|
|
- };
|
|
|
+ // Calculate the aligner.
|
|
|
+ let aligner = aligner(*ptr, align_of::<Block>());
|
|
|
|
|
|
- self.push(Block {
|
|
|
+ // The alignment is used as precursor for our allocated block. This ensures that it is
|
|
|
+ // properly memory aligned to the requested value.
|
|
|
+ let alignment_block = Block {
|
|
|
size: aligner,
|
|
|
- ptr: brk,
|
|
|
- });
|
|
|
+ ptr: unsafe { Unique::new(*ptr) },
|
|
|
+ };
|
|
|
+
|
|
|
+ // Set the initial capacity.
|
|
|
+ self.cap = INITIAL_CAPACITY;
|
|
|
+ // Update the pointer.
|
|
|
+ self.ptr = unsafe { Unique::new(*alignment_block.end() as *mut _) };
|
|
|
+
|
|
|
+ // We have a stub in the end, which we will store as well.
|
|
|
+ let stub = Block {
|
|
|
+ size: align_of::<Block>() - aligner,
|
|
|
+ ptr: Block {
|
|
|
+ size: self.cap * size_of::<Block>(),
|
|
|
+ ptr: alignment_block.end(),
|
|
|
+ }.end(),
|
|
|
+ };
|
|
|
+ // Set the new segment end.
|
|
|
+ self.seg_end = stub.end();
|
|
|
+
|
|
|
+ // Add it to the list. This will not change the order, since the pointer is higher than all
|
|
|
+ // the previous blocks.
|
|
|
+ self.push(alignment_block);
|
|
|
+
|
|
|
+ if stub.size != 0 {
|
|
|
+ self.push(stub);
|
|
|
+ }
|
|
|
|
|
|
+ // Check consistency.
|
|
|
self.check();
|
|
|
+ debug_assert!(*self.ptr as usize % align_of::<Block>() == 0, "Alignment in `init` failed.");
|
|
|
+ debug_assert!(self.iter().map(|x| x.size).sum::<usize>() + self.cap * size_of::<Block>() ==
|
|
|
+ size, "BRK memory leaked in `init`.");
|
|
|
}
|
|
|
|
|
|
/// *[See `Bookkeeper`'s respective method.](./struct.Bookkeeper.html#method.alloc)*
|
|
@@ -257,14 +297,15 @@ impl BlockList {
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- /// Push to the block list.
|
|
|
+ /// Push to the block vector.
|
|
|
///
|
|
|
- /// This will append a block entry to the end of the block list. Make sure that this entry has
|
|
|
+ /// This will append a block entry to the end of the block vector. Make sure that this entry has
|
|
|
/// a value higher than any of the elements in the list, to keep it sorted.
|
|
|
fn push(&mut self, block: Block) {
|
|
|
// Some assertions.
|
|
|
debug_assert!(block.size != 0, "Pushing a zero sized block.");
|
|
|
- debug_assert!(self.last().map_or(0, |x| *x.ptr as usize) <= *block.ptr as usize, "The previous last block is higher than the new.");
|
|
|
+ debug_assert!(self.last().map_or(0, |x| *x.ptr as usize) <= *block.ptr as usize, "The \
|
|
|
+ previous last block is higher than the new.");
|
|
|
|
|
|
{
|
|
|
let len = self.len;
|
|
@@ -294,19 +335,26 @@ impl BlockList {
|
|
|
/// Allocate _fresh_ space.
|
|
|
///
|
|
|
/// "Fresh" means that the space is allocated through a BRK call to the kernel.
|
|
|
+ ///
|
|
|
+ /// The following guarantees are made:
|
|
|
+ ///
|
|
|
+ /// 1. The returned pointer is aligned to `align`.
|
|
|
+ /// 2. The returned pointer points to a _valid buffer of size `size` (in bytes)_.
|
|
|
+ /// 3. The returned pointer is equal to the old segment end, if the align is one.
|
|
|
fn alloc_fresh(&mut self, size: usize, align: usize) -> Unique<u8> {
|
|
|
// Calculate the canonical size (extra space is allocated to limit the number of system calls).
|
|
|
let can_size = canonicalize_brk(size);
|
|
|
- // Get the previous segment end.
|
|
|
- // TODO Is this thread-safe?
|
|
|
- let seg_end = sys::segment_end().unwrap_or_else(|x| x.handle());
|
|
|
- // Calculate the aligner.
|
|
|
- let aligner = aligner(seg_end, align);
|
|
|
+ let brk_size = can_size.checked_add(align).unwrap_or_else(|| sys::oom());
|
|
|
// Use SYSBRK to allocate extra data segment.
|
|
|
let ptr = unsafe {
|
|
|
- sys::inc_brk(can_size.checked_add(aligner).unwrap_or_else(|| sys::oom())).unwrap_or_else(|x| x.handle())
|
|
|
+ sys::inc_brk(brk_size).unwrap_or_else(|x| x.handle())
|
|
|
};
|
|
|
|
|
|
+ // Calculate the aligner.
|
|
|
+ let aligner = aligner(*ptr, align);
|
|
|
+
|
|
|
+ // The alignment is used as precursor for our allocated block. This ensures that it is
|
|
|
+ // properly memory aligned to the requested value.
|
|
|
let alignment_block = Block {
|
|
|
size: aligner,
|
|
|
ptr: ptr,
|
|
@@ -316,20 +364,30 @@ impl BlockList {
|
|
|
ptr: alignment_block.end(),
|
|
|
};
|
|
|
|
|
|
+ // Calculate the excessive space.
|
|
|
+ let excessive = Block {
|
|
|
+ // This won't overflow, since `can_size` is bounded by `size`
|
|
|
+ size: can_size - size,
|
|
|
+ ptr: res.end(),
|
|
|
+ };
|
|
|
+
|
|
|
+ // Make some assertions.
|
|
|
+ debug_assert!(*res.ptr as usize % align == 0, "Alignment in `alloc_fresh` failed.");
|
|
|
+ debug_assert!(res.size + alignment_block.size + excessive.size == brk_size, "BRK memory \
|
|
|
+ leak in fresh allocation.");
|
|
|
+
|
|
|
+ // Set the segment end.
|
|
|
+ self.seg_end = excessive.end();
|
|
|
+
|
|
|
// Add it to the list. This will not change the order, since the pointer is higher than all
|
|
|
// the previous blocks.
|
|
|
self.push(alignment_block);
|
|
|
|
|
|
- // Add the extra space allocated.
|
|
|
- self.push(Block {
|
|
|
- // This won't overflow, since `can_size` is bounded by `size`
|
|
|
- size: can_size - size,
|
|
|
- ptr: res.end(),
|
|
|
- });
|
|
|
+ // Push the excessive space to the end of the block vector.
|
|
|
+ self.push(excessive);
|
|
|
|
|
|
// Check consistency.
|
|
|
self.check();
|
|
|
- debug_assert!(*res.ptr as usize % align == 0, "Alignment in `alloc_fresh` failed.");
|
|
|
|
|
|
res.ptr
|
|
|
}
|
|
@@ -353,24 +411,50 @@ impl BlockList {
|
|
|
// Make sure that invariants aren't broken.
|
|
|
debug_assert!(new_size > block.size, "`realloc_inplace` cannot be used for shrinking!");
|
|
|
|
|
|
- // Note that we are sure that no segments in the array are adjacent (unless they have size
|
|
|
- // 0). This way we know that we will, at maximum, need one and only one block for extending
|
|
|
- // the current block.
|
|
|
- if block.left_to(&self[ind + 1]) && self[ind + 1].size + block.size >= new_size {
|
|
|
- // There is space for inplace reallocation.
|
|
|
+ let res;
|
|
|
+
|
|
|
+ // Evil hack to emulate post-function hooks.
|
|
|
+ // TODO make this more idiomatic.
|
|
|
+ loop {
|
|
|
+ {
|
|
|
+ // We check if `ind` is the end of the array.
|
|
|
+ if let Some(entry) = self.get_mut(ind + 1) {
|
|
|
+ // Note that we are sure that no segments in the array are adjacent (unless they have size
|
|
|
+ // 0). This way we know that we will, at maximum, need one and only one block for extending
|
|
|
+ // the current block.
|
|
|
+ if block.left_to(*entry.ptr) && entry.size + block.size >= new_size {
|
|
|
+ // There is space for inplace reallocation.
|
|
|
+
|
|
|
+ // Set the following block.
|
|
|
+ entry.size -= new_size - block.size;
|
|
|
+ // We now move the block to the new appropriate place.
|
|
|
+ entry.ptr = entry.end();
|
|
|
+
|
|
|
+ res = Ok(());
|
|
|
+ break;
|
|
|
+ } else { return Err(()) }
|
|
|
+ }
|
|
|
+ }
|
|
|
|
|
|
- // Set the following block.
|
|
|
- self[ind + 1].size -= new_size - block.size;
|
|
|
- // We now move the block to the new appropriate place.
|
|
|
- self[ind + 1].ptr = self[ind + 1].end();
|
|
|
+ // We are in the left outermost index, therefore we can extend the segment to the
|
|
|
+ // right.
|
|
|
+ if block.left_to(*self.seg_end) {
|
|
|
+ // We make a fresh allocation (BRK), since we are in the end of the segment, and
|
|
|
+ // thus an extension will simply extend our buffer.
|
|
|
+ let ptr = self.alloc_fresh(new_size - block.size, 1);
|
|
|
|
|
|
- // Run a consistency check.
|
|
|
- self.check();
|
|
|
+ // Check consistency.
|
|
|
+ debug_assert!(block.left_to(*ptr));
|
|
|
|
|
|
- Ok(())
|
|
|
- } else {
|
|
|
- Err(())
|
|
|
+ res = Ok(());
|
|
|
+ break;
|
|
|
+ } else { return Err(()) }
|
|
|
}
|
|
|
+
|
|
|
+ // Run a consistency check.
|
|
|
+ self.check();
|
|
|
+
|
|
|
+ res
|
|
|
}
|
|
|
|
|
|
/// *[See `Bookkeeper`'s respective method.](./struct.Bookkeeper.html#method.realloc)*
|
|
@@ -433,17 +517,17 @@ impl BlockList {
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- /// Reserve space for the block list.
|
|
|
+ /// Reserve space for the block vector.
|
|
|
///
|
|
|
/// This will extend the capacity to a number greater than or equals to `needed`, potentially
|
|
|
- /// reallocating the block list.
|
|
|
+ /// reallocating the block vector.
|
|
|
fn reserve(&mut self, needed: usize) {
|
|
|
/* TODO remove this.
|
|
|
if needed > self.cap {
|
|
|
// Set the new capacity.
|
|
|
self.cap = cmp::max(30, self.cap.saturating_mul(2));
|
|
|
|
|
|
- // Reallocate the block list.
|
|
|
+ // Reallocate the block vector.
|
|
|
self.ptr = unsafe {
|
|
|
let block = Block {
|
|
|
ptr: Unique::new(*self.ptr as *mut _),
|
|
@@ -470,7 +554,7 @@ impl BlockList {
|
|
|
let ind = self.find(&block);
|
|
|
// TODO allow BRK-free non-inplace reservations.
|
|
|
|
|
|
- // Reallocate the block list.
|
|
|
+ // Reallocate the block vector.
|
|
|
|
|
|
// We first try inplace.
|
|
|
if self.realloc_inplace(ind, &block, needed).is_ok() {
|
|
@@ -551,17 +635,35 @@ impl BlockList {
|
|
|
///
|
|
|
/// See [`free`](#method.free) for more information.
|
|
|
fn free_ind(&mut self, ind: usize, block: Block) {
|
|
|
- // Make some handy assertions.
|
|
|
- debug_assert!(*self[ind].ptr != *block.ptr || !self[ind].is_free(), "Double free.");
|
|
|
-
|
|
|
- // Try to merge right.
|
|
|
- if self[ind].is_free() && ind + 1 < self.len && self[ind].left_to(&block) {
|
|
|
- self[ind].size += block.size;
|
|
|
- // Try to merge left. Note that `self[ind]` is not free, by the conditional above.
|
|
|
- } else if self[ind - 1].is_free() && ind != 0 && self[ind - 1].left_to(&block) {
|
|
|
- self[ind - 1].size += block.size;
|
|
|
- } else {
|
|
|
+ // We use loops as an evil hack to make local returns.
|
|
|
+ // TODO: do this in a better way.
|
|
|
+ loop {
|
|
|
+ {
|
|
|
+ let len = self.len;
|
|
|
+ let entry = &mut self[ind];
|
|
|
+
|
|
|
+ // Make some handy assertions.
|
|
|
+ debug_assert!(*entry.ptr != *block.ptr || !entry.is_free(), "Double free.");
|
|
|
+
|
|
|
+ // Try to merge right.
|
|
|
+ if entry.is_free() && ind + 1 < len && entry.left_to(*block.ptr) {
|
|
|
+ entry.size += block.size;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if ind != 0 {
|
|
|
+ let prev_entry = &mut self[ind - 1];
|
|
|
+ // Try to merge left. Note that `entry` is not free, by the conditional above.
|
|
|
+ if prev_entry.is_free() && prev_entry.left_to(*block.ptr) {
|
|
|
+ prev_entry.size += block.size;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ // We will have to insert it in a normal manner.
|
|
|
self.insert(ind, block);
|
|
|
+ break;
|
|
|
}
|
|
|
|
|
|
// Check consistency.
|
|
@@ -572,7 +674,7 @@ impl BlockList {
|
|
|
///
|
|
|
/// If the space is non-empty, the elements will be pushed filling out the empty gaps to the
|
|
|
/// right. If all places to the right is occupied, it will reserve additional space to the
|
|
|
- /// block list.
|
|
|
+ /// block vector.
|
|
|
///
|
|
|
/// # Example
|
|
|
/// We want to insert the block denoted by the tildes into our list. Perform a binary search to
|
|
@@ -626,7 +728,8 @@ impl BlockList {
|
|
|
/// The insertion is now completed.
|
|
|
fn insert(&mut self, ind: usize, block: Block) {
|
|
|
// Some assertions...
|
|
|
- debug_assert!(block >= self[ind.saturating_sub(1)], "Inserting at {} will make the list unsorted.", ind);
|
|
|
+ debug_assert!(block >= self[ind.saturating_sub(1)], "Inserting at {} will make the list \
|
|
|
+ unsorted.", ind);
|
|
|
debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");
|
|
|
|
|
|
// TODO consider moving right before searching left.
|
|
@@ -676,23 +779,26 @@ impl BlockList {
|
|
|
/// 3. The length does not exceed the capacity.
|
|
|
#[cfg(debug_assertions)]
|
|
|
fn check(&self) {
|
|
|
- if self.len == 0 { return; }
|
|
|
+ if let Some(x) = self.first() {
|
|
|
+ let mut prev = *x.ptr;
|
|
|
+ let mut end = *x.ptr;
|
|
|
+ for (n, i) in self.iter().enumerate().skip(1) {
|
|
|
+ // Check if sorted.
|
|
|
+ assert!(*i.ptr >= prev, "The block vector is not sorted at index, {}: 0x{:x} ≤ \
|
|
|
+ 0x{:x}.", n, *i.ptr as usize, prev as usize);
|
|
|
+ // Check if overlapping.
|
|
|
+ assert!(*i.ptr > end || i.is_free() && *i.ptr == end, "Two blocks are \
|
|
|
+ overlapping/adjacent at index, {}.", n);
|
|
|
+ // Check if bounded by seg_end
|
|
|
+            assert!(*i.end() <= *self.seg_end, "The {}th element in the block vector is placed \
|
|
|
+ outside the segment.", n);
|
|
|
+ prev = *i.ptr;
|
|
|
+ end = *i.end();
|
|
|
+ }
|
|
|
|
|
|
- // Check if sorted.
|
|
|
- let mut prev = *self[0].ptr;
|
|
|
- for (n, i) in self.iter().enumerate().skip(1) {
|
|
|
- assert!(*i.ptr >= prev, "The block list is not sorted at index, {}: 0x{:x} ≤ 0x{:x}.", n, *i.ptr as usize, prev as usize);
|
|
|
- prev = *i.ptr;
|
|
|
- }
|
|
|
- // Check if overlapping.
|
|
|
- let mut prev = *self[0].ptr;
|
|
|
- for (n, i) in self.iter().enumerate().skip(1) {
|
|
|
- assert!(*i.ptr > prev || i.is_free() && *i.ptr == prev, "Two blocks are overlapping/adjacent at index, {}.", n);
|
|
|
- prev = *i.end();
|
|
|
+ // Check that the length is lower than or equals to the capacity.
|
|
|
+ assert!(self.len <= self.cap, "The capacity does not cover the length.");
|
|
|
}
|
|
|
-
|
|
|
- // Check that the length is lower than or equals to the capacity.
|
|
|
- assert!(self.len <= self.cap, "The capacity does not cover the length.");
|
|
|
}
|
|
|
|
|
|
/// Dump the contents into a format writer.
|
|
@@ -731,7 +837,7 @@ mod test {
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-impl ops::Deref for BlockList {
|
|
|
+impl ops::Deref for BlockVec {
|
|
|
type Target = [Block];
|
|
|
|
|
|
fn deref(&self) -> &[Block] {
|
|
@@ -740,7 +846,7 @@ impl ops::Deref for BlockList {
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
-impl ops::DerefMut for BlockList {
|
|
|
+impl ops::DerefMut for BlockVec {
|
|
|
fn deref_mut(&mut self) -> &mut [Block] {
|
|
|
unsafe {
|
|
|
slice::from_raw_parts_mut(*self.ptr, self.len)
|