Ver código fonte

Audit all unsafes.

We equip all `unsafe` blocks with a timestamped confirmation of an
audit. Whenever a block is changed, the confirmation should be removed
until a new audit is made.

The format of these messages is:

```
LAST AUDIT: yyyy-mm-dd (Username)
```

Close #28.
ticki 8 anos atrás
pai
commit
5a42b783d4
11 arquivos alterados com 108 adições e 19 exclusões
  1. 4 0
      src/allocator.rs
  2. 16 6
      src/block.rs
  3. 3 0
      src/bookkeeper.rs
  4. 16 2
      src/brk.rs
  5. 7 1
      src/cell.rs
  6. 2 0
      src/fail.rs
  7. 7 1
      src/log.rs
  8. 12 2
      src/ptr.rs
  9. 6 0
      src/sync.rs
  10. 13 4
      src/tls.rs
  11. 22 3
      src/vec.rs

+ 4 - 0
src/allocator.rs

@@ -129,6 +129,8 @@ impl GlobalAllocator {
         // Initialize the new allocator.
         let mut res = GlobalAllocator {
             inner: Bookkeeper::new(unsafe {
+                // LAST AUDIT: 2016-08-21 (Ticki).
+
                 Vec::from_raw_parts(initial_segment, 0)
             }),
         };
@@ -240,6 +242,8 @@ impl LocalAllocator {
             .alloc(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());
 
         unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
             // Register the thread destructor on the current thread.
             THREAD_ALLOCATOR.register_thread_destructor(dtor);
 

+ 16 - 6
src/block.rs

@@ -49,17 +49,14 @@ impl Block {
         Block {
             size: 0,
             // This won't alias `ptr`, since the block is empty.
-            ptr: unsafe { Pointer::new(*ptr) },
+            ptr: ptr,
         }
     }
 
     /// Create an empty block representing the left edge of this block
     #[inline]
     pub fn empty_left(&self) -> Block {
-        Block {
-            size: 0,
-            ptr: unsafe { Pointer::new(*self.ptr) },
-        }
+        Block::empty(self.ptr.clone())
     }
 
     /// Create an empty block representing the right edge of this block
@@ -69,9 +66,11 @@ impl Block {
         Block {
             size: 0,
             ptr: unsafe {
+                // LAST AUDIT: 2016-08-21 (Ticki).
+
                 // By the invariants of this type (the end is addressable), this conversion isn't
                 // overflowing.
-                Pointer::new(*self.ptr).offset(self.size as isize)
+                self.ptr.clone().offset(self.size as isize)
             },
         }
     }
@@ -127,6 +126,9 @@ impl Block {
         assert!(self.size <= block.size, "Block too small.");
 
         unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
+            // From the invariants of `Block`, this copy is well-defined.
             ptr::copy_nonoverlapping(*self.ptr, *block.ptr, self.size);
         }
     }
@@ -139,6 +141,10 @@ impl Block {
             log!(INTERNAL, "Zeroing {:?}", *self);
 
             unsafe {
+                // LAST AUDIT: 2016-08-21 (Ticki).
+
+                // Since the memory of the block is inaccessible (read-wise), zeroing it is fully
+                // safe.
                 intrinsics::volatile_set_memory(*self.ptr, 0, self.size);
             }
         }
@@ -177,6 +183,8 @@ impl Block {
             Block {
                 size: self.size - pos,
                 ptr: unsafe {
+                    // LAST AUDIT: 2016-08-21 (Ticki).
+
                     // This won't overflow due to the assertion above, ensuring that it is bounded
                     // by the address space. See the `split_at_mut` source from libcore.
                     self.ptr.offset(pos as isize)
@@ -217,6 +225,8 @@ impl Block {
                 Block {
                     size: old.size - aligner,
                     ptr: unsafe {
+                        // LAST AUDIT: 2016-08-21 (Ticki).
+
                         // The aligner is bounded by the size, which itself is bounded by the
                         // address space. Therefore, this conversion cannot overflow.
                         old.ptr.offset(aligner as isize)

+ 3 - 0
src/bookkeeper.rs

@@ -869,6 +869,8 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         let mut old_buf = None;
 
         unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
             // Memmove the elements to make a gap to the new block.
             ptr::copy(self.pool.get_unchecked(ind) as *const Block,
                       self.pool.get_unchecked_mut(ind + 1) as *mut Block,
@@ -896,6 +898,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
 
             // Update the pool byte count.
             self.total_bytes += block.size();
+
             // Mark it free and set the element.
             ptr::write(self.pool.get_unchecked_mut(ind), block.mark_free());
         }

+ 16 - 2
src/brk.rs

@@ -79,7 +79,13 @@ impl BrkLock {
             log!(DEBUG, "Releasing {:?} to the OS.", block);
 
             // We are. Now, sbrk the memory back. Due to the condition above, this is safe.
-            let res = unsafe { self.sbrk(-(block.size() as isize)) };
+            let res = unsafe {
+                // LAST AUDIT: 2016-08-21 (Ticki).
+
+                // Note that the end of the block is addressable, so the size is as well. For this
+                // reason the first bit is unset and the cast will never wrap.
+                self.sbrk(-(block.size() as isize))
+            };
 
             // In debug mode, we want to check for WTF-worthy scenarios.
             debug_assert!(res.is_ok(), "Failed to set the program break back.");
@@ -134,7 +140,11 @@ impl BrkLock {
         // allocated block. This ensures that it is properly memory aligned to the requested value.
         // TODO: Audit the casts.
         let (alignment_block, rest) = unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
             Block::from_raw_parts(
+                // Important! The conversion is fallible to avoid arithmetic overflow-based
+                // attacks.
                 self.sbrk(brk_size.try_into().unwrap()).unwrap_or_else(|()| fail::oom()),
                 brk_size,
             )
@@ -175,7 +185,11 @@ pub unsafe extern fn sbrk(size: isize) -> *mut u8 {
 
 /// Get the current program break.
 fn current_brk() -> Pointer<u8> {
-    unsafe { Pointer::new(syscalls::brk(ptr::null()) as *mut u8) }
+    unsafe {
+        // LAST AUDIT: 2016-08-21 (Ticki).
+
+        Pointer::new(syscalls::brk(ptr::null()) as *mut u8)
+    }
 }
 
 #[cfg(test)]

+ 7 - 1
src/cell.rs

@@ -25,7 +25,13 @@ impl<T> MoveCell<T> {
     /// Replace the inner data and return the old.
     #[inline]
     pub fn replace(&self, new: T) -> T {
-        mem::replace(unsafe { &mut *self.inner.get() }, new)
+        mem::replace(unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
+            // This is safe due to never aliasing the value, but simply transferring ownership to
+            // the caller.
+            &mut *self.inner.get()
+        }, new)
     }
 }
 

+ 2 - 0
src/fail.rs

@@ -44,6 +44,8 @@ pub fn oom() -> ! {
     log!(DEBUG, "Calling the global OOM handler.");
 
     unsafe {
+        // LAST AUDIT: 2016-08-21 (Ticki).
+
         // Transmute the atomic pointer to a function pointer and call it.
         (mem::transmute::<_, fn() -> !>(OOM_HANDLER.load(atomic::Ordering::SeqCst)))()
     }

+ 7 - 1
src/log.rs

@@ -92,7 +92,13 @@ macro_rules! assert {
             log!(ERROR, $( $arg ),*);
 
             #[allow(unused_unsafe)]
-            unsafe { intrinsics::abort() }
+            unsafe {
+                // LAST AUDIT: 2016-08-21 (Ticki).
+
+                // Right now there is no safe interface exposed for this, but it is safe no matter
+                // what.
+                intrinsics::abort();
+            }
         }
     }}
 }

+ 12 - 2
src/ptr.rs

@@ -41,7 +41,12 @@ impl<T> Pointer<T> {
     #[inline]
     pub const fn empty() -> Pointer<T> {
         Pointer {
-            ptr: unsafe { NonZero::new(0x1 as *mut T) },
+            ptr: unsafe {
+                // LAST AUDIT: 2016-08-21 (Ticki).
+
+                // 0x1 is non-zero.
+                NonZero::new(0x1 as *mut T)
+            },
             _phantom: marker::PhantomData,
         }
     }
@@ -52,7 +57,12 @@ impl<T> Pointer<T> {
     #[inline]
     pub fn cast<U>(self) -> Pointer<U> {
         Pointer {
-            ptr: unsafe { NonZero::new(*self as *mut U) },
+            ptr: unsafe {
+                // LAST AUDIT: 2016-08-21 (Ticki).
+
+                // Casting the pointer will preserve its nullable state.
+                NonZero::new(*self as *mut U)
+            },
             _phantom: marker::PhantomData,
         }
     }

+ 6 - 0
src/sync.rs

@@ -74,6 +74,9 @@ impl<'a, T> ops::Deref for MutexGuard<'a, T> {
     #[inline]
     fn deref(&self) -> &T {
         unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
+            // Aliasing is allowed due to the lock representing mutually exclusive access.
             &*self.mutex.inner.get()
         }
     }
@@ -82,6 +85,9 @@ impl<'a, T> ops::Deref for MutexGuard<'a, T> {
 impl<'a, T> ops::DerefMut for MutexGuard<'a, T> {
     fn deref_mut(&mut self) -> &mut T {
         unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
+            // Aliasing is allowed due to the lock representing mutually exclusive access.
             &mut *self.mutex.inner.get()
         }
     }

+ 13 - 4
src/tls.rs

@@ -46,9 +46,12 @@ impl<T: 'static> Key<T> {
         // Logging.
         log!(INTERNAL, "Registering thread destructor.");
 
-        // This is safe due to sharing memory layout.
-        thread_destructor::register(&self.inner as *const T as *const u8 as *mut u8,
-                                    unsafe { mem::transmute(dtor) });
+        thread_destructor::register(&self.inner as *const T as *const u8 as *mut u8, unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
+            // This is safe due to sharing memory layout.
+            mem::transmute(dtor)
+        });
     }
 }
 
@@ -67,6 +70,12 @@ macro_rules! tls {
     (#[$($attr:meta),*] static $name:ident: $ty:ty = $val:expr;) => {
         $(#[$attr])*
         #[thread_local]
-        static $name: tls::Key<$ty> = unsafe { tls::Key::new($val) };
+        static $name: tls::Key<$ty> = unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
+            // This is secure due to being stored in a thread-local variable and thus being bounded
+            // by the current thread.
+            tls::Key::new($val)
+        };
     }
 }

+ 22 - 3
src/vec.rs

@@ -61,9 +61,13 @@ impl<T: Leak> Vec<T> {
 
         // Update the fields of `self`.
         self.cap = new_cap;
-        self.ptr = unsafe { Pointer::new(*Pointer::from(block) as *mut T) };
+        self.ptr = Pointer::from(block).cast();
         self.len = old.len;
         unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
+            // Due to the invariants of `Block`, this copy is safe (the pointer is valid and
+            // unaliased).
             ptr::copy_nonoverlapping(*old.ptr, *self.ptr, old.len);
         }
 
@@ -87,6 +91,8 @@ impl<T: Leak> Vec<T> {
         } else {
             // Place the element in the end of the vector.
             unsafe {
+                // LAST AUDIT: 2016-08-21 (Ticki).
+
                 // By the invariants of this type (the size is bounded by the address space), this
                 // conversion isn't overflowing.
                 ptr::write((*self.ptr).offset(self.len as isize), elem);
@@ -107,7 +113,9 @@ impl<T: Leak> Vec<T> {
             None
         } else {
             unsafe {
-                // Decrement the length.
+                // LAST AUDIT: 2016-08-21 (Ticki).
+
+                // Decrement the length. This won't underflow due to the conditional above.
                 self.len -= 1;
 
                 // We use `ptr::read` since the element is unaccessible due to the decrease in the
@@ -167,7 +175,12 @@ impl<T: Leak> Default for Vec<T> {
 /// Cast this vector to the respective block.
 impl<T: Leak> From<Vec<T>> for Block {
     fn from(from: Vec<T>) -> Block {
-        unsafe { Block::from_raw_parts(from.ptr.cast(), from.cap * mem::size_of::<T>()) }
+        unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
+            // The invariants maintain safety.
+            Block::from_raw_parts(from.ptr.cast(), from.cap * mem::size_of::<T>())
+        }
     }
 }
 
@@ -177,6 +190,9 @@ impl<T: Leak> ops::Deref for Vec<T> {
     #[inline]
     fn deref(&self) -> &[T] {
         unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
+            // The invariants maintain safety.
             slice::from_raw_parts(*self.ptr as *const T, self.len)
         }
     }
@@ -186,6 +202,9 @@ impl<T: Leak> ops::DerefMut for Vec<T> {
     #[inline]
     fn deref_mut(&mut self) -> &mut [T] {
         unsafe {
+            // LAST AUDIT: 2016-08-21 (Ticki).
+
+            // The invariants maintain safety.
             slice::from_raw_parts_mut(*self.ptr as *mut T, self.len)
         }
     }