Browse Source

Use atomic_load_unordered for first word load in misaligned case

Gary Guo 3 years ago
parent
commit
ce86d41b4f
1 changed file with 4 additions and 4 deletions
  1. 4 4
      src/mem/impls.rs

+ 4 - 4
src/mem/impls.rs

@@ -61,8 +61,8 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
 
         // Realign src
         let mut src_aligned = (src as usize & !WORD_MASK) as *mut usize;
-        // XXX: Could this possibly be UB?
-        let mut prev_word = *src_aligned;
+        // This will read (but won't use) bytes out of bounds.
+        let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned);
 
         while dest_usize < dest_end {
             src_aligned = src_aligned.add(1);
@@ -154,8 +154,8 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
 
         // Realign src_aligned
         let mut src_aligned = (src as usize & !WORD_MASK) as *mut usize;
-        // XXX: Could this possibly be UB?
-        let mut prev_word = *src_aligned;
+        // This will read (but won't use) bytes out of bounds.
+        let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned);
 
         while dest_start < dest_usize {
             src_aligned = src_aligned.sub(1);