From 36a097446ca137500f21530c0da6d43ab073d873 Mon Sep 17 00:00:00 2001
From: Phoebe Bell
Date: Sun, 17 Nov 2019 17:40:25 -0800
Subject: [PATCH] #66219 finished documenting libcore!

---
 src/libcore/iter/adapters/zip.rs |  7 +++-
 src/libcore/mem/maybe_uninit.rs  |  7 +++-
 src/libcore/slice/memchr.rs      |  9 ++--
 src/libcore/slice/mod.rs         | 70 ++++++++++++++++++++++++++++----
 src/libcore/slice/sort.rs        | 25 +++++++++++-
 5 files changed, 100 insertions(+), 18 deletions(-)

diff --git a/src/libcore/iter/adapters/zip.rs b/src/libcore/iter/adapters/zip.rs
index 14d9d5499b880..5907491cfe527 100644
--- a/src/libcore/iter/adapters/zip.rs
+++ b/src/libcore/iter/adapters/zip.rs
@@ -1,5 +1,3 @@
-// ignore-tidy-undocumented-unsafe
-
 use crate::cmp;
 
 use super::super::{Iterator, DoubleEndedIterator, ExactSizeIterator, FusedIterator, TrustedLen};
@@ -165,11 +163,13 @@ impl<A, B> ZipImpl<A, B> for Zip<A, B>
         if self.index < self.len {
             let i = self.index;
             self.index += 1;
+            // SAFETY: checked that i < min(a.len(), b.len())
             unsafe {
                 Some((self.a.get_unchecked(i), self.b.get_unchecked(i)))
             }
         } else if A::may_have_side_effect() && self.index < self.a.len() {
             // match the base implementation's potential side effects
+            // SAFETY: checked that index < a.len()
             unsafe {
                 self.a.get_unchecked(self.index);
             }
@@ -194,9 +194,11 @@ impl<A, B> ZipImpl<A, B> for Zip<A, B>
             let i = self.index;
             self.index += 1;
             if A::may_have_side_effect() {
+                // SAFETY: i < end < self.len
                 unsafe { self.a.get_unchecked(i); }
             }
             if B::may_have_side_effect() {
+                // SAFETY: i < end < self.len
                 unsafe { self.b.get_unchecked(i); }
             }
         }
@@ -229,6 +231,7 @@ impl<A, B> ZipImpl<A, B> for Zip<A, B>
         if self.index < self.len {
             self.len -= 1;
             let i = self.len;
+            // SAFETY: i < min(a.len(), b.len())
             unsafe {
                 Some((self.a.get_unchecked(i), self.b.get_unchecked(i)))
             }
diff --git a/src/libcore/mem/maybe_uninit.rs b/src/libcore/mem/maybe_uninit.rs
index d35a5ce57fe9f..f61354067be9c 100644
--- a/src/libcore/mem/maybe_uninit.rs
+++ b/src/libcore/mem/maybe_uninit.rs
@@ -1,8 +1,6 @@
 use crate::intrinsics;
 use crate::mem::ManuallyDrop;
 
-// ignore-tidy-undocumented-unsafe
-
 /// A wrapper type to construct uninitialized instances of `T`.
 ///
 /// # Initialization invariant
@@ -292,6 +290,7 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_uninit_array", issue = "0")]
     #[inline(always)]
     pub fn uninit_array<const LEN: usize>() -> [Self; LEN] {
+        // SAFETY: see type-level documentation
         unsafe {
             MaybeUninit::<[MaybeUninit<T>; LEN]>::uninit().assume_init()
         }
@@ -341,6 +340,7 @@ impl<T> MaybeUninit<T> {
     #[inline]
     pub fn zeroed() -> MaybeUninit<T> {
         let mut u = MaybeUninit::<T>::uninit();
+        // SAFETY: writing zero bytes into `u` is always sound; whether that is a valid `T` depends on `T` (see above)
         unsafe {
             u.as_mut_ptr().write_bytes(0u8, 1);
         }
@@ -354,6 +354,7 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_extra", issue = "63567")]
     #[inline(always)]
     pub fn write(&mut self, val: T) -> &mut T {
+        // SAFETY: initializes the field and returns a reference to the value
         unsafe {
             self.value = ManuallyDrop::new(val);
             self.get_mut()
         }
@@ -394,6 +395,7 @@ impl<T> MaybeUninit<T> {
     #[stable(feature = "maybe_uninit", since = "1.36.0")]
     #[inline(always)]
     pub fn as_ptr(&self) -> *const T {
+        // SAFETY: unsafe if uninitialized
         unsafe { &*self.value as *const T }
     }
@@ -431,6 +433,7 @@ impl<T> MaybeUninit<T> {
     #[stable(feature = "maybe_uninit", since = "1.36.0")]
     #[inline(always)]
     pub fn as_mut_ptr(&mut self) -> *mut T {
+        // SAFETY: unsafe if uninitialized
         unsafe { &mut *self.value as *mut T }
     }
diff --git a/src/libcore/slice/memchr.rs b/src/libcore/slice/memchr.rs
index 2a2169dd348c2..713efaf960aa0 100644
--- a/src/libcore/slice/memchr.rs
+++ b/src/libcore/slice/memchr.rs
@@ -1,8 +1,6 @@
 // Original implementation taken from rust-memchr.
 // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
 
-// ignore-tidy-undocumented-unsafe
-
 use crate::cmp;
 use crate::mem;
@@ -63,6 +61,9 @@ pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
 
     if len >= 2 * usize_bytes {
         while offset <= len - 2 * usize_bytes {
+            // SAFETY: both u and v can be read since
+            // ptr + offset + usize_bytes <= ptr + len - usize_bytes < ptr + len
+            // means the pointers are in bounds
             unsafe {
                 let u = *(ptr.add(offset) as *const usize);
                 let v = *(ptr.add(offset + usize_bytes) as *const usize);
@@ -95,7 +96,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
     type Chunk = usize;
 
     let (min_aligned_offset, max_aligned_offset) = {
-        // We call this just to obtain the length of the prefix and suffix.
+        // SAFETY: We call this just to obtain the length of the prefix and suffix.
         // In the middle we always process two chunks at once.
         let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() };
         (prefix.len(), len - suffix.len())
@@ -113,6 +114,8 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
     let chunk_bytes = mem::size_of::<Chunk>();
 
     while offset > min_aligned_offset {
+        // SAFETY: since offset is always aligned, offset > min_aligned_offset means
+        // that offset - 2 * chunk_bytes is within bounds.
         unsafe {
             let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk);
             let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk);
diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs
index c8fe9f9861315..4c454e8faf79e 100644
--- a/src/libcore/slice/mod.rs
+++ b/src/libcore/slice/mod.rs
@@ -1,5 +1,4 @@
 // ignore-tidy-filelength
-// ignore-tidy-undocumented-unsafe
 
 //! Slice management and manipulation.
 //!
@@ -63,10 +62,11 @@ impl<T> [T] {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
-    // SAFETY: const sound because we transmute out the length field as a usize (which it must be)
     #[allow(unused_attributes)]
     #[allow_internal_unstable(const_fn_union)]
     pub const fn len(&self) -> usize {
+        // SAFETY: const sound because we transmute out the length field as a usize
+        // (which it must be)
         unsafe {
             crate::ptr::Repr { rust: self }.raw.len
         }
@@ -441,7 +441,8 @@ impl<T> [T] {
     #[unstable(feature = "slice_ptr_range", issue = "65807")]
     #[inline]
     pub fn as_ptr_range(&self) -> Range<*const T> {
-        // The `add` here is safe, because:
+        let start = self.as_ptr();
+        // SAFETY: The `add` here is safe, because:
         //
         // - Both pointers are part of the same object, as pointing directly
         //   past the object also counts.
@@ -458,7 +459,6 @@ impl<T> [T] {
         //   the end of the address space.
         //
         // See the documentation of pointer::add.
-        let start = self.as_ptr();
         let end = unsafe { start.add(self.len()) };
         start..end
     }
@@ -482,8 +482,8 @@ impl<T> [T] {
     #[unstable(feature = "slice_ptr_range", issue = "65807")]
     #[inline]
     pub fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
-        // See as_ptr_range() above for why `add` here is safe.
         let start = self.as_mut_ptr();
+        // SAFETY: See as_ptr_range() above for why `add` here is safe.
         let end = unsafe { start.add(self.len()) };
         start..end
     }
@@ -509,6 +509,8 @@ impl<T> [T] {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub fn swap(&mut self, a: usize, b: usize) {
+        // SAFETY: self[a] and self[b] are both properly aligned (since they're taken from a slice)
+        // and valid for reads/writes (since indexing would panic otherwise)
         unsafe {
             // Can't take two mutable loans from one vector, so instead just cast
             // them to their raw pointers to do the swap
@@ -553,11 +555,16 @@ impl<T> [T] {
             // Use the llvm.bswap intrinsic to reverse u8s in a usize
             let chunk = mem::size_of::<usize>();
             while i + chunk - 1 < ln / 2 {
+                // SAFETY: see inline comments
                 unsafe {
+                    // within bounds since i + chunk - 1 < ln / 2 <= ln
                     let pa: *mut T = self.get_unchecked_mut(i);
+                    // within bounds since ln - i - chunk >= ln - ln / 2 >= 0 and ln - i - 1 < ln
                    let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
+                    // both are valid for reads since they're in this slice
                     let va = ptr::read_unaligned(pa as *mut usize);
                     let vb = ptr::read_unaligned(pb as *mut usize);
+                    // and they're valid for writes for the same reason
                     ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
                     ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
                 }
@@ -569,6 +576,7 @@ impl<T> [T] {
             // Use rotate-by-16 to reverse u16s in a u32
             let chunk = mem::size_of::<u32>() / 2;
             while i + chunk - 1 < ln / 2 {
+                // SAFETY: see above block
                 unsafe {
                     let pa: *mut T = self.get_unchecked_mut(i);
                     let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
@@ -583,6 +591,7 @@ impl<T> [T] {
 
         while i < ln / 2 {
             // Unsafe swap to avoid the bounds check in safe swap.
+            // SAFETY: i < ln / 2 implies 0 <= i < ln - i - 1 < ln, so both indices are in bounds
             unsafe {
                 let pa: *mut T = self.get_unchecked_mut(i);
                 let pb: *mut T = self.get_unchecked_mut(ln - i - 1);
@@ -608,11 +617,15 @@ impl<T> [T] {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub fn iter(&self) -> Iter<'_, T> {
+        // SAFETY: it is an invariant of slices that [ptr, ptr + self.len()) points to valid T
         unsafe {
             let ptr = self.as_ptr();
             assume(!ptr.is_null());
             let end = if mem::size_of::<T>() == 0 {
+                // ZSTs take up no space, so it would be an error to dereference this pointer.
+                // However, casting it to a *const u8 lets us compute an `end` with the
+                // right address anyway.
                (ptr as *const u8).wrapping_add(self.len()) as *const T
             } else {
                 ptr.add(self.len())
@@ -640,6 +653,7 @@ impl<T> [T] {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+        // SAFETY: it is an invariant of slices that [ptr, ptr + self.len()) points to valid T
         unsafe {
             let ptr = self.as_mut_ptr();
             assume(!ptr.is_null());
@@ -1075,6 +1089,7 @@ impl<T> [T] {
         let len = self.len();
         let ptr = self.as_mut_ptr();
 
+        // SAFETY: it is an invariant of slices that [ptr, ptr + self.len()) points to valid T
         unsafe {
             assert!(mid <= len);
@@ -1510,14 +1525,14 @@ impl<T> [T] {
         while size > 1 {
             let half = size / 2;
             let mid = base + half;
-            // mid is always in [0, size), that means mid is >= 0 and < size.
+            // SAFETY: mid is always in [0, size), that means mid is >= 0 and < size.
             // mid >= 0: by definition
             // mid < size: mid = size / 2 + size / 4 + size / 8 ...
             let cmp = f(unsafe { s.get_unchecked(mid) });
             base = if cmp == Greater { base } else { mid };
             size -= half;
         }
-        // base is always in [0, size) because base <= mid.
+        // SAFETY: base is always in [0, size) because base <= mid.
         let cmp = f(unsafe { s.get_unchecked(base) });
         if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) }
@@ -1959,6 +1974,10 @@ impl<T> [T] {
         let mut next_read: usize = 1;
         let mut next_write: usize = 1;
 
+        // SAFETY: ptr_read, prev_ptr_write and ptr_write are all in bounds because
+        //     ptr <= prev_ptr_write = ptr + next_write - 1
+        //         <  ptr_write      = ptr + next_write
+        //         <= ptr_read       = ptr + next_read < ptr + len
         unsafe {
             // Avoid bounds checks by using raw pointers.
             while next_read < len {
@@ -2042,6 +2061,8 @@ impl<T> [T] {
         assert!(mid <= self.len());
         let k = self.len() - mid;
 
+        // SAFETY: the range [p, p + self.len()) is valid for reading and writing, as it
+        // must be for any slice.
         unsafe {
             let p = self.as_mut_ptr();
             rotate::ptr_rotate(mid, p.add(mid), k);
@@ -2083,6 +2104,8 @@ impl<T> [T] {
         assert!(k <= self.len());
         let mid = self.len() - k;
 
+        // SAFETY: the range [p, p + self.len()) is valid for reading and writing, as it
+        // must be for any slice.
         unsafe {
             let p = self.as_mut_ptr();
             rotate::ptr_rotate(mid, p.add(mid), k);
@@ -2217,6 +2240,9 @@ impl<T> [T] {
     pub fn copy_from_slice(&mut self, src: &[T]) where T: Copy {
         assert_eq!(self.len(), src.len(), "destination and source slices have different lengths");
 
+        // SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
+        // checked to have the same length. The two slices cannot overlap because
+        // mutable references are exclusive.
         unsafe {
             ptr::copy_nonoverlapping(
                 src.as_ptr(), self.as_mut_ptr(), self.len());
@@ -2270,6 +2296,7 @@ impl<T> [T] {
         assert!(src_end <= self.len(), "src is out of bounds");
         let count = src_end - src_start;
         assert!(dest <= self.len() - count, "dest is out of bounds");
+        // SAFETY: src_start, src_end and dest were all checked to be within bounds above
         unsafe {
             ptr::copy(
                 self.as_ptr().add(src_start),
@@ -2330,6 +2357,9 @@ impl<T> [T] {
     pub fn swap_with_slice(&mut self, other: &mut [T]) {
         assert!(self.len() == other.len(), "destination and source slices have different lengths");
+        // SAFETY: `self` and `other` were checked to have the same length, and they
+        // cannot overlap because mutable references are exclusive, so the
+        // `swap_nonoverlapping` contract is upheld.
         unsafe {
             ptr::swap_nonoverlapping(
                 self.as_mut_ptr(), other.as_mut_ptr(), self.len());
@@ -2362,6 +2392,7 @@ impl<T> [T] {
         // iterative stein’s algorithm
         // We should still make this `const fn` (and revert to recursive algorithm if we do)
         // because relying on llvm to consteval all this is… well, it makes me uncomfortable.
+        // SAFETY: we make sure that a and b are nonzero
         let (ctz_a, mut ctz_b) = unsafe {
             if a == 0 { return b; }
             if b == 0 { return a; }
@@ -2377,6 +2408,7 @@ impl<T> [T] {
                 mem::swap(&mut a, &mut b);
             }
             b = b - a;
+            // SAFETY: we make sure that b is nonzero
             unsafe {
                 if b == 0 {
                     break;
                 }
@@ -2762,6 +2794,7 @@ impl<T> SliceIndex<[T]> for usize {
     #[inline]
     fn get(self, slice: &[T]) -> Option<&T> {
         if self < slice.len() {
+            // SAFETY: since it's usize, 0 <= self < slice.len()
             unsafe {
                 Some(self.get_unchecked(slice))
             }
@@ -2773,6 +2806,7 @@ impl<T> SliceIndex<[T]> for usize {
     #[inline]
     fn get_mut(self, slice: &mut [T]) -> Option<&mut T> {
         if self < slice.len() {
+            // SAFETY: since it's usize, 0 <= self < slice.len()
             unsafe {
                 Some(self.get_unchecked_mut(slice))
             }
@@ -2813,6 +2847,7 @@ impl<T> SliceIndex<[T]> for ops::Range<usize> {
         if self.start > self.end || self.end > slice.len() {
             None
         } else {
+            // SAFETY: 0 <= start <= end <= slice.len() since start is usize
             unsafe {
                 Some(self.get_unchecked(slice))
             }
@@ -2824,6 +2859,7 @@ impl<T> SliceIndex<[T]> for ops::Range<usize> {
         if self.start > self.end || self.end > slice.len() {
             None
         } else {
+            // SAFETY: 0 <= start <= end <= slice.len() since start is usize
             unsafe {
                 Some(self.get_unchecked_mut(slice))
             }
@@ -2847,6 +2883,7 @@ impl<T> SliceIndex<[T]> for ops::Range<usize> {
         } else if self.end > slice.len() {
             slice_index_len_fail(self.end, slice.len());
         }
+        // SAFETY: 0 <= start <= end <= slice.len() since start is usize
         unsafe {
             self.get_unchecked(slice)
         }
@@ -2859,6 +2896,7 @@ impl<T> SliceIndex<[T]> for ops::Range<usize> {
         } else if self.end > slice.len() {
             slice_index_len_fail(self.end, slice.len());
         }
+        // SAFETY: 0 <= start <= end <= slice.len() since start is usize
         unsafe {
             self.get_unchecked_mut(slice)
         }
@@ -3160,6 +3198,7 @@ macro_rules! iterator {
             // Helper function for creating a slice from the iterator.
             #[inline(always)]
             fn make_slice(&self) -> &'a [T] {
+                // SAFETY: [ptr, ptr + len) is guaranteed to be valid
                 unsafe { from_raw_parts(self.ptr, len!(self)) }
             }
@@ -3213,6 +3252,8 @@ macro_rules! iterator {
             #[inline]
             fn next(&mut self) -> Option<$elem> {
                 // could be implemented with slices, but this avoids bounds checks
+                // SAFETY: we can call next_unchecked as long as the iterator is
+                // not empty.
                 unsafe {
                     assume(!self.ptr.is_null());
                     if mem::size_of::<T>() != 0 {
@@ -3250,7 +3291,7 @@ macro_rules! iterator {
                     }
                     return None;
                 }
-                // We are in bounds. `post_inc_start` does the right thing even for ZSTs.
+                // SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
                 unsafe {
                     self.post_inc_start(n as isize);
                     Some(next_unchecked!(self))
@@ -3275,6 +3316,7 @@ macro_rules! iterator {
                     else { Ok(i + 1) }
                 }).err()
                     .map(|i| {
+                        // SAFETY: i < n holds because at most n elements are visited; `assume` only informs the optimizer
                         unsafe { assume(i < n) };
                         i
                     })
@@ -3293,6 +3335,7 @@ macro_rules! iterator {
                     else { Ok(i) }
                 }).err()
                     .map(|i| {
+                        // SAFETY: i < n holds because at most n elements are visited; `assume` only informs the optimizer
                         unsafe { assume(i < n) };
                         i
                     })
@@ -3306,6 +3349,8 @@ macro_rules! iterator {
             #[inline]
             fn next_back(&mut self) -> Option<$elem> {
                 // could be implemented with slices, but this avoids bounds checks
+                // SAFETY: we can call next_back_unchecked as long as the iterator is
+                // not empty.
                unsafe {
                     assume(!self.ptr.is_null());
                     if mem::size_of::<T>() != 0 {
@@ -3326,7 +3371,7 @@ macro_rules! iterator {
                     self.end = self.ptr;
                     return None;
                 }
-                // We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
+                // SAFETY: We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
                 unsafe {
                     self.pre_dec_end(n as isize);
                     Some(next_back_unchecked!(self))
@@ -3523,6 +3568,7 @@ impl<'a, T> IterMut<'a, T> {
     /// ```
     #[stable(feature = "iter_to_slice", since = "1.4.0")]
     pub fn into_slice(self) -> &'a mut [T] {
+        // SAFETY: [ptr, ptr + len) is guaranteed to be valid
         unsafe { from_raw_parts_mut(self.ptr, len!(self)) }
     }
@@ -5365,6 +5411,7 @@ pub unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T]
 /// Converts a reference to T into a slice of length 1 (without copying).
 #[stable(feature = "from_ref", since = "1.28.0")]
 pub fn from_ref<T>(s: &T) -> &[T] {
+    // SAFETY: the reference guarantees it's valid for reads during its lifetime
     unsafe {
         from_raw_parts(s, 1)
     }
@@ -5373,6 +5420,7 @@ pub fn from_ref<T>(s: &T) -> &[T] {
 /// Converts a reference to T into a slice of length 1 (without copying).
 #[stable(feature = "from_ref", since = "1.28.0")]
 pub fn from_mut<T>(s: &mut T) -> &mut [T] {
+    // SAFETY: the mut reference guarantees it's valid for reads and writes during its lifetime
     unsafe {
         from_raw_parts_mut(s, 1)
     }
@@ -5481,6 +5529,7 @@ impl<A> SlicePartialEq<A> for [A]
         if self.as_ptr() == other.as_ptr() {
             return true;
         }
+        // SAFETY: both are references to slices of the same length (checked above)
         unsafe {
             let size = mem::size_of_val(self);
             memcmp(self.as_ptr() as *const u8,
@@ -5558,6 +5607,8 @@ impl<A> SliceOrd<A> for [A]
 impl SliceOrd<u8> for [u8] {
     #[inline]
     fn compare(&self, other: &[u8]) -> Ordering {
+        // SAFETY: both are references to slices, which are guaranteed to be
+        // valid for at least the smaller of the two lengths
         let order = unsafe {
             memcmp(self.as_ptr(), other.as_ptr(),
                    cmp::min(self.len(), other.len()))
@@ -5623,6 +5674,7 @@ impl SliceContains for u8 {
 impl SliceContains for i8 {
     fn slice_contains(&self, x: &[Self]) -> bool {
         let byte = *self as u8;
+        // SAFETY: just a way to cast the slice from i8 to u8
         let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
         memchr::memchr(byte, bytes).is_some()
     }
diff --git a/src/libcore/slice/sort.rs b/src/libcore/slice/sort.rs
index a719a51b61605..566815776c2ed 100644
--- a/src/libcore/slice/sort.rs
+++ b/src/libcore/slice/sort.rs
@@ -6,8 +6,6 @@
 //! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our
 //! stable sorting implementation.
 
-// ignore-tidy-undocumented-unsafe
-
 use crate::cmp;
 use crate::mem::{self, MaybeUninit};
 use crate::ptr;
@@ -20,6 +18,7 @@ struct CopyOnDrop<T> {
 impl<T> Drop for CopyOnDrop<T> {
     fn drop(&mut self) {
+        // SAFETY: `src` and `dest` always point to valid, properly aligned `T`s when this guard is created
         unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); }
     }
 }
@@ -29,6 +28,7 @@ fn shift_head<T, F>(v: &mut [T], is_less: &mut F)
     where F: FnMut(&T, &T) -> bool
 {
     let len = v.len();
+    // SAFETY: See comments in block
     unsafe {
         // If the first two elements are out-of-order...
         if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
@@ -61,6 +61,7 @@ fn shift_tail<T, F>(v: &mut [T], is_less: &mut F)
     where F: FnMut(&T, &T) -> bool
 {
     let len = v.len();
+    // SAFETY: See comments in block
     unsafe {
         // If the last two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
@@ -104,6 +105,7 @@ fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -> bool
     let mut i = 1;
 
     for _ in 0..MAX_STEPS {
+        // SAFETY: 0 < i < len
         unsafe {
             // Find the next pair of adjacent out-of-order elements.
             while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
@@ -221,6 +223,7 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
     let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK];
 
     // The current block on the right side (from `r.sub(block_r)` to `r`).
+    // SAFETY: one-past-the-end pointers are allowed; `r` itself is never dereferenced
     let mut r = unsafe { l.add(v.len()) };
     let mut block_r = BLOCK;
     let mut start_r = ptr::null_mut();
@@ -269,6 +272,8 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
             let mut elem = l;
 
             for i in 0..block_l {
+                // SAFETY: `elem` stays within `block_l` elements of `l`, and `end_l` stays
+                // within `offsets_l`, which has room for `BLOCK >= block_l` entries
                 unsafe {
                     // Branchless comparison.
                     *end_l = i as u8;
@@ -285,6 +290,8 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
             let mut elem = r;
 
             for i in 0..block_r {
+                // SAFETY: `elem` stays within `block_r` elements below `r`, and `end_r` stays
+                // within `offsets_r`, which has room for `BLOCK >= block_r` entries
                 unsafe {
                     // Branchless comparison.
                     elem = elem.offset(-1);
@@ -304,6 +311,7 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
             // Instead of swapping one pair at the time, it is more efficient to perform a cyclic
             // permutation. This is not strictly equivalent to swapping, but produces a similar
             // result using fewer memory operations.
+            // SAFETY: safe since we keep start_l < end_l and start_r < end_r
             unsafe {
                 let tmp = ptr::read(left!());
                 ptr::copy_nonoverlapping(right!(), left!(), 1);
@@ -324,11 +332,13 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
 
         if start_l == end_l {
             // All out-of-order elements in the left block were moved. Move to the next block.
+            // SAFETY: we check that width(l, r) > 2 * block before dereferencing
             l = unsafe { l.offset(block_l as isize) };
         }
 
         if start_r == end_r {
             // All out-of-order elements in the right block were moved. Move to the previous block.
+            // SAFETY: we check that width(l, r) > 2 * block before dereferencing
             r = unsafe { r.offset(-(block_r as isize)) };
         }
@@ -346,6 +356,10 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
         // Move its remaining out-of-order elements to the far right.
         debug_assert_eq!(width(l, r), block_l);
         while start_l < end_l {
+            // SAFETY: the offsets stored in [start_l, end_l) are all smaller than
+            // block_l = width(l, r), so l.offset(*end_l) stays in bounds, and
+            // width(l, r) >= 1 while elements remain, so r.offset(-1) is in
+            // bounds as well
             unsafe {
                 end_l = end_l.offset(-1);
                 ptr::swap(l.offset(*end_l as isize), r.offset(-1));
@@ -358,6 +372,7 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
         // Move its remaining out-of-order elements to the far left.
         debug_assert_eq!(width(l, r), block_r);
         while start_r < end_r {
+            // SAFETY: see above comment
             unsafe {
                 end_r = end_r.offset(-1);
                 ptr::swap(l, r.offset(-(*end_r as isize) - 1));
@@ -389,6 +404,7 @@ fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool)
 
         // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
        // operation panics, the pivot will be automatically written back into the slice.
+        // SAFETY: pivot points to the first element of v
         let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
         let _pivot_guard = CopyOnDrop {
             src: &mut *tmp,
@@ -399,6 +415,7 @@ fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool)
         // Find the first pair of out-of-order elements.
         let mut l = 0;
         let mut r = v.len();
+        // SAFETY: safe since 0 <= l < r <= v.len(), l only increases, and r only decreases
         unsafe {
             // Find the first element greater than or equal to the pivot.
             while l < r && is_less(v.get_unchecked(l), pivot) {
@@ -438,6 +455,7 @@ fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize
 
     // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
     // operation panics, the pivot will be automatically written back into the slice.
+    // SAFETY: pivot points to the first element of v
     let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
     let _pivot_guard = CopyOnDrop {
         src: &mut *tmp,
@@ -449,6 +467,7 @@ fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize
     let mut l = 0;
     let mut r = v.len();
     loop {
+        // SAFETY: safe since 0 <= l < r <= v.len(), l only increases, and r only decreases
         unsafe {
             // Find the first element greater than the pivot.
             while l < r && !is_less(pivot, v.get_unchecked(l)) {
@@ -548,6 +567,8 @@ fn choose_pivot<T, F>(v: &mut [T], is_less: &mut F) -> (usize, bool)
     if len >= 8 {
         // Swaps indices so that `v[a] <= v[b]`.
+        // SAFETY: a, b, and c are all in bounds, and if len >= SHORTEST_MEDIAN_OF_MEDIANS
+        // then so are their neighbors at +/- 1
         let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
             if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
                 ptr::swap(a, b);