diff --git a/library/core/src/slice/sort/shared/smallsort.rs b/library/core/src/slice/sort/shared/smallsort.rs
index 09f898309bd65..2584a18e84f39 100644
--- a/library/core/src/slice/sort/shared/smallsort.rs
+++ b/library/core/src/slice/sort/shared/smallsort.rs
@@ -3,6 +3,13 @@
 use crate::mem::{self, ManuallyDrop, MaybeUninit};
 use crate::slice::sort::shared::FreezeMarker;
 use crate::{intrinsics, ptr, slice};
+use safety::{ensures, requires};
+
+#[allow(unused)]
+use crate::ub_checks;
+
+#[cfg(kani)]
+use crate::kani;
 
 // It's important to differentiate between SMALL_SORT_THRESHOLD performance for
 // small slices and small-sort performance sorting small sub-slices as part of
@@ -62,6 +69,26 @@ impl<T: FreezeMarker> StableSmallSortTypeImpl for T {
     }
 }
 
+// // This wrapper contract function exists because:
+// // - Kani's contract attribute macros don't work with `default fn`
+// // - we cannot specify the trait member function with `proof_for_contract`
+// #[cfg(kani)]
+// #[kani::modifies(v)]
+// #[ensures(|_| {
+//     let mut is_less = is_less.clone();
+//     v.is_sorted_by(|a, b| !is_less(b, a))
+// })]
+// pub(crate) fn _stable_small_sort_type_impl_small_sort<T, F>(
+//     v: &mut [T],
+//     scratch: &mut [MaybeUninit<T>],
+//     is_less: &mut F,
+// )
+// where
+//     F: FnMut(&T, &T) -> bool + Clone,
+// {
+//     <T as StableSmallSortTypeImpl>::small_sort(v, scratch, is_less);
+// }
+
 /// Using a trait allows us to specialize on `Freeze` which in turn allows us to make safe
 /// abstractions.
 pub(crate) trait UnstableSmallSortTypeImpl: Sized {
@@ -102,6 +129,25 @@ impl<T: FreezeMarker> UnstableSmallSortTypeImpl for T {
     }
 }
 
+// // This wrapper contract function exists because:
+// // - Kani's contract attribute macros don't work with `default fn`
+// // - we cannot specify the trait member function with `proof_for_contract`
+// #[cfg(kani)]
+// #[kani::modifies(v)]
+// #[ensures(|_| {
+//     let mut is_less = is_less.clone();
+//     v.is_sorted_by(|a, b| !is_less(b, a))
+// })]
+// pub(crate) fn _unstable_small_sort_type_impl_small_sort<T, F>(
+//     v: &mut [T],
+//     is_less: &mut F,
+// )
+// where
+//     F: FnMut(&T, &T) -> bool + Clone,
+// {
+//     <T as UnstableSmallSortTypeImpl>::small_sort(v, is_less);
+// }
+
 /// FIXME(const_trait_impl) use original ipnsort approach with choose_unstable_small_sort,
 /// as found here .
 pub(crate) trait UnstableSmallSortFreezeTypeImpl: Sized + FreezeMarker {
@@ -170,6 +216,26 @@ impl<T: FreezeMarker + CopyMarker> UnstableSmallSortFreezeTypeImpl for T {
     }
 }
 
+// // This wrapper contract function exists because:
+// // - Kani's contract attribute macros don't work with `default fn`
+// // - we cannot specify the trait member function with `proof_for_contract`
+// #[cfg(kani)]
+// #[kani::modifies(v)]
+// #[ensures(|_| {
+//     let mut is_less = is_less.clone();
+//     v.is_sorted_by(|a, b| !is_less(b, a))
+// })]
+// pub(crate) fn _unstable_small_sort_freeze_type_impl_small_sort<T, F>(
+//     v: &mut [T],
+//     is_less: &mut F,
+// )
+// where
+//     T: FreezeMarker + CopyMarker,
+//     F: FnMut(&T, &T) -> bool + Clone,
+// {
+//     <T as UnstableSmallSortFreezeTypeImpl>::small_sort(v, is_less);
+// }
+
 /// Optimal number of comparisons, and good perf.
 const SMALL_SORT_FALLBACK_THRESHOLD: usize = 16;
 
@@ -539,6 +605,31 @@ where
 ///
 /// # Safety
 /// begin < tail and p must be valid and initialized for all begin <= p <= tail.
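+// The contract below encodes the documented safety condition: `modifies`
+// bounds the writes to the `begin..=tail` region, `requires` additionally
+// demands that `[begin, tail)` is already sorted, and `ensures` states
+// that the whole `[begin, tail]` range is sorted on return.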
+#[cfg_attr(
+    kani,
+    kani::modifies(
+        ptr::slice_from_raw_parts_mut(begin, tail.addr().saturating_sub(begin.addr() - 1))
+    )
+)]
+#[requires(begin.addr() < tail.addr() && {
+    let len = tail.addr() - begin.addr();
+    let is_less: &mut F = unsafe { mem::transmute(&is_less) };
+    (0..=len).into_iter().all(|i| {
+        let p = begin.add(i);
+        ub_checks::can_dereference(p as *const _) &&
+        ub_checks::can_write(p) &&
+        ub_checks::same_allocation(begin as *const _, p as *const _)
+    }) && (0..(len - 1)).into_iter().all(|i| !is_less(&*begin.add(i + 1), &*begin.add(i)))
+})]
+#[ensures(|_| {
+    let len = tail.addr() - begin.addr();
+    let is_less: &mut F = unsafe { mem::transmute(&is_less) };
+    (0..len).into_iter().all(|i| !is_less(&*begin.add(i + 1), &*begin.add(i)))
+})]
 unsafe fn insert_tail<T, F: FnMut(&T, &T) -> bool>(begin: *mut T, tail: *mut T, is_less: &mut F) {
     // SAFETY: see individual comments.
     unsafe {
@@ -556,7 +647,13 @@ unsafe fn insert_tail<T, F: FnMut(&T, &T) -> bool>(begin: *mut T, tail: *mut T,
         let tmp = ManuallyDrop::new(tail.read());
         let mut gap_guard = CopyOnDrop { src: &*tmp, dst: tail, len: 1 };
 
-        loop {
+        #[safety::loop_invariant(
+            sift.addr() >= begin.addr() && sift.addr() < tail.addr()
+        )]
+        // FIXME: This was `loop`, but Kani's loop contracts don't support `loop`.
+        // Once they do, replace `while true` with the original `loop`.
+        #[allow(while_true)]
+        while true {
             // SAFETY: we move sift into the gap (which is valid), and point the
             // gap guard destination at sift, ensuring that if a panic occurs the
             // gap is once again filled.
@@ -577,6 +674,15 @@ unsafe fn insert_tail<T, F: FnMut(&T, &T) -> bool>(begin: *mut T, tail: *mut T,
 }
 
 /// Sort `v` assuming `v[..offset]` is already sorted.
+#[cfg_attr(kani, kani::modifies(v))]
+#[requires(offset != 0 && offset <= v.len() && {
+    let is_less: &mut F = unsafe { mem::transmute(&is_less) };
+    v[..offset].is_sorted_by(|a, b| !is_less(b, a))
+})]
+#[ensures(|_| {
+    let is_less: &mut F = unsafe { mem::transmute(&is_less) };
+    v.is_sorted_by(|a, b| !is_less(b, a))
+})]
 pub fn insertion_sort_shift_left<T, F: FnMut(&T, &T) -> bool>(
     v: &mut [T],
     offset: usize,
@@ -597,6 +703,14 @@ pub fn insertion_sort_shift_left<T, F: FnMut(&T, &T) -> bool>(
         let v_end = v_base.add(len);
         let mut tail = v_base.add(offset);
         while tail != v_end {
+            // FIXME: This should be a loop contract, but sadly, making this
+            // into a loop invariant causes OOM.
+            #[cfg(kani)]
+            kani::assert(
+                tail.addr() > v_base.addr() && tail.addr() <= v_end.addr(),
+                "loop invariants",
+            );
+
             // SAFETY: v_base and tail are both valid pointers to elements, and
             // v_base < tail since we checked offset != 0.
             insert_tail(v_base, tail, is_less);
@@ -609,6 +723,28 @@
 
 /// SAFETY: The caller MUST guarantee that `v_base` is valid for 4 reads and
 /// `dst` is valid for 4 writes. The result will be stored in `dst[0..4]`.
+#[cfg_attr(kani, kani::modifies(dst, dst.add(1), dst.add(2), dst.add(3)))]
+#[requires(
+    (0..4).into_iter().all(|i| {
+        let p = v_base.add(i);
+        ub_checks::can_dereference(p) &&
+        ub_checks::same_allocation(v_base, p)
+    })
+)]
+#[requires(
+    (0..4).into_iter().all(|i| {
+        let p = dst.add(i);
+        ub_checks::can_write(p) &&
+        ub_checks::same_allocation(dst, p)
+    })
+)]
+#[ensures(|_| {
+    let is_less: &mut F = unsafe { mem::transmute(&is_less) };
+    (0..3).into_iter().all(|i| !is_less(
+        &*dst.add(i + 1),
+        &*dst.add(i),
+    ))
+})]
 pub unsafe fn sort4_stable<T, F: FnMut(&T, &T) -> bool>(
     v_base: *const T,
     dst: *mut T,
@@ -870,3 +1006,138 @@ pub(crate) const fn has_efficient_in_place_swap<T>() -> bool {
     // Heuristic that holds true on all tested 64-bit capable architectures.
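+    // For example, `i32` and `u64` (at most 8 bytes) qualify, while `u128`
+    // (16 bytes) does not, as `check_has_efficient_in_place_swap` below verifies.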
     mem::size_of::<T>() <= 8 // mem::size_of::<u64>()
 }
+
+#[cfg(kani)]
+#[unstable(feature = "kani", issue = "none")]
+mod verify {
+    use super::*;
+
+    // The maximum length of the slice that `insertion_sort_shift_left`
+    // is called upon.
+    // The value is from the following line:
+    // https://github.com/model-checking/verify-rust-std/blob/1a38674ad6753e3a78e0181d1fe613f3b25ebacd/library/core/src/slice/sort/shared/smallsort.rs#L330
+    const INSERTION_SORT_MAX_LEN: usize = 17;
+
+    #[kani::proof]
+    pub fn check_swap_if_less() {
+        let mut array: [u8; SMALL_SORT_GENERAL_THRESHOLD] = kani::any();
+        let a_pos = kani::any_where(|x: &usize| *x < SMALL_SORT_GENERAL_THRESHOLD);
+        let b_pos = kani::any_where(|x: &usize| *x < SMALL_SORT_GENERAL_THRESHOLD);
+        let mut is_less = |x: &u8, y: &u8| x < y;
+        let expected = {
+            let mut array = array.clone();
+            let a: u8 = array[a_pos];
+            let b: u8 = array[b_pos];
+            if is_less(&b, &a) {
+                array[a_pos] = b;
+                array[b_pos] = a;
+            }
+            array
+        };
+        unsafe {
+            swap_if_less(array.as_mut_ptr(), a_pos, b_pos, &mut is_less);
+        }
+        kani::assert(
+            array == expected,
+            "swapped slice is different from the expectation"
+        );
+    }
+
+    #[kani::proof_for_contract(insert_tail)]
+    #[kani::unwind(17)]
+    pub fn check_insert_tail() {
+        let mut array: [u8; INSERTION_SORT_MAX_LEN] = kani::any();
+        let tail = kani::any_where(|x: &usize| *x < INSERTION_SORT_MAX_LEN);
+        let mut is_less = |x: &u8, y: &u8| x < y;
+        unsafe {
+            let begin = array.as_mut_ptr();
+            let tail = begin.add(tail);
+            insert_tail(begin, tail, &mut is_less);
+        }
+    }
+
+    // FIXME: Ideally, this should be `proof_for_contract(insertion_sort_shift_left)`,
+    // but this fails with OOM due to `proof_for_contract`'s perf issue.
+    //
+    // See https://github.com/model-checking/kani/issues/3797
+    #[kani::proof]
+    #[kani::stub_verified(insert_tail)]
+    #[kani::unwind(17)]
+    pub fn check_insertion_sort_shift_left() {
+        let slice_len = kani::any_where(|x: &usize| {
+            *x != 0 && *x <= INSERTION_SORT_MAX_LEN
+        });
+        let offset = kani::any_where(|x: &usize| *x != 0 && *x <= slice_len);
+        let mut is_less = |x: &u8, y: &u8| x < y;
+        let mut array = kani::any_where(|x: &[u8; INSERTION_SORT_MAX_LEN]| {
+            x[..offset].is_sorted_by(|a, b| !is_less(b, a))
+        });
+        insertion_sort_shift_left(&mut array[..slice_len], offset, &mut is_less);
+        kani::assert(
+            array[..slice_len].is_sorted_by(|a, b| !is_less(b, a)),
+            "slice is not sorted",
+        );
+    }
+
+    #[kani::proof_for_contract(sort4_stable)]
+    pub fn check_sort4_stable() {
+        let src: [u8; 4] = kani::any();
+        let mut dst = MaybeUninit::<[u8; 4]>::uninit();
+        let mut is_less = |x: &u8, y: &u8| x < y;
+        unsafe {
+            sort4_stable(src.as_ptr(), dst.as_mut_ptr() as *mut _, &mut is_less);
+        }
+    }
+
+    #[kani::proof]
+    pub fn check_sort4_stable_stability() {
+        let src: [(u8, u8); 4] = [
+            (kani::any(), 0),
+            (kani::any(), 1),
+            (kani::any(), 2),
+            (kani::any(), 3),
+        ];
+        let mut dst = MaybeUninit::<[(u8, u8); 4]>::uninit();
+        let mut is_less = |x: &(u8, u8), y: &(u8, u8)| x.0 < y.0;
+        unsafe {
+            sort4_stable(src.as_ptr(), dst.as_mut_ptr() as *mut _, &mut is_less);
+        }
+        let dst = unsafe { dst.assume_init() };
+        let mut is_stably_less = |x: &(u8, u8), y: &(u8, u8)| {
+            if x.0 == y.0 {
+                x.1 < y.1
+            } else {
+                x.0 < y.0
+            }
+        };
+        kani::assert(
+            dst.is_sorted_by(|a, b| !is_stably_less(b, a)),
+            "slice is not stably sorted",
+        );
+    }
+
+    #[kani::proof]
+    pub fn check_has_efficient_in_place_swap() {
+        // There isn't much to verify, as the function just calls `mem::size_of`.
+        // So we just brought over some tests written by the author of smallsort,
+        // from Voultapher/sort-research-rs:
+        //
+        // https://github.com/Voultapher/sort-research-rs/blob/c0cb46214a8d6e10ae5f4e0c363c2dbcbf71966f/ipnsort/src/smallsort.rs#L758-L771
+        assert!(has_efficient_in_place_swap::<i32>());
+        assert!(has_efficient_in_place_swap::<u64>());
+        assert!(!has_efficient_in_place_swap::<u128>());
+    }
+
+    // #[kani::proof_for_contract(_stable_small_sort_type_impl_small_sort)]
+    // #[kani::stub_verified(insertion_sort_shift_left)]
+    // pub fn check_stable_small_sort_type_impl_small_sort() {
+    //     let mut array: [u8; SMALL_SORT_FALLBACK_THRESHOLD] = kani::any();
+    //     let len = kani::any_where(|x: &usize| *x <= SMALL_SORT_FALLBACK_THRESHOLD);
+    //     let mut scratch: [MaybeUninit<u8>; SMALL_SORT_GENERAL_SCRATCH_LEN]
+    //         = [const { MaybeUninit::uninit() }; SMALL_SORT_GENERAL_SCRATCH_LEN];
+    //     let mut is_less = |x: &u8, y: &u8| x < y;
+    //     _stable_small_sort_type_impl_small_sort(&mut array[..len], &mut scratch, &mut is_less);
+    // }
+}
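
The harnesses above all follow the same contract-plus-harness pattern. Below is a
minimal sketch of that pattern, not part of the patch: `contract_pattern_sketch`,
`checked_div`, and `check_checked_div` are hypothetical names, and the plain
`kani` attributes are what the `safety::{requires, ensures}` macros expand to
under `cfg(kani)`.

#[cfg(kani)]
mod contract_pattern_sketch {
    // Precondition: assumed when the function is verified against its own
    // contract, and asserted at call sites that reuse the verified contract
    // through `stub_verified`.
    #[kani::requires(divisor != 0)]
    // Postcondition: the closure receives the return value by reference,
    // mirroring the `#[ensures(|_| ...)]` closures in the patch.
    #[kani::ensures(|quotient| *quotient <= dividend)]
    fn checked_div(dividend: u32, divisor: u32) -> u32 {
        dividend / divisor
    }

    // Like `check_insert_tail` above: feed fully arbitrary inputs; Kani
    // assumes `requires`, then checks `ensures` for every remaining input.
    #[kani::proof_for_contract(checked_div)]
    fn check_checked_div() {
        let dividend: u32 = kani::any();
        let divisor: u32 = kani::any();
        checked_div(dividend, divisor);
    }
}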