diff --git a/mini-lsm-mvcc/src/compact.rs b/mini-lsm-mvcc/src/compact.rs
index 56362961..bc0593af 100644
--- a/mini-lsm-mvcc/src/compact.rs
+++ b/mini-lsm-mvcc/src/compact.rs
@@ -330,7 +330,7 @@ impl LsmStorageInner {
                 assert!(result.is_none());
             }
             assert_eq!(l1_sstables, state.levels[0].1);
-            state.levels[0].1 = ids.clone();
+            state.levels[0].1.clone_from(&ids);
             let mut l0_sstables_map = l0_sstables.iter().copied().collect::<HashSet<_>>();
             state.l0_sstables = state
                 .l0_sstables
diff --git a/mini-lsm-mvcc/src/iterators/merge_iterator.rs b/mini-lsm-mvcc/src/iterators/merge_iterator.rs
index b1f5bdf7..d0d62061 100644
--- a/mini-lsm-mvcc/src/iterators/merge_iterator.rs
+++ b/mini-lsm-mvcc/src/iterators/merge_iterator.rs
@@ -12,27 +12,25 @@ struct HeapWrapper<I: StorageIterator>(pub usize, pub Box<I>);
 
 impl<I: StorageIterator> PartialEq for HeapWrapper<I> {
     fn eq(&self, other: &Self) -> bool {
-        self.partial_cmp(other).unwrap() == cmp::Ordering::Equal
+        self.cmp(other) == cmp::Ordering::Equal
     }
 }
 
 impl<I: StorageIterator> Eq for HeapWrapper<I> {}
 
 impl<I: StorageIterator> PartialOrd for HeapWrapper<I> {
-    #[allow(clippy::non_canonical_partial_ord_impl)]
     fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
-        match self.1.key().cmp(&other.1.key()) {
-            cmp::Ordering::Greater => Some(cmp::Ordering::Greater),
-            cmp::Ordering::Less => Some(cmp::Ordering::Less),
-            cmp::Ordering::Equal => self.0.partial_cmp(&other.0),
-        }
-        .map(|x| x.reverse())
+        Some(self.cmp(other))
     }
 }
 
 impl<I: StorageIterator> Ord for HeapWrapper<I> {
     fn cmp(&self, other: &Self) -> cmp::Ordering {
-        self.partial_cmp(other).unwrap()
+        self.1
+            .key()
+            .cmp(&other.1.key())
+            .then(self.0.cmp(&other.0))
+            .reverse()
     }
 }
 
diff --git a/mini-lsm-mvcc/src/key.rs b/mini-lsm-mvcc/src/key.rs
index b383282c..79329c77 100644
--- a/mini-lsm-mvcc/src/key.rs
+++ b/mini-lsm-mvcc/src/key.rs
@@ -14,10 +14,10 @@ pub const TS_ENABLED: bool = true;
 
 /// Temporary, should remove after implementing full week 3 day 1 + 2.
 pub const TS_DEFAULT: u64 = 0;
-pub const TS_MAX: u64 = std::u64::MAX;
-pub const TS_MIN: u64 = std::u64::MIN;
-pub const TS_RANGE_BEGIN: u64 = std::u64::MAX;
-pub const TS_RANGE_END: u64 = std::u64::MIN;
+pub const TS_MAX: u64 = u64::MAX;
+pub const TS_MIN: u64 = u64::MIN;
+pub const TS_RANGE_BEGIN: u64 = u64::MAX;
+pub const TS_RANGE_END: u64 = u64::MIN;
 
 impl<T: AsRef<[u8]>> Key<T> {
     pub fn into_inner(self) -> T {
diff --git a/mini-lsm-mvcc/src/mem_table.rs b/mini-lsm-mvcc/src/mem_table.rs
index 92d90d76..b7e0d77e 100644
--- a/mini-lsm-mvcc/src/mem_table.rs
+++ b/mini-lsm-mvcc/src/mem_table.rs
@@ -93,7 +93,7 @@ impl MemTable {
     /// Get a value by key. Should not be used in week 3.
     pub fn get(&self, key: KeySlice) -> Option<Bytes> {
         let key_bytes = KeyBytes::from_bytes_with_ts(
-            Bytes::from_static(unsafe { std::mem::transmute(key.key_ref()) }),
+            Bytes::from_static(unsafe { std::mem::transmute::<&[u8], &[u8]>(key.key_ref()) }),
             key.ts(),
         );
         self.map.get(&key_bytes).map(|e| e.value().clone())
diff --git a/mini-lsm-mvcc/src/table/bloom.rs b/mini-lsm-mvcc/src/table/bloom.rs
index 550542fa..ed649015 100644
--- a/mini-lsm-mvcc/src/table/bloom.rs
+++ b/mini-lsm-mvcc/src/table/bloom.rs
@@ -79,7 +79,7 @@ impl Bloom {
     /// Build bloom filter from key hashes
     pub fn build_from_key_hashes(keys: &[u32], bits_per_key: usize) -> Self {
         let k = (bits_per_key as f64 * 0.69) as u32;
-        let k = k.min(30).max(1);
+        let k = k.clamp(1, 30);
         let nbits = (keys.len() * bits_per_key).max(64);
         let nbytes = (nbits + 7) / 8;
         let nbits = nbytes * 8;
diff --git a/mini-lsm-starter/src/iterators/merge_iterator.rs b/mini-lsm-starter/src/iterators/merge_iterator.rs
index c4fae54b..78470d0b 100644
--- a/mini-lsm-starter/src/iterators/merge_iterator.rs
+++ b/mini-lsm-starter/src/iterators/merge_iterator.rs
@@ -14,27 +14,25 @@ struct HeapWrapper<I: StorageIterator>(pub usize, pub Box<I>);
 
 impl<I: StorageIterator> PartialEq for HeapWrapper<I> {
     fn eq(&self, other: &Self) -> bool {
-        self.partial_cmp(other).unwrap() == cmp::Ordering::Equal
+        self.cmp(other) == cmp::Ordering::Equal
     }
 }
 
 impl<I: StorageIterator> Eq for HeapWrapper<I> {}
 
 impl<I: StorageIterator> PartialOrd for HeapWrapper<I> {
-    #[allow(clippy::non_canonical_partial_ord_impl)]
     fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
-        match self.1.key().cmp(&other.1.key()) {
-            cmp::Ordering::Greater => Some(cmp::Ordering::Greater),
-            cmp::Ordering::Less => Some(cmp::Ordering::Less),
-            cmp::Ordering::Equal => self.0.partial_cmp(&other.0),
-        }
-        .map(|x| x.reverse())
+        Some(self.cmp(other))
     }
 }
 
 impl<I: StorageIterator> Ord for HeapWrapper<I> {
     fn cmp(&self, other: &Self) -> cmp::Ordering {
-        self.partial_cmp(other).unwrap()
+        self.1
+            .key()
+            .cmp(&other.1.key())
+            .then(self.0.cmp(&other.0))
+            .reverse()
     }
 }
 
diff --git a/mini-lsm-starter/src/table/bloom.rs b/mini-lsm-starter/src/table/bloom.rs
index 1f2f453f..71dfc5bb 100644
--- a/mini-lsm-starter/src/table/bloom.rs
+++ b/mini-lsm-starter/src/table/bloom.rs
@@ -72,7 +72,7 @@ impl Bloom {
     /// Build bloom filter from key hashes
    pub fn build_from_key_hashes(keys: &[u32], bits_per_key: usize) -> Self {
         let k = (bits_per_key as f64 * 0.69) as u32;
-        let k = k.min(30).max(1);
+        let k = k.clamp(1, 30);
         let nbits = (keys.len() * bits_per_key).max(64);
         let nbytes = (nbits + 7) / 8;
         let nbits = nbytes * 8;
diff --git a/mini-lsm/src/compact.rs b/mini-lsm/src/compact.rs
index fd4ec75f..695ab3bb 100644
--- a/mini-lsm/src/compact.rs
+++ b/mini-lsm/src/compact.rs
@@ -284,7 +284,7 @@ impl LsmStorageInner {
                 assert!(result.is_none());
             }
             assert_eq!(l1_sstables, state.levels[0].1);
-            state.levels[0].1 = ids.clone();
+            state.levels[0].1.clone_from(&ids);
             let mut l0_sstables_map = l0_sstables.iter().copied().collect::<HashSet<_>>();
             state.l0_sstables = state
                 .l0_sstables
diff --git a/mini-lsm/src/iterators/merge_iterator.rs b/mini-lsm/src/iterators/merge_iterator.rs
index b1f5bdf7..d0d62061 100644
--- a/mini-lsm/src/iterators/merge_iterator.rs
+++ b/mini-lsm/src/iterators/merge_iterator.rs
@@ -12,27 +12,25 @@ struct HeapWrapper<I: StorageIterator>(pub usize, pub Box<I>);
 
 impl<I: StorageIterator> PartialEq for HeapWrapper<I> {
     fn eq(&self, other: &Self) -> bool {
-        self.partial_cmp(other).unwrap() == cmp::Ordering::Equal
+        self.cmp(other) == cmp::Ordering::Equal
     }
 }
 
 impl<I: StorageIterator> Eq for HeapWrapper<I> {}
 
 impl<I: StorageIterator> PartialOrd for HeapWrapper<I> {
-    #[allow(clippy::non_canonical_partial_ord_impl)]
     fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
-        match self.1.key().cmp(&other.1.key()) {
-            cmp::Ordering::Greater => Some(cmp::Ordering::Greater),
-            cmp::Ordering::Less => Some(cmp::Ordering::Less),
-            cmp::Ordering::Equal => self.0.partial_cmp(&other.0),
-        }
-        .map(|x| x.reverse())
+        Some(self.cmp(other))
     }
 }
 
 impl<I: StorageIterator> Ord for HeapWrapper<I> {
     fn cmp(&self, other: &Self) -> cmp::Ordering {
-        self.partial_cmp(other).unwrap()
+        self.1
+            .key()
+            .cmp(&other.1.key())
+            .then(self.0.cmp(&other.0))
+            .reverse()
     }
 }
 
diff --git a/mini-lsm/src/table/bloom.rs b/mini-lsm/src/table/bloom.rs
index 550542fa..ed649015 100644
--- a/mini-lsm/src/table/bloom.rs
+++ b/mini-lsm/src/table/bloom.rs
@@ -79,7 +79,7 @@ impl Bloom {
     /// Build bloom filter from key hashes
     pub fn build_from_key_hashes(keys: &[u32], bits_per_key: usize) -> Self {
         let k = (bits_per_key as f64 * 0.69) as u32;
-        let k = k.min(30).max(1);
+        let k = k.clamp(1, 30);
         let nbits = (keys.len() * bits_per_key).max(64);
         let nbytes = (nbits + 7) / 8;
         let nbits = nbytes * 8;