From e495219c8b8a04a563fcfb19dd72cdabd3ece5f2 Mon Sep 17 00:00:00 2001
From: Liu-Cheng Xu
Date: Sat, 5 Mar 2022 15:51:19 +0800
Subject: [PATCH] Fix the undeterministic storage proof recorded for the same
 execution (#10915)

* Add a test case for the determinism of recorded proof

* Replace HashMap with BTreeMap for the actual proof records

* cargo +nightly fmt --all

* Store the trie nodes in BTreeSet for StorageProof

* Nit

* Revert the BTreeMap changes and sort when converting to storage proof

* Remove PartialEq from StorageProof

* Remove unnecessary change

* Add `compare` method to StorageProof

* FMT

* Dummy change to trigger CI

* Use `BTreeSet` for StorageProof and keep using `Vec` for CompactProof

* Update comment on `iter_nodes`

* Revert `PartialEq` removal
---
 client/executor/src/native_executor.rs    |  5 +-
 client/offchain/src/api/http.rs           | 10 ++--
 .../state-machine/src/proving_backend.rs  | 49 +++++++++++++++----
 primitives/trie/src/storage_proof.rs      | 31 ++++++------
 primitives/trie/src/trie_stream.rs        |  2 +-
 5 files changed, 65 insertions(+), 32 deletions(-)

diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs
index 363ea474a7e43..669780f2a4b6a 100644
--- a/client/executor/src/native_executor.rs
+++ b/client/executor/src/native_executor.rs
@@ -462,8 +462,9 @@ impl RuntimeSpawn for RuntimeInstanceSpawn {
 				// https://github.com/paritytech/substrate/issues/7354
 				let mut instance = match module.new_instance() {
 					Ok(instance) => instance,
-					Err(error) =>
-						panic!("failed to create new instance from module: {}", error),
+					Err(error) => {
+						panic!("failed to create new instance from module: {}", error)
+					},
 				};
 
 				match instance
diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs
index bc8f81f25a643..2a7514116cb5d 100644
--- a/client/offchain/src/api/http.rs
+++ b/client/offchain/src/api/http.rs
@@ -432,8 +432,9 @@ impl HttpApi {
 						);
 					},
 					None => {}, // can happen if we detected an IO error when sending the body
-					_ =>
-						tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker"),
+					_ => {
+						tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker")
+					},
 				}
 			},
 
@@ -443,8 +444,9 @@ impl HttpApi {
 					self.requests.insert(id, HttpApiRequest::Fail(error));
 				},
 				None => {}, // can happen if we detected an IO error when sending the body
-				_ =>
-					tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker"),
+				_ => {
+					tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker")
+				},
 			},
 
 			None => {
diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs
index 934f08492deed..eeffcc8e47052 100644
--- a/primitives/state-machine/src/proving_backend.rs
+++ b/primitives/state-machine/src/proving_backend.rs
@@ -158,15 +158,13 @@ impl<Hash: std::hash::Hash + Eq> ProofRecorder<Hash> {
 
 	/// Convert into a [`StorageProof`].
 	pub fn to_storage_proof(&self) -> StorageProof {
-		let trie_nodes = self
-			.inner
-			.read()
-			.records
-			.iter()
-			.filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec()))
-			.collect();
-
-		StorageProof::new(trie_nodes)
+		StorageProof::new(
+			self.inner
+				.read()
+				.records
+				.iter()
+				.filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())),
+		)
 	}
 
 	/// Reset the internal state.
@@ -393,6 +391,7 @@ mod tests {
 		proving_backend::create_proof_check_backend, trie_backend::tests::test_trie,
 		InMemoryBackend,
 	};
+	use sp_core::H256;
 	use sp_runtime::traits::BlakeTwo256;
 	use sp_trie::PrefixedMemoryDB;
 
@@ -426,7 +425,6 @@
 
 	#[test]
 	fn proof_is_invalid_when_does_not_contains_root() {
-		use sp_core::H256;
 		let result = create_proof_check_backend::<BlakeTwo256>(
 			H256::from_low_u64_be(1),
 			StorageProof::empty(),
@@ -582,4 +580,35 @@
 		assert!(backend.storage(b"doesnotexist2").unwrap().is_none());
 		check_estimation(&backend);
 	}
+
+	#[test]
+	fn proof_recorded_for_same_execution_should_be_deterministic() {
+		let storage_changes = vec![
+			(H256::random(), Some(b"value1".to_vec())),
+			(H256::random(), Some(b"value2".to_vec())),
+			(H256::random(), Some(b"value3".to_vec())),
+			(H256::random(), Some(b"value4".to_vec())),
+			(H256::random(), Some(b"value5".to_vec())),
+			(H256::random(), Some(b"value6".to_vec())),
+			(H256::random(), Some(b"value7".to_vec())),
+			(H256::random(), Some(b"value8".to_vec())),
+		];
+
+		let proof_recorder =
+			ProofRecorder::<H256> { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) };
+		storage_changes
+			.clone()
+			.into_iter()
+			.for_each(|(key, val)| proof_recorder.record(key, val));
+		let proof1 = proof_recorder.to_storage_proof();
+
+		let proof_recorder =
+			ProofRecorder::<H256> { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) };
+		storage_changes
+			.into_iter()
+			.for_each(|(key, val)| proof_recorder.record(key, val));
+		let proof2 = proof_recorder.to_storage_proof();
+
+		assert_eq!(proof1, proof2);
+	}
 }
diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs
index 79da009ae151d..f6139584dbbad 100644
--- a/primitives/trie/src/storage_proof.rs
+++ b/primitives/trie/src/storage_proof.rs
@@ -18,7 +18,7 @@
 use codec::{Decode, Encode};
 use hash_db::{HashDB, Hasher};
 use scale_info::TypeInfo;
-use sp_std::vec::Vec;
+use sp_std::{collections::btree_set::BTreeSet, iter::IntoIterator, vec::Vec};
 // Note that `LayoutV1` usage here (proof compaction) is compatible
 // with `LayoutV0`.
 use crate::LayoutV1 as Layout;
@@ -32,13 +32,13 @@
 /// the serialized nodes and performing the key lookups.
 #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)]
 pub struct StorageProof {
-	trie_nodes: Vec<Vec<u8>>,
+	trie_nodes: BTreeSet<Vec<u8>>,
 }
 
 impl StorageProof {
 	/// Constructs a storage proof from a subset of encoded trie nodes in a storage backend.
-	pub fn new(trie_nodes: Vec<Vec<u8>>) -> Self {
-		StorageProof { trie_nodes }
+	pub fn new(trie_nodes: impl IntoIterator<Item = Vec<u8>>) -> Self {
+		StorageProof { trie_nodes: BTreeSet::from_iter(trie_nodes) }
 	}
 
 	/// Returns a new empty proof.
@@ -46,7 +46,7 @@ impl StorageProof {
 	/// An empty proof is capable of only proving trivial statements (ie. that an empty set of
 	/// key-value pairs exist in storage).
 	pub fn empty() -> Self {
-		StorageProof { trie_nodes: Vec::new() }
+		StorageProof { trie_nodes: BTreeSet::new() }
 	}
 
 	/// Returns whether this is an empty proof.
@@ -54,14 +54,14 @@ impl StorageProof {
 		self.trie_nodes.is_empty()
 	}
 
-	/// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed
-	/// to be traversed in any particular order.
+	/// Create an iterator over encoded trie nodes in lexicographical order constructed
+	/// from the proof.
 	pub fn iter_nodes(self) -> StorageProofNodeIterator {
 		StorageProofNodeIterator::new(self)
 	}
 
 	/// Convert into plain node vector.
-	pub fn into_nodes(self) -> Vec<Vec<u8>> {
+	pub fn into_nodes(self) -> BTreeSet<Vec<u8>> {
 		self.trie_nodes
 	}
 
@@ -138,12 +138,13 @@ impl CompactProof {
 			expected_root,
 		)?;
 		Ok((
-			StorageProof::new(
-				db.drain()
-					.into_iter()
-					.filter_map(|kv| if (kv.1).1 > 0 { Some((kv.1).0) } else { None })
-					.collect(),
-			),
+			StorageProof::new(db.drain().into_iter().filter_map(|kv| {
+				if (kv.1).1 > 0 {
+					Some((kv.1).0)
+				} else {
+					None
+				}
+			})),
 			root,
 		))
 	}
@@ -171,7 +172,7 @@ impl CompactProof {
 /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to
 /// be traversed in any particular order.
 pub struct StorageProofNodeIterator {
-	inner: <Vec<Vec<u8>> as IntoIterator>::IntoIter,
+	inner: <BTreeSet<Vec<u8>> as IntoIterator>::IntoIter,
 }
 
 impl StorageProofNodeIterator {
diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs
index 7a5c7d003e034..a17d7c25e1b8a 100644
--- a/primitives/trie/src/trie_stream.rs
+++ b/primitives/trie/src/trie_stream.rs
@@ -26,8 +26,8 @@ use hash_db::Hasher;
 use sp_std::vec::Vec;
 use trie_root;
 
-#[derive(Default, Clone)]
 /// Codec-flavored TrieStream.
+#[derive(Default, Clone)]
 pub struct TrieStream {
 	/// Current node buffer.
 	buffer: Vec<u8>,
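
Why the change makes the proof deterministic, in brief: the recorder keeps its records in a HashMap, and HashMap iteration order differs from one recorder instance to the next, so collecting the recorded nodes straight into a Vec could yield two proofs for the same execution that encode and compare unequal. Backing StorageProof with a BTreeSet normalizes (and de-duplicates) the node order. The following is a minimal standalone sketch of that idea using only std collections; the make_proof_vec and make_proof_set helpers are invented for this illustration and are not the Substrate APIs.

// Illustration only: order-sensitive vs. order-normalized proof containers.
use std::collections::BTreeSet;

// Old shape: a Vec keeps whatever order the nodes were handed over in,
// so the proof inherits the recorder's arbitrary iteration order.
fn make_proof_vec(nodes: &[Vec<u8>]) -> Vec<Vec<u8>> {
    nodes.to_vec()
}

// New shape: a BTreeSet stores the encoded nodes sorted and de-duplicated,
// so the recording order no longer leaks into the proof.
fn make_proof_set(nodes: &[Vec<u8>]) -> BTreeSet<Vec<u8>> {
    nodes.iter().cloned().collect()
}

fn main() {
    // The same two encoded trie nodes, observed in two different orders,
    // as a HashMap-backed recorder may report them across instances.
    let run_a = vec![b"node-a".to_vec(), b"node-b".to_vec()];
    let run_b = vec![b"node-b".to_vec(), b"node-a".to_vec()];

    // Vec-backed proofs differ even though they prove the same statements.
    assert_ne!(make_proof_vec(&run_a), make_proof_vec(&run_b));

    // BTreeSet-backed proofs are identical: deterministic for the same execution.
    assert_eq!(make_proof_set(&run_a), make_proof_set(&run_b));

    println!("BTreeSet-backed proof is independent of recording order");
}

This mirrors what the new proof_recorded_for_same_execution_should_be_deterministic test asserts with proof1 == proof2, and is one plausible reading of why CompactProof can keep using a Vec: the compact encoding comes from a trie traversal rather than from draining a hash map, so its node order is already deterministic.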