Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Fix the non-deterministic storage proof recorded for the same execution #10915

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
ea9c8f2
Add a test case for the determinism of recorded proof
liuchengxu Feb 24, 2022
3051028
Replace HashMap with BTreeMap for the actual proof records
liuchengxu Feb 24, 2022
3254cda
cargo +nightly fmt --all
liuchengxu Feb 24, 2022
56aaa02
Store the trie nodes in BTreeSet for StorageProof
liuchengxu Feb 25, 2022
f98796b
Merge branch 'master' of https://github.com/paritytech/substrate into…
liuchengxu Feb 25, 2022
4d1b6fa
Nit
liuchengxu Feb 25, 2022
7320c43
Revert the BTreeMap changes and sort when converting to storage proof
liuchengxu Feb 26, 2022
40067d3
Remove PartialEq from StorageProof
liuchengxu Feb 26, 2022
1213c52
Remove unnecessary change
liuchengxu Feb 26, 2022
8ce692b
Add `compare` method to StorageProof
liuchengxu Feb 28, 2022
b225f0b
Merge branch 'master' of https://github.com/paritytech/substrate into…
liuchengxu Feb 28, 2022
76669a8
FMT
liuchengxu Feb 28, 2022
5508aef
Dummy change to trigger CI
liuchengxu Feb 28, 2022
e1aa977
Merge branch 'master' of https://github.com/paritytech/substrate into…
liuchengxu Mar 2, 2022
9701cde
Use `BTreeSet` for StorageProof and keep using `Vec` for CompactProof
liuchengxu Mar 4, 2022
d3506bd
Update comment on `iter_nodes`
liuchengxu Mar 4, 2022
3e5afce
Revert `PartialEq` removal
liuchengxu Mar 4, 2022
bd48df1
Merge branch 'master' of https://github.com/paritytech/substrate into…
liuchengxu Mar 4, 2022
89e312a
Merge branch 'master' of https://github.com/paritytech/substrate into…
liuchengxu Mar 5, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions client/executor/src/native_executor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -462,8 +462,9 @@ impl RuntimeSpawn for RuntimeInstanceSpawn {
// https://github.com/paritytech/substrate/issues/7354
let mut instance = match module.new_instance() {
Ok(instance) => instance,
Err(error) =>
panic!("failed to create new instance from module: {}", error),
Err(error) => {
panic!("failed to create new instance from module: {}", error)
},
};

match instance
Expand Down
10 changes: 6 additions & 4 deletions client/offchain/src/api/http.rs
Original file line number Diff line number Diff line change
Expand Up @@ -432,8 +432,9 @@ impl HttpApi {
);
},
None => {}, // can happen if we detected an IO error when sending the body
_ =>
tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker"),
_ => {
tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker")
},
}
},

Expand All @@ -443,8 +444,9 @@ impl HttpApi {
self.requests.insert(id, HttpApiRequest::Fail(error));
},
None => {}, // can happen if we detected an IO error when sending the body
_ =>
tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker"),
_ => {
tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker")
},
},

None => {
Expand Down
49 changes: 39 additions & 10 deletions primitives/state-machine/src/proving_backend.rs
Original file line number Diff line number Diff line change
Expand Up @@ -158,15 +158,13 @@ impl<Hash: std::hash::Hash + Eq> ProofRecorder<Hash> {

/// Convert into a [`StorageProof`].
pub fn to_storage_proof(&self) -> StorageProof {
let trie_nodes = self
.inner
.read()
.records
.iter()
.filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec()))
.collect();

StorageProof::new(trie_nodes)
StorageProof::new(
self.inner
.read()
.records
.iter()
.filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())),
)
}

/// Reset the internal state.
Expand Down Expand Up @@ -393,6 +391,7 @@ mod tests {
proving_backend::create_proof_check_backend, trie_backend::tests::test_trie,
InMemoryBackend,
};
use sp_core::H256;
use sp_runtime::traits::BlakeTwo256;
use sp_trie::PrefixedMemoryDB;

Expand Down Expand Up @@ -426,7 +425,6 @@ mod tests {

#[test]
fn proof_is_invalid_when_does_not_contains_root() {
use sp_core::H256;
let result = create_proof_check_backend::<BlakeTwo256>(
H256::from_low_u64_be(1),
StorageProof::empty(),
Expand Down Expand Up @@ -582,4 +580,35 @@ mod tests {
assert!(backend.storage(b"doesnotexist2").unwrap().is_none());
check_estimation(&backend);
}

#[test]
fn proof_recorded_for_same_execution_should_be_deterministic() {
let storage_changes = vec![
(H256::random(), Some(b"value1".to_vec())),
(H256::random(), Some(b"value2".to_vec())),
(H256::random(), Some(b"value3".to_vec())),
(H256::random(), Some(b"value4".to_vec())),
(H256::random(), Some(b"value5".to_vec())),
(H256::random(), Some(b"value6".to_vec())),
(H256::random(), Some(b"value7".to_vec())),
(H256::random(), Some(b"value8".to_vec())),
];

let proof_recorder =
ProofRecorder::<H256> { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) };
storage_changes
.clone()
.into_iter()
.for_each(|(key, val)| proof_recorder.record(key, val));
let proof1 = proof_recorder.to_storage_proof();

let proof_recorder =
ProofRecorder::<H256> { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) };
storage_changes
.into_iter()
.for_each(|(key, val)| proof_recorder.record(key, val));
let proof2 = proof_recorder.to_storage_proof();

assert_eq!(proof1, proof2);
}
}
31 changes: 16 additions & 15 deletions primitives/trie/src/storage_proof.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
use codec::{Decode, Encode};
use hash_db::{HashDB, Hasher};
use scale_info::TypeInfo;
use sp_std::vec::Vec;
use sp_std::{collections::btree_set::BTreeSet, iter::IntoIterator, vec::Vec};
// Note that `LayoutV1` usage here (proof compaction) is compatible
// with `LayoutV0`.
use crate::LayoutV1 as Layout;
Expand All @@ -32,36 +32,36 @@ use crate::LayoutV1 as Layout;
/// the serialized nodes and performing the key lookups.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)]
pub struct StorageProof {
trie_nodes: Vec<Vec<u8>>,
trie_nodes: BTreeSet<Vec<u8>>,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So `BTreeSet` will always produce an ordered proof, but it will also allow decoding a proof with non-ordered nodes.

Actually, forcing the order on decode would break existing code, so it is probably better this way.

}

impl StorageProof {
/// Constructs a storage proof from a subset of encoded trie nodes in a storage backend.
pub fn new(trie_nodes: Vec<Vec<u8>>) -> Self {
StorageProof { trie_nodes }
pub fn new(trie_nodes: impl IntoIterator<Item = Vec<u8>>) -> Self {
StorageProof { trie_nodes: BTreeSet::from_iter(trie_nodes) }
}

/// Returns a new empty proof.
///
/// An empty proof is capable of only proving trivial statements (ie. that an empty set of
/// key-value pairs exist in storage).
pub fn empty() -> Self {
StorageProof { trie_nodes: Vec::new() }
StorageProof { trie_nodes: BTreeSet::new() }
}

/// Returns whether this is an empty proof.
pub fn is_empty(&self) -> bool {
self.trie_nodes.is_empty()
}

/// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed
/// to be traversed in any particular order.
/// Create an iterator over encoded trie nodes in lexicographical order constructed
/// from the proof.
pub fn iter_nodes(self) -> StorageProofNodeIterator {
StorageProofNodeIterator::new(self)
}

/// Convert into plain node vector.
pub fn into_nodes(self) -> Vec<Vec<u8>> {
pub fn into_nodes(self) -> BTreeSet<Vec<u8>> {
self.trie_nodes
}

Expand Down Expand Up @@ -138,12 +138,13 @@ impl CompactProof {
expected_root,
)?;
Ok((
StorageProof::new(
db.drain()
.into_iter()
.filter_map(|kv| if (kv.1).1 > 0 { Some((kv.1).0) } else { None })
.collect(),
),
StorageProof::new(db.drain().into_iter().filter_map(|kv| {
if (kv.1).1 > 0 {
Some((kv.1).0)
} else {
None
}
})),
root,
))
}
Expand Down Expand Up @@ -171,7 +172,7 @@ impl CompactProof {
/// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to
/// be traversed in any particular order.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The comment could be updated to "in lexicographical order of encoded nodes.", or something like that.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Let's keep it that way, because we then can change this later.

pub struct StorageProofNodeIterator {
inner: <Vec<Vec<u8>> as IntoIterator>::IntoIter,
inner: <BTreeSet<Vec<u8>> as IntoIterator>::IntoIter,
}

impl StorageProofNodeIterator {
Expand Down
2 changes: 1 addition & 1 deletion primitives/trie/src/trie_stream.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ use hash_db::Hasher;
use sp_std::vec::Vec;
use trie_root;

#[derive(Default, Clone)]
/// Codec-flavored TrieStream.
#[derive(Default, Clone)]
pub struct TrieStream {
/// Current node buffer.
buffer: Vec<u8>,
Expand Down