Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Consensus Engines Implementation: Aura #911

Merged
merged 57 commits into from
Oct 27, 2018
Merged
Show file tree
Hide file tree
Changes from 56 commits
Commits
Show all changes
57 commits
Select commit Hold shift + click to select a range
e333350
begin to shuffle some more stuff over to consensus-common
rphmeier Oct 9, 2018
ef29c2e
Generalize BlockImport
gnunicorn Oct 16, 2018
9bd7495
initial aura block production
rphmeier Oct 16, 2018
2a9268e
move node consensus into rhd
gnunicorn Oct 17, 2018
de5a15e
Move ImportBlock into consensus-common
gnunicorn Oct 19, 2018
65f8ad6
Added block notification after import
arkpar Oct 19, 2018
822a0d6
re-enable tests
gnunicorn Oct 19, 2018
c1b9387
Send import notification in aura tests
arkpar Oct 19, 2018
962bc0a
split into light and full, attempt to use aura
gnunicorn Oct 19, 2018
51ace82
Integrating aura into service
gnunicorn Oct 19, 2018
c1f9f19
Make Signatures more generic
gnunicorn Oct 19, 2018
dd085e5
Aura Block Production with the given key
gnunicorn Oct 20, 2018
bf1c7a4
Block Proposing like 912 intended
gnunicorn Oct 20, 2018
1dbcfce
also adapt to 924
gnunicorn Oct 20, 2018
122c633
match up InherentData struct
gnunicorn Oct 20, 2018
8138a0b
Merge remote-tracking branch 'origin/master' into rh-aura
gavofyork Oct 21, 2018
ffe5a14
Fix
gavofyork Oct 21, 2018
e177ae9
update slot duration and secs_per_block to not conflict
gnunicorn Oct 21, 2018
02d6fe0
run aura on the thread pool
rphmeier Oct 21, 2018
a766d52
start at exact step start in aura
rphmeier Oct 21, 2018
a46c79b
fix best block keys
rphmeier Oct 21, 2018
0cb2291
catch panic and continue in aura proposer
rphmeier Oct 21, 2018
4696d0e
Add needed wasm blob, in lieu of better solutions.
gavofyork Oct 21, 2018
f4290fc
Merge branch 'rh-aura' of github.com:paritytech/substrate into rh-aura
gavofyork Oct 21, 2018
50f4235
update lockfile
rphmeier Oct 21, 2018
d85f344
Fix warnings.
gavofyork Oct 21, 2018
18ff95b
Remove Miscellaneous dependency.
gavofyork Oct 21, 2018
1a7f139
Remove old cruft
gavofyork Oct 21, 2018
2f6ebe9
Make API ids consistent with traits and bring upstream for sharing.
gavofyork Oct 21, 2018
2241f56
Add decrease_free_balance to Balances module
gavofyork Oct 22, 2018
9b0b5c4
Encode `Metadata` once instead of two times
bkchr Oct 22, 2018
5df41d4
Bitops include xor
gavofyork Oct 22, 2018
8ba939e
Merge branch 'rh-aura' of github.com:paritytech/substrate into rh-aura
gavofyork Oct 22, 2018
04dc1d6
Upgrade key module.
gavofyork Oct 22, 2018
de169b4
Wasm fix
gavofyork Oct 22, 2018
ed8c705
Default pages to somewhat bigger.
gavofyork Oct 22, 2018
78f488e
Make `decl_module!` implement `OnFinalise`
bkchr Oct 22, 2018
059944e
Fix.
gavofyork Oct 22, 2018
a8ba344
Runtime version pub sub. (#948)
tomusdrw Oct 23, 2018
bd8a0c4
Introduce upgrade key into node
gavofyork Oct 23, 2018
fc6aa35
add missing key in tests to make it pass
gnunicorn Oct 23, 2018
b72600a
update wasm compiles
gnunicorn Oct 23, 2018
3378b22
Add `Created` event.
pepyakin Oct 23, 2018
81b775b
Merge remote-tracking branch 'origin/master' into rh-aura
gnunicorn Oct 25, 2018
2584b98
Update WASM hashes
gnunicorn Oct 25, 2018
ded4fe9
Cleaning up consensus-rhd
gnunicorn Oct 26, 2018
39d6e50
parity-codec version bump
gnunicorn Oct 26, 2018
c4a8acd
addressing grumbles
gnunicorn Oct 26, 2018
005110e
Reform `Result<T, ()>` into `Option<T>` returns
gnunicorn Oct 26, 2018
30fc26c
Attempt to merge remote-tracking branch 'origin/master' into rh-aura
gnunicorn Oct 26, 2018
b4f0f64
Fixes bugs with the `decl_module!` macro
bkchr Oct 26, 2018
60ff69c
Fixes last errors after merge
bkchr Oct 26, 2018
63cbe77
Some nitpicking
bkchr Oct 26, 2018
075f80a
kill some TODOs
rphmeier Oct 26, 2018
4c2955f
Simplify and reuse slot_author
gnunicorn Oct 26, 2018
17be798
Addressing grumbles
gnunicorn Oct 26, 2018
9ab87a6
Merge remote-tracking branch 'origin/master' into rh-aura
gnunicorn Oct 27, 2018
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
344 changes: 175 additions & 169 deletions Cargo.lock

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ members = [
"core/client",
"core/client/db",
"core/consensus/common",
"core/consensus/aura",
"core/consensus/rhd",
"core/executor",
"core/finality-grandpa",
Expand Down Expand Up @@ -54,6 +55,7 @@ members = [
"srml/system",
"srml/timestamp",
"srml/treasury",
"srml/upgrade-key",
"core/serializer",
"core/service",
"core/service/test",
Expand All @@ -64,9 +66,7 @@ members = [
"core/trie",
"core/keystore",
"node/cli",
"node/consensus",
"node/executor",
"node/network",
"node/primitives",
"node/runtime",
"subkey",
Expand Down
2 changes: 1 addition & 1 deletion core/client/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ hex-literal = "0.1"
futures = "0.1.17"
slog = "^2"
heapsize = "0.4"
substrate-consensus-rhd = { path = "../consensus/rhd" }
substrate-consensus-common = { path = "../consensus/common" }
parity-codec = "2.1"
substrate-executor = { path = "../executor" }
substrate-primitives = { path = "../primitives" }
Expand Down
99 changes: 56 additions & 43 deletions core/client/db/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -534,7 +534,9 @@ impl<Block: BlockT> Backend<Block> {
meta.finalized_hash, f_hash),
).into())
}
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, f_hash.as_ref());

let lookup_key = ::utils::number_to_lookup_key(f_num);
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key);

let commit = self.storage.state_db.canonicalize_block(&f_hash);
apply_state_commit(transaction, commit);
Expand Down Expand Up @@ -586,11 +588,20 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
-> Result<(), client::error::Error>
{
let mut transaction = DBTransaction::new();

if let Some(pending_block) = operation.pending_block {
let hash = pending_block.header.hash();
let parent_hash = *pending_block.header.parent_hash();
let number = pending_block.header.number().clone();

// blocks in longest chain are keyed by number
let lookup_key = if pending_block.leaf_state.is_best() {
::utils::number_to_lookup_key(number).to_vec()
} else {
// other blocks are keyed by number + hash
::utils::number_and_hash_to_lookup_key(number, hash)
};

if pending_block.leaf_state.is_best() {
let meta = self.blockchain.meta.read();

Expand Down Expand Up @@ -678,17 +689,9 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
}
}

transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
transaction.put(columns::META, meta_keys::BEST_BLOCK, &lookup_key);
}

// blocks in longest chain are keyed by number
let lookup_key = if pending_block.leaf_state.is_best() {
::utils::number_to_lookup_key(number).to_vec()
} else {
// other blocks are keyed by number + hash
::utils::number_and_hash_to_lookup_key(number, hash)
};

transaction.put(columns::HEADER, &lookup_key, &pending_block.header.encode());
if let Some(body) = pending_block.body {
transaction.put(columns::BODY, &lookup_key, &body.encode());
Expand All @@ -700,7 +703,7 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
transaction.put(columns::HASH_LOOKUP, hash.as_ref(), &lookup_key);

if number == Zero::zero() {
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, hash.as_ref());
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key);
transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
}

Expand Down Expand Up @@ -797,7 +800,8 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
|| client::error::ErrorKind::UnknownBlock(
format!("Error reverting to {}. Block header not found.", best)))?;

transaction.put(columns::META, meta_keys::BEST_BLOCK, header.hash().as_ref());
let lookup_key = ::utils::number_to_lookup_key(header.number().clone());
transaction.put(columns::META, meta_keys::BEST_BLOCK, &lookup_key);
transaction.delete(columns::HASH_LOOKUP, header.hash().as_ref());
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(header.hash().clone(), best.clone(), true, false);
Expand Down Expand Up @@ -927,40 +931,49 @@ mod tests {

#[test]
fn block_hash_inserted_correctly() {
let db = Backend::<Block>::new_test(1, 0);
for i in 0..10 {
assert!(db.blockchain().hash(i).unwrap().is_none());

{
let id = if i == 0 {
BlockId::Hash(Default::default())
} else {
BlockId::Number(i - 1)
};

let mut op = db.begin_operation(id).unwrap();
let header = Header {
number: i,
parent_hash: if i == 0 {
Default::default()
let backing = {
let db = Backend::<Block>::new_test(1, 0);
for i in 0..10 {
assert!(db.blockchain().hash(i).unwrap().is_none());

{
let id = if i == 0 {
BlockId::Hash(Default::default())
} else {
db.blockchain.hash(i - 1).unwrap().unwrap()
},
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};

op.set_block_data(
header,
Some(vec![]),
None,
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
BlockId::Number(i - 1)
};

let mut op = db.begin_operation(id).unwrap();
let header = Header {
number: i,
parent_hash: if i == 0 {
Default::default()
} else {
db.blockchain.hash(i - 1).unwrap().unwrap()
},
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};

op.set_block_data(
header,
Some(vec![]),
None,
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
}

assert!(db.blockchain().hash(i).unwrap().is_some())
}
db.storage.db.clone()
};

assert!(db.blockchain().hash(i).unwrap().is_some())
let backend = Backend::<Block>::from_kvdb(backing, PruningMode::keep_blocks(1), 0).unwrap();
assert_eq!(backend.blockchain().info().unwrap().best_number, 9);
for i in 0..10 {
assert!(backend.blockchain().hash(i).unwrap().is_some())
}
}

Expand Down
21 changes: 11 additions & 10 deletions core/client/db/src/light.rs
Original file line number Diff line number Diff line change
Expand Up @@ -196,7 +196,8 @@ impl<Block: BlockT> LightStorage<Block> {
).into())
}

transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, hash.as_ref());
let lookup_key = ::utils::number_to_lookup_key(header.number().clone());
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key);

// build new CHT if required
if let Some(new_cht_number) = cht::is_build_required(cht::SIZE, *header.number()) {
Expand Down Expand Up @@ -244,6 +245,14 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
let number = *header.number();
let parent_hash = *header.parent_hash();

// blocks in longest chain are keyed by number
let lookup_key = if leaf_state.is_best() {
::utils::number_to_lookup_key(number).to_vec()
} else {
// other blocks are keyed by number + hash
::utils::number_and_hash_to_lookup_key(number, hash)
};

if leaf_state.is_best() {
// handle reorg.
{
Expand Down Expand Up @@ -298,17 +307,9 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
}
}

transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
transaction.put(columns::META, meta_keys::BEST_BLOCK, &lookup_key);
}

// blocks in longest chain are keyed by number
let lookup_key = if leaf_state.is_best() {
::utils::number_to_lookup_key(number).to_vec()
} else {
// other blocks are keyed by number + hash
::utils::number_and_hash_to_lookup_key(number, hash)
};

transaction.put(columns::HEADER, &lookup_key, &header.encode());
transaction.put(columns::HASH_LOOKUP, hash.as_ref(), &lookup_key);

Expand Down
1 change: 1 addition & 0 deletions core/client/db/src/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ pub mod meta_keys {
}

/// Database metadata.
#[derive(Debug)]
pub struct Meta<N, H> {
/// Hash of the best known block.
pub best_hash: H,
Expand Down
2 changes: 1 addition & 1 deletion core/client/src/call_executor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ where
let heap_pages = state.storage(well_known_keys::HEAP_PAGES)
.map_err(|e| error::ErrorKind::Execution(Box::new(e)))?
.and_then(|v| u64::decode(&mut &v[..]))
.unwrap_or(8) as usize;
.unwrap_or(1024) as usize;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why was this changed?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@gavofyork can you say something about this?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

8 is way too low to do a chain upgrade. As I understand it, memory is unallocated until used anyway, so it shouldn't cause a performance degradation.


let mut ext = Ext::new(&mut overlay, &state, self.backend.changes_trie_storage());
self.executor.runtime_version(&mut ext, heap_pages, &code)
Expand Down
Loading