diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index eb422e039dd..6c9b19d4f72 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -115,6 +115,14 @@ jobs: - uses: actions/checkout@v1 - name: Typecheck benchmark code without running it run: make check-benches + check-consensus: + name: check-consensus + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Typecheck consensus code in strict mode + run: make check-consensus clippy: name: clippy runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 1b4fc18d4e9..af4c16dbe79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,7 +2,7 @@ # It is not intended for manual editing. [[package]] name = "account_manager" -version = "0.2.11" +version = "0.2.12" dependencies = [ "account_utils", "bls", @@ -371,7 +371,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "0.2.11" +version = "0.2.12" dependencies = [ "beacon_chain", "clap", @@ -527,7 +527,7 @@ dependencies = [ [[package]] name = "blst" version = "0.1.1" -source = "git+https://github.com/supranational/blst.git?rev=a8398ed284b0d78858302ad1ceb25a80e7bbe535#a8398ed284b0d78858302ad1ceb25a80e7bbe535" +source = "git+https://github.com/sigp/blst.git?rev=284f7059642851c760a09fb1708bcb59c7ca323c#284f7059642851c760a09fb1708bcb59c7ca323c" dependencies = [ "cc", "glob", @@ -536,7 +536,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "0.2.11" +version = "0.2.12" dependencies = [ "beacon_node", "clap", @@ -713,13 +713,15 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.15" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942f72db697d8767c22d46a598e01f2d3b475501ea43d0db4f16d90259182d0b" +checksum = "7b05acab8a90ff05c15f407779397ff10ed4049bbe086d47cd7c95817207ad81" dependencies = [ + "libc", "num-integer", "num-traits", "time 0.1.44", + "winapi 0.3.9", ] 
[[package]] @@ -780,7 +782,7 @@ dependencies = [ "sloggers", "slot_clock", "store", - "time 0.2.21", + "time 0.2.22", "timer", "tokio 0.2.22", "toml", @@ -1253,9 +1255,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "discv5" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4cba1b485c16864edc11ccbf3abf5fbf1c26ce759ab36c32ee8e12638d50b0d" +checksum = "c68cb1b942aadd3bb3a13620c4d831c0aa49eda988cf8bcccfdfdc7ef69504a7" dependencies = [ "aes-gcm", "arrayvec", @@ -1266,17 +1268,17 @@ dependencies = [ "hex 0.4.2", "hkdf", "lazy_static", - "libp2p-core 0.20.1", + "libp2p-core 0.22.1", "libsecp256k1", "log 0.4.11", "lru_time_cache", "multihash", - "net2", "parking_lot 0.11.0", "rand 0.7.3", "rlp", "sha2 0.8.2", "smallvec 1.4.2", + "socket2", "tokio 0.2.22", "uint", "zeroize", @@ -2576,7 +2578,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "0.2.11" +version = "0.2.12" dependencies = [ "bls", "clap", @@ -2688,9 +2690,8 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a694fd76d7c33a45a0e6e1525e9b9b5d11127c9c94e560ac0f8abba54ed80af" +version = "0.21.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" dependencies = [ "asn1_der", "bs58", @@ -2703,8 +2704,8 @@ dependencies = [ "libsecp256k1", "log 0.4.11", "multihash", - "multistream-select 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.9.2", + "multistream-select 0.8.2 (git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f)", + "parity-multiaddr 0.9.1", "parking_lot 0.10.2", "pin-project", "prost", @@ -2722,8 +2723,9 @@ 
dependencies = [ [[package]] name = "libp2p-core" -version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52f13ba8c7df0768af2eb391696d562c7de88cc3a35122531aaa6a7d77754d25" dependencies = [ "asn1_der", "bs58", @@ -2736,8 +2738,8 @@ dependencies = [ "libsecp256k1", "log 0.4.11", "multihash", - "multistream-select 0.8.2 (git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f)", - "parity-multiaddr 0.9.1", + "multistream-select 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-multiaddr 0.9.2", "parking_lot 0.10.2", "pin-project", "prost", @@ -2936,7 +2938,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "0.2.11" +version = "0.2.12" dependencies = [ "account_manager", "account_utils", @@ -3518,9 +3520,9 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.10.2+1.1.1g" +version = "111.11.0+1.1.1h" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a287fdb22e32b5b60624d4a5a7a02dbe82777f730ec0dbc42a0554326fef5a70" +checksum = "380fe324132bea01f45239fadfec9343adb044615f29930d039bec1ae7b9fa5b" dependencies = [ "cc", ] @@ -3833,9 +3835,9 @@ checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = "proc-macro2" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36e28516df94f3dd551a587da5357459d9b36d945a7c37c3557928c1c2ff2a2c" +checksum = "e4b93dba1818d32e781f9d008edd577bab215e83ef50e8a1ddf1ad301b19a09f" dependencies = [ "unicode-xid", ] @@ -5336,9 +5338,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.21" +version = "0.2.22" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c2e31fb28e2a9f01f5ed6901b066c1ba2333c04b64dc61254142bafcb3feb2c" +checksum = "55b7151c9065e80917fbf285d9a5d1432f60db41d170ccafc749a136b41a93af" dependencies = [ "const_fn", "libc", @@ -6052,7 +6054,7 @@ dependencies = [ [[package]] name = "validator_client" -version = "0.2.11" +version = "0.2.12" dependencies = [ "account_utils", "bls", diff --git a/Makefile b/Makefile index 6fadf222f0a..0f6a69b973e 100644 --- a/Makefile +++ b/Makefile @@ -93,6 +93,10 @@ cargo-fmt: check-benches: cargo check --all --benches +# Typechecks consensus code *without* allowing deprecated legacy arithmetic +check-consensus: + cargo check --manifest-path=consensus/state_processing/Cargo.toml --no-default-features + # Runs only the ef-test vectors. run-ef-tests: cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests" @@ -133,7 +137,11 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit + # TODO: we should address this --ignore. + # + # Tracking issue: + # https://github.com/sigp/lighthouse/issues/1669 + cargo audit --ignore RUSTSEC-2020-0043 # Runs `cargo udeps` to check for unused dependencies udeps: diff --git a/README.md b/README.md index 39426fe9758..7e66dc519c4 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ Like all Ethereum 2.0 clients, Lighthouse is a work-in-progress. Current development overview: -- Specification `v0.12.1` implemented, optimized and passing test vectors. +- Specification `v0.12.3` implemented, optimized and passing test vectors. - Rust-native libp2p with Gossipsub and Discv5. - RESTful JSON API via HTTP server. - Events via WebSocket. 
diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 5321b232bac..2029fda58ae 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "account_manager" -version = "0.2.11" +version = "0.2.12" authors = ["Paul Hauner ", "Luke Anderson "] edition = "2018" diff --git a/account_manager/src/common.rs b/account_manager/src/common.rs index 4091f6e6505..2b9c93fb1dc 100644 --- a/account_manager/src/common.rs +++ b/account_manager/src/common.rs @@ -1,5 +1,5 @@ use account_utils::PlainText; -use account_utils::{read_mnemonic_from_user, strip_off_newlines}; +use account_utils::{read_input_from_user, strip_off_newlines}; use eth2_wallet::bip39::{Language, Mnemonic}; use std::fs; use std::path::PathBuf; @@ -8,10 +8,11 @@ use std::thread::sleep; use std::time::Duration; pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:"; +pub const WALLET_NAME_PROMPT: &str = "Enter wallet name:"; pub fn read_mnemonic_from_cli( mnemonic_path: Option, - stdin_password: bool, + stdin_inputs: bool, ) -> Result { let mnemonic = match mnemonic_path { Some(path) => fs::read(&path) @@ -31,7 +32,7 @@ pub fn read_mnemonic_from_cli( eprintln!(""); eprintln!("{}", MNEMONIC_PROMPT); - let mnemonic = read_mnemonic_from_user(stdin_password)?; + let mnemonic = read_input_from_user(stdin_inputs)?; match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) { Ok(mnemonic_m) => { @@ -48,3 +49,19 @@ pub fn read_mnemonic_from_cli( }; Ok(mnemonic) } + +/// Reads in a wallet name from the user. If the `--wallet-name` flag is provided, use it. Otherwise +/// read from an interactive prompt using tty unless the `--stdin-inputs` flag is provided. 
+pub fn read_wallet_name_from_cli( + wallet_name: Option, + stdin_inputs: bool, +) -> Result { + match wallet_name { + Some(name) => Ok(name), + None => { + eprintln!("{}", WALLET_NAME_PROMPT); + + read_input_from_user(stdin_inputs) + } + } +} diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 26178e966c3..0d4566e4610 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -1,11 +1,14 @@ +use crate::common::read_wallet_name_from_cli; +use crate::wallet::create::STDIN_INPUTS_FLAG; use crate::{SECRETS_DIR_FLAG, WALLETS_DIR_FLAG}; -use account_utils::{random_password, strip_off_newlines, validator_definitions}; +use account_utils::{ + random_password, read_password_from_user, strip_off_newlines, validator_definitions, PlainText, +}; use clap::{App, Arg, ArgMatches}; use directory::{ ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR, }; use environment::Environment; -use eth2_wallet::PlainText; use eth2_wallet_manager::WalletManager; use std::ffi::OsStr; use std::fs; @@ -20,6 +23,7 @@ pub const DEPOSIT_GWEI_FLAG: &str = "deposit-gwei"; pub const STORE_WITHDRAW_FLAG: &str = "store-withdrawal-keystore"; pub const COUNT_FLAG: &str = "count"; pub const AT_MOST_FLAG: &str = "at-most"; +pub const WALLET_PASSWORD_PROMPT: &str = "Enter your wallet's password:"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) @@ -32,16 +36,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long(WALLET_NAME_FLAG) .value_name("WALLET_NAME") .help("Use the wallet identified by this name") - .takes_value(true) - .required(true), + .takes_value(true), ) .arg( Arg::with_name(WALLET_PASSWORD_FLAG) .long(WALLET_PASSWORD_FLAG) .value_name("WALLET_PASSWORD_PATH") .help("A path to a file containing the password which will unlock the wallet.") - .takes_value(true) - .required(true), + .takes_value(true), ) .arg( Arg::with_name(WALLETS_DIR_FLAG) @@ -100,6 +102,11 @@ 
pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .conflicts_with("count") .takes_value(true), ) + .arg( + Arg::with_name(STDIN_INPUTS_FLAG) + .long(STDIN_INPUTS_FLAG) + .help("If present, read all user inputs from stdin instead of tty."), + ) } pub fn cli_run( @@ -109,8 +116,8 @@ pub fn cli_run( ) -> Result<(), String> { let spec = env.core_context().eth2_config.spec; - let name: String = clap_utils::parse_required(matches, WALLET_NAME_FLAG)?; - let wallet_password_path: PathBuf = clap_utils::parse_required(matches, WALLET_PASSWORD_FLAG)?; + let name: Option = clap_utils::parse_optional(matches, WALLET_NAME_FLAG)?; + let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG); let wallet_base_dir = if matches.value_of("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_WALLET_DIR) @@ -158,15 +165,17 @@ pub fn cli_run( return Ok(()); } - let wallet_password = fs::read(&wallet_password_path) - .map_err(|e| format!("Unable to read {:?}: {:?}", wallet_password_path, e)) - .map(|bytes| PlainText::from(strip_off_newlines(bytes)))?; + let wallet_password_path: Option = + clap_utils::parse_optional(matches, WALLET_PASSWORD_FLAG)?; + + let wallet_name = read_wallet_name_from_cli(name, stdin_inputs)?; + let wallet_password = read_wallet_password_from_cli(wallet_password_path, stdin_inputs)?; let mgr = WalletManager::open(&wallet_base_dir) .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; let mut wallet = mgr - .wallet_by_name(&name) + .wallet_by_name(&wallet_name) .map_err(|e| format!("Unable to open wallet: {:?}", e))?; for i in 0..n { @@ -211,3 +220,24 @@ fn existing_validator_count>(validator_dir: P) -> Result, + stdin_inputs: bool, +) -> Result { + match password_file_path { + Some(path) => fs::read(&path) + .map_err(|e| format!("Unable to read {:?}: {:?}", path, e)) + .map(|bytes| strip_off_newlines(bytes).into()), + None => { + eprintln!(""); + eprintln!("{}", WALLET_PASSWORD_PROMPT); + let 
password = + PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec()); + Ok(password) + } + } +} diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 88ed85039bc..1998709d283 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -1,3 +1,4 @@ +use crate::wallet::create::STDIN_INPUTS_FLAG; use account_utils::{ eth2_keystore::Keystore, read_password_from_user, @@ -16,7 +17,6 @@ use std::time::Duration; pub const CMD: &str = "import"; pub const KEYSTORE_FLAG: &str = "keystore"; pub const DIR_FLAG: &str = "directory"; -pub const STDIN_PASSWORD_FLAG: &str = "stdin-passwords"; pub const REUSE_PASSWORD_FLAG: &str = "reuse-password"; pub const PASSWORD_PROMPT: &str = "Enter the keystore password, or press enter to omit it:"; @@ -55,9 +55,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true), ) .arg( - Arg::with_name(STDIN_PASSWORD_FLAG) - .long(STDIN_PASSWORD_FLAG) - .help("If present, read passwords from stdin instead of tty."), + Arg::with_name(STDIN_INPUTS_FLAG) + .long(STDIN_INPUTS_FLAG) + .help("If present, read all user inputs from stdin instead of tty."), ) .arg( Arg::with_name(REUSE_PASSWORD_FLAG) @@ -69,7 +69,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { let keystore: Option = clap_utils::parse_optional(matches, KEYSTORE_FLAG)?; let keystores_dir: Option = clap_utils::parse_optional(matches, DIR_FLAG)?; - let stdin_password = matches.is_present(STDIN_PASSWORD_FLAG); + let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG); let reuse_password = matches.is_present(REUSE_PASSWORD_FLAG); let mut defs = ValidatorDefinitions::open_or_create(&validator_dir) @@ -135,7 +135,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin eprintln!(""); eprintln!("{}", PASSWORD_PROMPT); - let password = read_password_from_user(stdin_password)?; 
+ let password = read_password_from_user(stdin_inputs)?; if password.as_ref().is_empty() { eprintln!("Continuing without password."); diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index 5e3a843ad1f..9029e01ad97 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -1,7 +1,7 @@ use super::create::STORE_WITHDRAW_FLAG; -use super::import::STDIN_PASSWORD_FLAG; use crate::common::read_mnemonic_from_cli; use crate::validator::create::COUNT_FLAG; +use crate::wallet::create::STDIN_INPUTS_FLAG; use crate::{SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG}; use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; use account_utils::random_password; @@ -80,9 +80,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ), ) .arg( - Arg::with_name(STDIN_PASSWORD_FLAG) - .long(STDIN_PASSWORD_FLAG) - .help("If present, read passwords from stdin instead of tty."), + Arg::with_name(STDIN_INPUTS_FLAG) + .long(STDIN_INPUTS_FLAG) + .help("If present, read all user inputs from stdin instead of tty."), ) } @@ -96,7 +96,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let first_index: u32 = clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?; let count: u32 = clap_utils::parse_required(matches, COUNT_FLAG)?; let mnemonic_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; - let stdin_password = matches.is_present(STDIN_PASSWORD_FLAG); + let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG); ensure_dir_exists(&validator_dir)?; ensure_dir_exists(&secrets_dir)?; @@ -105,7 +105,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING."); eprintln!(""); - let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_password)?; + let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_inputs)?; let seed 
= Seed::new(&mnemonic, ""); diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index 3a37a7a1422..a769cc019c1 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -1,5 +1,8 @@ +use crate::common::read_wallet_name_from_cli; use crate::WALLETS_DIR_FLAG; -use account_utils::{random_password, strip_off_newlines}; +use account_utils::{ + is_password_sufficiently_complex, random_password, read_password_from_user, strip_off_newlines, +}; use clap::{App, Arg, ArgMatches}; use eth2_wallet::{ bip39::{Language, Mnemonic, MnemonicType}, @@ -7,7 +10,8 @@ use eth2_wallet::{ }; use eth2_wallet_manager::{LockedWallet, WalletManager, WalletType}; use std::ffi::OsStr; -use std::fs::{self, File}; +use std::fs; +use std::fs::File; use std::io::prelude::*; use std::os::unix::fs::PermissionsExt; use std::path::{Path, PathBuf}; @@ -18,6 +22,10 @@ pub const NAME_FLAG: &str = "name"; pub const PASSWORD_FLAG: &str = "password-file"; pub const TYPE_FLAG: &str = "type"; pub const MNEMONIC_FLAG: &str = "mnemonic-output-path"; +pub const STDIN_INPUTS_FLAG: &str = "stdin-inputs"; +pub const NEW_WALLET_PASSWORD_PROMPT: &str = + "Enter a password for your new wallet that is at least 12 characters long:"; +pub const RETYPE_PASSWORD_PROMPT: &str = "Please re-enter your wallet's new password:"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) @@ -30,8 +38,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "The wallet will be created with this name. It is not allowed to \ create two wallets with the same name for the same --base-dir.", ) - .takes_value(true) - .required(true), + .takes_value(true), ) .arg( Arg::with_name(PASSWORD_FLAG) @@ -43,8 +50,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { saved at that path. 
To avoid confusion, if the file does not already \ exist it must include a '.pass' suffix.", ) - .takes_value(true) - .required(true), + .takes_value(true), ) .arg( Arg::with_name(TYPE_FLAG) @@ -67,6 +73,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .takes_value(true) ) + .arg( + Arg::with_name(STDIN_INPUTS_FLAG) + .long(STDIN_INPUTS_FLAG) + .help("If present, read all user inputs from stdin instead of tty."), + ) } pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> { @@ -113,9 +124,10 @@ pub fn create_wallet_from_mnemonic( wallet_base_dir: &Path, mnemonic: &Mnemonic, ) -> Result { - let name: String = clap_utils::parse_required(matches, NAME_FLAG)?; - let wallet_password_path: PathBuf = clap_utils::parse_required(matches, PASSWORD_FLAG)?; + let name: Option = clap_utils::parse_optional(matches, NAME_FLAG)?; + let wallet_password_path: Option = clap_utils::parse_optional(matches, PASSWORD_FLAG)?; let type_field: String = clap_utils::parse_required(matches, TYPE_FLAG)?; + let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG); let wallet_type = match type_field.as_ref() { HD_TYPE => WalletType::Hd, @@ -125,31 +137,81 @@ pub fn create_wallet_from_mnemonic( let mgr = WalletManager::open(&wallet_base_dir) .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; - // Create a random password if the file does not exist. - if !wallet_password_path.exists() { - // To prevent users from accidentally supplying their password to the PASSWORD_FLAG and - // create a file with that name, we require that the password has a .pass suffix. - if wallet_password_path.extension() != Some(&OsStr::new("pass")) { - return Err(format!( - "Only creates a password file if that file ends in .pass: {:?}", - wallet_password_path - )); + let wallet_password: PlainText = match wallet_password_path { + Some(path) => { + // Create a random password if the file does not exist. 
+ if !path.exists() { + // To prevent users from accidentally supplying their password to the PASSWORD_FLAG and + // create a file with that name, we require that the password has a .pass suffix. + if path.extension() != Some(&OsStr::new("pass")) { + return Err(format!( + "Only creates a password file if that file ends in .pass: {:?}", + path + )); + } + + create_with_600_perms(&path, random_password().as_bytes()) + .map_err(|e| format!("Unable to write to {:?}: {:?}", path, e))?; + } + read_new_wallet_password_from_cli(Some(path), stdin_inputs)? } + None => read_new_wallet_password_from_cli(None, stdin_inputs)?, + }; - create_with_600_perms(&wallet_password_path, random_password().as_bytes()) - .map_err(|e| format!("Unable to write to {:?}: {:?}", wallet_password_path, e))?; - } - - let wallet_password = fs::read(&wallet_password_path) - .map_err(|e| format!("Unable to read {:?}: {:?}", wallet_password_path, e)) - .map(|bytes| PlainText::from(strip_off_newlines(bytes)))?; + let wallet_name = read_wallet_name_from_cli(name, stdin_inputs)?; let wallet = mgr - .create_wallet(name, wallet_type, &mnemonic, wallet_password.as_bytes()) + .create_wallet( + wallet_name, + wallet_type, + &mnemonic, + wallet_password.as_bytes(), + ) .map_err(|e| format!("Unable to create wallet: {:?}", e))?; Ok(wallet) } +/// Used when a user is creating a new wallet. Read in a wallet password from a file if the password file +/// path is provided. Otherwise, read from an interactive prompt using tty unless the `--stdin-inputs` +/// flag is provided. This verifies the password complexity and verifies the password is correctly re-entered. 
+pub fn read_new_wallet_password_from_cli( + password_file_path: Option, + stdin_inputs: bool, +) -> Result { + match password_file_path { + Some(path) => { + let password: PlainText = fs::read(&path) + .map_err(|e| format!("Unable to read {:?}: {:?}", path, e)) + .map(|bytes| strip_off_newlines(bytes).into())?; + + // Ensure the password meets the minimum requirements. + is_password_sufficiently_complex(password.as_bytes())?; + Ok(password) + } + None => loop { + eprintln!(""); + eprintln!("{}", NEW_WALLET_PASSWORD_PROMPT); + let password = + PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec()); + + // Ensure the password meets the minimum requirements. + match is_password_sufficiently_complex(password.as_bytes()) { + Ok(_) => { + eprintln!("{}", RETYPE_PASSWORD_PROMPT); + let retyped_password = + PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec()); + if retyped_password == password { + break Ok(password); + } else { + eprintln!("Passwords do not match."); + } + } + Err(message) => eprintln!("{}", message), + } + }, + } +} + /// Creates a file with `600 (-rw-------)` permissions. 
pub fn create_with_600_perms>(path: P, bytes: &[u8]) -> Result<(), String> { let path = path.as_ref(); diff --git a/account_manager/src/wallet/recover.rs b/account_manager/src/wallet/recover.rs index 9e96de60d1b..2240323c26a 100644 --- a/account_manager/src/wallet/recover.rs +++ b/account_manager/src/wallet/recover.rs @@ -1,12 +1,11 @@ use crate::common::read_mnemonic_from_cli; -use crate::wallet::create::create_wallet_from_mnemonic; +use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG}; use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG}; use clap::{App, Arg, ArgMatches}; use std::path::PathBuf; pub const CMD: &str = "recover"; pub const MNEMONIC_FLAG: &str = "mnemonic-path"; -pub const STDIN_PASSWORD_FLAG: &str = "stdin-passwords"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) @@ -19,8 +18,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "The wallet will be created with this name. It is not allowed to \ create two wallets with the same name for the same --base-dir.", ) - .takes_value(true) - .required(true), + .takes_value(true), ) .arg( Arg::with_name(PASSWORD_FLAG) @@ -33,8 +31,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { saved at that path. 
To avoid confusion, if the file does not already \ exist it must include a '.pass' suffix.", ) - .takes_value(true) - .required(true), + .takes_value(true), ) .arg( Arg::with_name(MNEMONIC_FLAG) @@ -56,21 +53,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value(HD_TYPE), ) .arg( - Arg::with_name(STDIN_PASSWORD_FLAG) - .long(STDIN_PASSWORD_FLAG) - .help("If present, read passwords from stdin instead of tty."), + Arg::with_name(STDIN_INPUTS_FLAG) + .long(STDIN_INPUTS_FLAG) + .help("If present, read all user inputs from stdin instead of tty."), ) } pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> { let mnemonic_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; - let stdin_password = matches.is_present(STDIN_PASSWORD_FLAG); + let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG); eprintln!(""); eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING."); eprintln!(""); - let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_password)?; + let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_inputs)?; let wallet = create_wallet_from_mnemonic(matches, &wallet_base_dir.as_path(), &mnemonic) .map_err(|e| format!("Unable to create wallet: {:?}", e))?; diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index c99f3779ebc..2c19b753192 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "0.2.11" +version = "0.2.12" authors = ["Paul Hauner ", "Age Manning { if network_id != config_network { - error!( + crit!( self.log, - "Failed to update eth1 cache"; - "reason" => "Invalid eth1 network id", + "Invalid eth1 network. 
Please switch to correct network"; "expected" => format!("{:?}",DEFAULT_NETWORK_ID), - "got" => format!("{:?}",network_id), + "received" => format!("{:?}",network_id), + "warning" => WARNING_MSG, ); return Ok(()); } } - Err(e) => { - error!(self.log, "Failed to get eth1 network id"; "error" => e); + Err(_) => { + crit!( + self.log, + "Error connecting to eth1 node. Please ensure that you have an eth1 http server running locally on http://localhost:8545 or \ + pass an external endpoint using `--eth1-endpoint `. Also ensure that `eth` and `net` apis are enabled on the eth1 http server"; + "warning" => WARNING_MSG, + ); return Ok(()); } } diff --git a/beacon_node/eth2_libp2p/Cargo.toml b/beacon_node/eth2_libp2p/Cargo.toml index 21d93d0831d..9dabaaff7b6 100644 --- a/beacon_node/eth2_libp2p/Cargo.toml +++ b/beacon_node/eth2_libp2p/Cargo.toml @@ -35,7 +35,6 @@ tokio-util = { version = "0.3.1", features = ["codec", "compat"] } discv5 = { version = "0.1.0-alpha.10", features = ["libp2p"] } tiny-keccak = "2.0.2" environment = { path = "../../lighthouse/environment" } -# TODO: Remove rand crate for mainnet rand = "0.7.3" directory = { path = "../../common/directory" } regex = "1.3.9" diff --git a/beacon_node/eth2_libp2p/src/behaviour/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/mod.rs index cb9c367fbf9..716af9eb5b3 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/mod.rs @@ -40,6 +40,59 @@ mod handler; const MAX_IDENTIFY_ADDRESSES: usize = 10; +/// Identifier of requests sent by a peer. +pub type PeerRequestId = (ConnectionId, SubstreamId); + +/// The types of events than can be obtained from polling the behaviour. +#[derive(Debug)] +pub enum BehaviourEvent { + /// We have successfully dialed and connected to a peer. + PeerDialed(PeerId), + /// A peer has successfully dialed and connected to us. + PeerConnected(PeerId), + /// A peer has disconnected. + PeerDisconnected(PeerId), + /// An RPC Request that was sent failed. 
+ RPCFailed { + /// The id of the failed request. + id: RequestId, + /// The peer to which this request was sent. + peer_id: PeerId, + /// The error that occurred. + error: RPCError, + }, + RequestReceived { + /// The peer that sent the request. + peer_id: PeerId, + /// Identifier of the request. All responses to this request must use this id. + id: PeerRequestId, + /// Request the peer sent. + request: Request, + }, + ResponseReceived { + /// Peer that sent the response. + peer_id: PeerId, + /// Id of the request to which the peer is responding. + id: RequestId, + /// Response the peer sent. + response: Response, + }, + PubsubMessage { + /// The gossipsub message id. Used when propagating blocks after validation. + id: MessageId, + /// The peer from which we received this message, not the peer that published it. + source: PeerId, + /// The topics that this message was sent on. + topics: Vec, + /// The message itself. + message: PubsubMessage, + }, + /// Subscribed to peer for given topic + PeerSubscribed(PeerId, TopicHash), + /// Inform the network to send a Status to this peer. + StatusPeer(PeerId), +} + /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. @@ -837,13 +890,15 @@ impl NetworkBehaviour for Behaviour { // notify the peer manager of a successful connection match endpoint { - ConnectedPoint::Listener { .. } => { - self.peer_manager.connect_ingoing(&peer_id); + ConnectedPoint::Listener { send_back_addr, .. } => { + self.peer_manager + .connect_ingoing(&peer_id, send_back_addr.clone()); self.add_event(BehaviourEvent::PeerConnected(peer_id.clone())); debug!(self.log, "Connection established"; "peer_id" => peer_id.to_string(), "connection" => "Incoming"); } - ConnectedPoint::Dialer { .. 
} => { - self.peer_manager.connect_outgoing(&peer_id); + ConnectedPoint::Dialer { address } => { + self.peer_manager + .connect_outgoing(&peer_id, address.clone()); self.add_event(BehaviourEvent::PeerDialed(peer_id.clone())); debug!(self.log, "Connection established"; "peer_id" => peer_id.to_string(), "connection" => "Dialed"); } @@ -1061,59 +1116,6 @@ impl std::convert::From> for RPCCodedResponse { - /// We have successfully dialed and connected to a peer. - PeerDialed(PeerId), - /// A peer has successfully dialed and connected to us. - PeerConnected(PeerId), - /// A peer has disconnected. - PeerDisconnected(PeerId), - /// An RPC Request that was sent failed. - RPCFailed { - /// The id of the failed request. - id: RequestId, - /// The peer to which this request was sent. - peer_id: PeerId, - /// The error that occurred. - error: RPCError, - }, - RequestReceived { - /// The peer that sent the request. - peer_id: PeerId, - /// Identifier of the request. All responses to this request must use this id. - id: PeerRequestId, - /// Request the peer sent. - request: Request, - }, - ResponseReceived { - /// Peer that sent the response. - peer_id: PeerId, - /// Id of the request to which the peer is responding. - id: RequestId, - /// Response the peer sent. - response: Response, - }, - PubsubMessage { - /// The gossipsub message id. Used when propagating blocks after validation. - id: MessageId, - /// The peer from which we received this message, not the peer that published it. - source: PeerId, - /// The topics that this message was sent on. - topics: Vec, - /// The message itself. - message: PubsubMessage, - }, - /// Subscribed to peer for given topic - PeerSubscribed(PeerId, TopicHash), - /// Inform the network to send a Status to this peer. 
- StatusPeer(PeerId), -} - /// Persist metadata to disk pub fn save_metadata_to_disk(dir: &PathBuf, metadata: MetaData, log: &slog::Logger) { let _ = std::fs::create_dir_all(&dir); diff --git a/beacon_node/eth2_libp2p/src/config.rs b/beacon_node/eth2_libp2p/src/config.rs index a2c7f626a9e..11bb0d36271 100644 --- a/beacon_node/eth2_libp2p/src/config.rs +++ b/beacon_node/eth2_libp2p/src/config.rs @@ -85,14 +85,6 @@ impl Default for Config { .join(DEFAULT_HARDCODED_TESTNET) .join(DEFAULT_BEACON_NODE_DIR) .join(DEFAULT_NETWORK_DIR); - // The default topics that we will initially subscribe to - let topics = vec![ - GossipKind::BeaconBlock, - GossipKind::BeaconAggregateAndProof, - GossipKind::VoluntaryExit, - GossipKind::ProposerSlashing, - GossipKind::AttesterSlashing, - ]; // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(data) for content addressing @@ -152,7 +144,7 @@ impl Default for Config { trusted_peers: vec![], client_version: lighthouse_version::version_with_platform(), disable_discovery: false, - topics, + topics: Vec::new(), } } } diff --git a/beacon_node/eth2_libp2p/src/discovery/mod.rs b/beacon_node/eth2_libp2p/src/discovery/mod.rs index 57b0b8abf2a..b1a74b2d282 100644 --- a/beacon_node/eth2_libp2p/src/discovery/mod.rs +++ b/beacon_node/eth2_libp2p/src/discovery/mod.rs @@ -4,7 +4,7 @@ pub mod enr_ext; // Allow external use of the lighthouse ENR builder pub use enr::{build_enr, create_enr_builder_from_config, use_or_load_enr, CombinedKey, Eth2Enr}; -pub use enr_ext::{CombinedKeyExt, EnrExt}; +pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; pub use libp2p::core::identity::Keypair; use crate::metrics; @@ -20,7 +20,7 @@ use ssz::{Decode, Encode}; use ssz_types::BitVector; use std::{ collections::{HashMap, VecDeque}, - net::SocketAddr, + net::{IpAddr, SocketAddr}, path::Path, pin::Pin, sync::Arc, @@ -436,6 +436,33 @@ impl Discovery { enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), 
&self.log); } + // Bans a peer and it's associated seen IP addresses. + pub fn ban_peer(&mut self, peer_id: &PeerId, ip_addresses: Vec) { + // first try and convert the peer_id to a node_id. + if let Ok(node_id) = peer_id_to_node_id(peer_id) { + // If we could convert this peer id, remove it from the DHT and ban it from discovery. + self.discv5.ban_node(&node_id); + // Remove the node from the routing table. + self.discv5.remove_node(&node_id); + } + + for ip_address in ip_addresses { + self.discv5.ban_ip(ip_address); + } + } + + pub fn unban_peer(&mut self, peer_id: &PeerId, ip_addresses: Vec) { + // first try and convert the peer_id to a node_id. + if let Ok(node_id) = peer_id_to_node_id(peer_id) { + // If we could convert this peer id, remove it from the DHT and ban it from discovery. + self.discv5.permit_node(&node_id); + } + + for ip_address in ip_addresses { + self.discv5.permit_ip(ip_address); + } + } + /* Internal Functions */ /// Adds a subnet query if one doesn't exist. If a subnet query already exists, this diff --git a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs index 963df786873..d528d7e69e4 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs @@ -194,9 +194,9 @@ impl PeerManager { // Update the PeerDB state. 
if let Some(peer_id) = ban_peer.take() { - self.network_globals.peers.write().ban(&peer_id); + self.ban_peer(&peer_id); } else if let Some(peer_id) = unban_peer.take() { - self.network_globals.peers.write().unban(&peer_id); + self.unban_peer(&peer_id); } } @@ -312,19 +312,22 @@ impl PeerManager { /// Sets a peer as connected as long as their reputation allows it /// Informs if the peer was accepted - pub fn connect_ingoing(&mut self, peer_id: &PeerId) -> bool { - self.connect_peer(peer_id, ConnectingType::IngoingConnected) + pub fn connect_ingoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr) -> bool { + self.connect_peer(peer_id, ConnectingType::IngoingConnected { multiaddr }) } /// Sets a peer as connected as long as their reputation allows it /// Informs if the peer was accepted - pub fn connect_outgoing(&mut self, peer_id: &PeerId) -> bool { - self.connect_peer(peer_id, ConnectingType::OutgoingConnected) + pub fn connect_outgoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr) -> bool { + self.connect_peer(peer_id, ConnectingType::OutgoingConnected { multiaddr }) } /// Updates the database informing that a peer is being disconnected. pub fn _disconnecting_peer(&mut self, _peer_id: &PeerId) -> bool { // TODO: implement + // This informs the database that we are in the process of disconnecting the + // peer. Currently this state only exists for a short period of time before we force the + // disconnection. 
true } @@ -644,8 +647,12 @@ impl PeerManager { peerdb.dialing_peer(peer_id); return true; } - ConnectingType::IngoingConnected => peerdb.connect_outgoing(peer_id), - ConnectingType::OutgoingConnected => peerdb.connect_ingoing(peer_id), + ConnectingType::IngoingConnected { multiaddr } => { + peerdb.connect_outgoing(peer_id, multiaddr) + } + ConnectingType::OutgoingConnected { multiaddr } => { + peerdb.connect_ingoing(peer_id, multiaddr) + } } } @@ -683,12 +690,11 @@ impl PeerManager { /// NOTE: This is experimental and will likely be adjusted fn update_peer_scores(&mut self) { /* Check how long have peers been in this state and update their reputations if needed */ - let mut pdb = self.network_globals.peers.write(); let mut to_ban_peers = Vec::new(); let mut to_unban_peers = Vec::new(); - for (peer_id, info) in pdb.peers_mut() { + for (peer_id, info) in self.network_globals.peers.write().peers_mut() { let previous_state = info.score_state(); // Update scores info.score_update(); @@ -780,14 +786,51 @@ impl PeerManager { } // process banning peers for peer_id in to_ban_peers { - pdb.ban(&peer_id); + self.ban_peer(&peer_id); } // process unbanning peers for peer_id in to_unban_peers { - pdb.unban(&peer_id); + self.unban_peer(&peer_id); } } + /// Bans a peer. + /// + /// Records updates the peers connection status and updates the peer db as well as blocks the + /// peer from participating in discovery and removes them from the routing table. + fn ban_peer(&mut self, peer_id: &PeerId) { + let mut peer_db = self.network_globals.peers.write(); + peer_db.ban(peer_id); + let banned_ip_addresses = peer_db + .peer_info(peer_id) + .map(|info| { + info.seen_addresses + .iter() + .filter(|ip| peer_db.is_ip_banned(ip)) + .cloned() + .collect::>() + }) + .unwrap_or_default(); + + self.discovery.ban_peer(&peer_id, banned_ip_addresses); + } + + /// Unbans a peer. 
+ /// + /// Records updates the peers connection status and updates the peer db as well as removes + /// previous bans from discovery. + fn unban_peer(&mut self, peer_id: &PeerId) { + let mut peer_db = self.network_globals.peers.write(); + peer_db.unban(&peer_id); + + let seen_ip_addresses = peer_db + .peer_info(peer_id) + .map(|info| info.seen_addresses.iter().cloned().collect::>()) + .unwrap_or_default(); + + self.discovery.unban_peer(&peer_id, seen_ip_addresses); + } + /// The Peer manager's heartbeat maintains the peer count and maintains peer reputations. /// /// It will request discovery queries if the peer count has not reached the desired number of @@ -894,7 +937,13 @@ enum ConnectingType { /// We are in the process of dialing this peer. Dialing, /// A peer has dialed us. - IngoingConnected, + IngoingConnected { + // The multiaddr the peer connected to us on. + multiaddr: Multiaddr, + }, /// We have successfully dialed a peer. - OutgoingConnected, + OutgoingConnected { + /// The multiaddr we dialed to reach the peer. + multiaddr: Multiaddr, + }, } diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs index 2933cb73163..b62fba50459 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs @@ -7,6 +7,7 @@ use serde::{ ser::{SerializeStruct, Serializer}, Serialize, }; +use std::collections::HashSet; use std::net::IpAddr; use std::time::Instant; use types::{EthSpec, SubnetId}; @@ -24,8 +25,12 @@ pub struct PeerInfo { pub client: Client, /// Connection status of this peer pub connection_status: PeerConnectionStatus, - /// The known listening addresses of this peer. + /// The known listening addresses of this peer. This is given by identify and can be arbitrary + /// (including local IPs). pub listening_addresses: Vec, + /// This is addresses we have physically seen and this is what we use for banning/un-banning + /// peers. 
+ pub seen_addresses: HashSet, /// The current syncing state of the peer. The state may be determined after it's initial /// connection. pub sync_status: PeerSyncStatus, @@ -47,7 +52,8 @@ impl Default for PeerInfo { score: Score::default(), client: Client::default(), connection_status: Default::default(), - listening_addresses: vec![], + listening_addresses: Vec::new(), + seen_addresses: HashSet::new(), sync_status: PeerSyncStatus::Unknown, meta_data: None, min_ttl: None, diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs index 425cf4a37c6..0f8774f7c92 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs @@ -1,7 +1,7 @@ use super::peer_info::{PeerConnectionStatus, PeerInfo}; use super::peer_sync_status::PeerSyncStatus; use super::score::{Score, ScoreState}; -use crate::multiaddr::Protocol; +use crate::multiaddr::{Multiaddr, Protocol}; use crate::rpc::methods::MetaData; use crate::PeerId; use rand::seq::SliceRandom; @@ -174,13 +174,14 @@ impl PeerDB { } fn ip_is_banned(&self, peer: &PeerInfo) -> bool { - peer.listening_addresses.iter().any(|addr| { - addr.iter().any(|p| match p { - Protocol::Ip4(ip) => self.banned_peers_count.ip_is_banned(&ip.into()), - Protocol::Ip6(ip) => self.banned_peers_count.ip_is_banned(&ip.into()), - _ => false, - }) - }) + peer.seen_addresses + .iter() + .any(|addr| self.banned_peers_count.ip_is_banned(addr)) + } + + /// Returns true if the IP is banned. + pub fn is_ip_banned(&self, ip: &IpAddr) -> bool { + self.banned_peers_count.ip_is_banned(ip) } /// Returns true if the Peer is either banned or in the disconnected state. @@ -361,7 +362,7 @@ impl PeerDB { } /// Sets a peer as connected with an ingoing connection. 
- pub fn connect_ingoing(&mut self, peer_id: &PeerId) { + pub fn connect_ingoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr) { let info = self.peers.entry(peer_id.clone()).or_default(); if info.connection_status.is_disconnected() { @@ -370,10 +371,19 @@ impl PeerDB { self.banned_peers_count .remove_banned_peer(&info.connection_status); info.connection_status.connect_ingoing(); + + // Add the seen ip address to the peer's info + if let Some(ip_addr) = multiaddr.iter().find_map(|p| match p { + Protocol::Ip4(ip) => Some(ip.into()), + Protocol::Ip6(ip) => Some(ip.into()), + _ => None, + }) { + info.seen_addresses.insert(ip_addr); + } } /// Sets a peer as connected with an outgoing connection. - pub fn connect_outgoing(&mut self, peer_id: &PeerId) { + pub fn connect_outgoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr) { let info = self.peers.entry(peer_id.clone()).or_default(); if info.connection_status.is_disconnected() { @@ -382,6 +392,15 @@ impl PeerDB { self.banned_peers_count .remove_banned_peer(&info.connection_status); info.connection_status.connect_outgoing(); + + // Add the seen ip address to the peer's info + if let Some(ip_addr) = multiaddr.iter().find_map(|p| match p { + Protocol::Ip4(ip) => Some(ip.into()), + Protocol::Ip6(ip) => Some(ip.into()), + _ => None, + }) { + info.seen_addresses.insert(ip_addr); + } } /// Sets the peer as disconnected. 
A banned peer remains banned @@ -411,20 +430,7 @@ impl PeerDB { } if !info.connection_status.is_banned() { info.connection_status - .ban( - info.listening_addresses - .iter() - .fold(Vec::new(), |mut v, a| { - for p in a { - match p { - Protocol::Ip4(ip) => v.push(ip.into()), - Protocol::Ip6(ip) => v.push(ip.into()), - _ => (), - } - } - v - }), - ); + .ban(info.seen_addresses.iter().cloned().collect()); self.banned_peers_count .add_banned_peer(&info.connection_status); } @@ -564,10 +570,10 @@ mod tests { let (n_in, n_out) = (10, 20); for _ in 0..n_in { - pdb.connect_ingoing(&random_peer); + pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap()); } for _ in 0..n_out { - pdb.connect_outgoing(&random_peer); + pdb.connect_outgoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap()); } // the peer is known @@ -592,7 +598,7 @@ mod tests { for _ in 0..MAX_DC_PEERS + 1 { let p = PeerId::random(); - pdb.connect_ingoing(&p); + pdb.connect_ingoing(&p, "/ip4/0.0.0.0".parse().unwrap()); } assert_eq!(pdb.disconnected_peers, 0); @@ -609,7 +615,7 @@ mod tests { for _ in 0..MAX_BANNED_PEERS + 1 { let p = PeerId::random(); - pdb.connect_ingoing(&p); + pdb.connect_ingoing(&p, "/ip4/0.0.0.0".parse().unwrap()); } assert_eq!(pdb.banned_peers_count.banned_peers(), 0); @@ -627,9 +633,9 @@ mod tests { let p0 = PeerId::random(); let p1 = PeerId::random(); let p2 = PeerId::random(); - pdb.connect_ingoing(&p0); - pdb.connect_ingoing(&p1); - pdb.connect_ingoing(&p2); + pdb.connect_ingoing(&p0, "/ip4/0.0.0.0".parse().unwrap()); + pdb.connect_ingoing(&p1, "/ip4/0.0.0.0".parse().unwrap()); + pdb.connect_ingoing(&p2, "/ip4/0.0.0.0".parse().unwrap()); add_score(&mut pdb, &p0, 70.0); add_score(&mut pdb, &p1, 100.0); add_score(&mut pdb, &p2, 50.0); @@ -649,9 +655,9 @@ mod tests { let p0 = PeerId::random(); let p1 = PeerId::random(); let p2 = PeerId::random(); - pdb.connect_ingoing(&p0); - pdb.connect_ingoing(&p1); - pdb.connect_ingoing(&p2); + pdb.connect_ingoing(&p0, 
"/ip4/0.0.0.0".parse().unwrap()); + pdb.connect_ingoing(&p1, "/ip4/0.0.0.0".parse().unwrap()); + pdb.connect_ingoing(&p2, "/ip4/0.0.0.0".parse().unwrap()); add_score(&mut pdb, &p0, 70.0); add_score(&mut pdb, &p1, 100.0); add_score(&mut pdb, &p2, 50.0); @@ -669,18 +675,18 @@ mod tests { let random_peer = PeerId::random(); - pdb.connect_ingoing(&random_peer); + pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap()); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); dbg!("1"); - pdb.connect_ingoing(&random_peer); + pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap()); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); dbg!("1"); pdb.disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); dbg!("1"); - pdb.connect_outgoing(&random_peer); + pdb.connect_outgoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap()); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); dbg!("1"); pdb.disconnect(&random_peer); @@ -711,20 +717,20 @@ mod tests { let random_peer2 = PeerId::random(); let random_peer3 = PeerId::random(); - pdb.connect_ingoing(&random_peer); - pdb.connect_ingoing(&random_peer1); - pdb.connect_ingoing(&random_peer2); - pdb.connect_ingoing(&random_peer3); + pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap()); + pdb.connect_ingoing(&random_peer1, "/ip4/0.0.0.0".parse().unwrap()); + pdb.connect_ingoing(&random_peer2, "/ip4/0.0.0.0".parse().unwrap()); + pdb.connect_ingoing(&random_peer3, "/ip4/0.0.0.0".parse().unwrap()); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), pdb.banned_peers().count() ); - pdb.connect_ingoing(&random_peer); + pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap()); pdb.disconnect(&random_peer1); pdb.ban(&random_peer2); - pdb.connect_ingoing(&random_peer3); + pdb.connect_ingoing(&random_peer3, "/ip4/0.0.0.0".parse().unwrap()); 
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), @@ -737,7 +743,7 @@ mod tests { pdb.banned_peers().count() ); - pdb.connect_outgoing(&random_peer2); + pdb.connect_outgoing(&random_peer2, "/ip4/0.0.0.0".parse().unwrap()); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), @@ -751,10 +757,10 @@ mod tests { ); pdb.ban(&random_peer3); - pdb.connect_ingoing(&random_peer1); + pdb.connect_ingoing(&random_peer1, "/ip4/0.0.0.0".parse().unwrap()); pdb.disconnect(&random_peer2); pdb.ban(&random_peer3); - pdb.connect_ingoing(&random_peer); + pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap()); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), @@ -777,19 +783,14 @@ mod tests { assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); } - fn connect_peer_with_ips(pdb: &mut PeerDB, ips: Vec>) -> PeerId { + fn connect_peer_with_ips(pdb: &mut PeerDB, ips: Vec) -> PeerId { let p = PeerId::random(); - pdb.connect_ingoing(&p); - pdb.peers.get_mut(&p).unwrap().listening_addresses = ips - .into_iter() - .map(|ip_addresses| { - let mut addr = Multiaddr::empty(); - for ip_address in ip_addresses { - addr.push(Protocol::from(ip_address)); - } - addr - }) - .collect(); + + for ip in ips { + let mut addr = Multiaddr::empty(); + addr.push(Protocol::from(ip)); + pdb.connect_ingoing(&p, addr); + } p } @@ -797,29 +798,29 @@ mod tests { fn test_ban_address() { let mut pdb = get_db(); - let ip1: IpAddr = Ipv4Addr::new(1, 2, 3, 4).into(); - let ip2: IpAddr = Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8).into(); - let ip3: IpAddr = Ipv4Addr::new(1, 2, 3, 5).into(); - let ip4: IpAddr = Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 9).into(); - let ip5: IpAddr = Ipv4Addr::new(2, 2, 3, 4).into(); + let ip1 = Ipv4Addr::new(1, 2, 3, 4).into(); + let ip2 = Ipv6Addr::new(1, 2, 3, 4, 5, 6, 
7, 8).into(); + let ip3 = Ipv4Addr::new(1, 2, 3, 5).into(); + let ip4 = Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 9).into(); + let ip5 = Ipv4Addr::new(2, 2, 3, 4).into(); let mut peers = Vec::new(); for i in 0..BANNED_PEERS_PER_IP_THRESHOLD + 2 { peers.push(connect_peer_with_ips( &mut pdb, if i == 0 { - vec![vec![ip1], vec![ip2]] + vec![ip1, ip2] } else { - vec![vec![ip1, ip2], vec![ip3, ip4]] + vec![ip1, ip2, ip3, ip4] }, )); } - let p1 = connect_peer_with_ips(&mut pdb, vec![vec![ip1]]); - let p2 = connect_peer_with_ips(&mut pdb, vec![vec![ip2, ip5]]); - let p3 = connect_peer_with_ips(&mut pdb, vec![vec![ip3], vec![ip5]]); - let p4 = connect_peer_with_ips(&mut pdb, vec![vec![ip5, ip4]]); - let p5 = connect_peer_with_ips(&mut pdb, vec![vec![ip5]]); + let p1 = connect_peer_with_ips(&mut pdb, vec![ip1]); + let p2 = connect_peer_with_ips(&mut pdb, vec![ip2, ip5]); + let p3 = connect_peer_with_ips(&mut pdb, vec![ip3, ip5]); + let p4 = connect_peer_with_ips(&mut pdb, vec![ip5, ip4]); + let p5 = connect_peer_with_ips(&mut pdb, vec![ip5]); for p in &peers[..BANNED_PEERS_PER_IP_THRESHOLD + 1] { pdb.ban(p); @@ -872,66 +873,63 @@ mod tests { let mut peers = Vec::new(); for _ in 0..BANNED_PEERS_PER_IP_THRESHOLD + 1 { - peers.push(connect_peer_with_ips(&mut pdb, vec![vec![ip1]])); + peers.push(connect_peer_with_ips(&mut pdb, vec![ip1])); } - let p1 = connect_peer_with_ips(&mut pdb, vec![vec![ip1]]); - let p2 = connect_peer_with_ips(&mut pdb, vec![vec![ip2]]); + let p1 = connect_peer_with_ips(&mut pdb, vec![ip1]); + let p2 = connect_peer_with_ips(&mut pdb, vec![ip2]); - //ban all peers + // ban all peers for p in &peers { pdb.ban(p); } - //check ip is banned + // check ip is banned assert!(pdb.is_banned(&p1)); assert!(!pdb.is_banned(&p2)); - //change addresses of banned peers + // change addresses of banned peers for p in &peers { - pdb.peers.get_mut(p).unwrap().listening_addresses = - vec![Multiaddr::empty().with(Protocol::from(ip2))]; + let seen_addresses = &mut 
pdb.peers.get_mut(p).unwrap().seen_addresses; + seen_addresses.clear(); + seen_addresses.insert(ip2); } - //check still the same ip is banned + // check still the same ip is banned assert!(pdb.is_banned(&p1)); assert!(!pdb.is_banned(&p2)); - //unban a peer + // unban a peer pdb.unban(&peers[0]); - //check not banned anymore - assert!(!pdb.is_banned(&p1)); - assert!(!pdb.is_banned(&p2)); - - //check still not banned after new ban - pdb.ban(&peers[0]); + // check not banned anymore assert!(!pdb.is_banned(&p1)); assert!(!pdb.is_banned(&p2)); - //unban and reban all peers + // unban and reban all peers for p in &peers { pdb.unban(p); pdb.ban(p); } - //ip2 is now banned + // ip2 is now banned assert!(!pdb.is_banned(&p1)); assert!(pdb.is_banned(&p2)); - //change ips back again + // change ips back again for p in &peers { - pdb.peers.get_mut(p).unwrap().listening_addresses = - vec![Multiaddr::empty().with(Protocol::from(ip1))]; + let seen_addresses = &mut pdb.peers.get_mut(p).unwrap().seen_addresses; + seen_addresses.clear(); + seen_addresses.insert(ip1); } - //reban every peer except one + // reban every peer except one for p in &peers[1..] 
{ pdb.unban(p); pdb.ban(p); } - //nothing is banned + // nothing is banned assert!(!pdb.is_banned(&p1)); assert!(!pdb.is_banned(&p2)); @@ -950,7 +948,7 @@ mod tests { let log = build_log(slog::Level::Debug, false); let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer.clone()], &log); - pdb.connect_ingoing(&trusted_peer); + pdb.connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap()); // Check trusted status and score assert!(pdb.peer_info(&trusted_peer).unwrap().is_trusted); diff --git a/beacon_node/eth2_libp2p/src/service.rs b/beacon_node/eth2_libp2p/src/service.rs index 1d594918d5a..52286c05d36 100644 --- a/beacon_node/eth2_libp2p/src/service.rs +++ b/beacon_node/eth2_libp2p/src/service.rs @@ -207,7 +207,9 @@ impl Service { warn!(log, "Could not subscribe to topic"; "topic" => format!("{}",topic_kind)); } } - info!(log, "Subscribed to topics"; "topics" => format!("{:?}", subscribed_topics)); + if !subscribed_topics.is_empty() { + info!(log, "Subscribed to topics"; "topics" => format!("{:?}", subscribed_topics)); + } let service = Service { local_peer_id, diff --git a/beacon_node/eth2_libp2p/src/types/mod.rs b/beacon_node/eth2_libp2p/src/types/mod.rs index ec6fcd4af40..762ea7d7400 100644 --- a/beacon_node/eth2_libp2p/src/types/mod.rs +++ b/beacon_node/eth2_libp2p/src/types/mod.rs @@ -16,4 +16,4 @@ pub use globals::NetworkGlobals; pub use pubsub::PubsubMessage; pub use subnet::SubnetDiscovery; pub use sync_state::SyncState; -pub use topics::{GossipEncoding, GossipKind, GossipTopic}; +pub use topics::{GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS}; diff --git a/beacon_node/eth2_libp2p/src/types/topics.rs b/beacon_node/eth2_libp2p/src/types/topics.rs index f564a541377..3f120b3ec1d 100644 --- a/beacon_node/eth2_libp2p/src/types/topics.rs +++ b/beacon_node/eth2_libp2p/src/types/topics.rs @@ -14,6 +14,14 @@ pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const 
ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; +pub const CORE_TOPICS: [GossipKind; 5] = [ + GossipKind::BeaconBlock, + GossipKind::BeaconAggregateAndProof, + GossipKind::VoluntaryExit, + GossipKind::ProposerSlashing, + GossipKind::AttesterSlashing, +]; + /// A gossipsub topic which encapsulates the type of messages that should be sent and received over /// the pubsub protocol and the way the messages should be encoded. #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 2ef369e3b45..0448e7762f8 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -32,6 +32,7 @@ tokio = { version = "0.2.21", features = ["full"] } parking_lot = "0.11.0" smallvec = "1.4.1" # TODO: Remove rand crate for mainnet +# NOTE: why? rand = "0.7.3" fnv = "1.0.6" rlp = "0.4.5" diff --git a/beacon_node/network/src/beacon_processor/chain_segment.rs b/beacon_node/network/src/beacon_processor/chain_segment.rs index 94c7893f055..e659a84b8dd 100644 --- a/beacon_node/network/src/beacon_processor/chain_segment.rs +++ b/beacon_node/network/src/beacon_processor/chain_segment.rs @@ -28,39 +28,26 @@ pub fn handle_chain_segment( match process_id { // this a request from the range sync ProcessId::RangeBatchId(chain_id, epoch) => { - let len = downloaded_blocks.len(); - let start_slot = if len > 0 { - downloaded_blocks[0].message.slot.as_u64() - } else { - 0 - }; - let end_slot = if len > 0 { - downloaded_blocks[len - 1].message.slot.as_u64() - } else { - 0 - }; + let start_slot = downloaded_blocks.first().map(|b| b.message.slot.as_u64()); + let end_slot = downloaded_blocks.last().map(|b| b.message.slot.as_u64()); + let sent_blocks = downloaded_blocks.len(); - debug!(log, "Processing batch"; "batch_epoch" => epoch, "blocks" => downloaded_blocks.len(), "first_block_slot" => start_slot, "last_block_slot" => end_slot, "service" => "sync"); let result = match 
process_blocks(chain, downloaded_blocks.iter(), &log) { (_, Ok(_)) => { - debug!(log, "Batch processed"; "batch_epoch" => epoch , "first_block_slot" => start_slot, "last_block_slot" => end_slot, "service"=> "sync"); - BatchProcessResult::Success - } - (imported_blocks, Err(e)) if imported_blocks > 0 => { - debug!(log, "Batch processing failed but imported some blocks"; - "batch_epoch" => epoch, "error" => e, "imported_blocks"=> imported_blocks, "service" => "sync"); - BatchProcessResult::Partial + debug!(log, "Batch processed"; "batch_epoch" => epoch, "first_block_slot" => start_slot, + "last_block_slot" => end_slot, "processed_blocks" => sent_blocks, "service"=> "sync"); + BatchProcessResult::Success(sent_blocks > 0) } - (_, Err(e)) => { - debug!(log, "Batch processing failed"; "batch_epoch" => epoch, "error" => e, "service" => "sync"); - BatchProcessResult::Failed + (imported_blocks, Err(e)) => { + debug!(log, "Batch processing failed"; "batch_epoch" => epoch, "first_block_slot" => start_slot, + "last_block_slot" => end_slot, "error" => e, "imported_blocks" => imported_blocks, "service" => "sync"); + BatchProcessResult::Failed(imported_blocks > 0) } }; let msg = SyncMessage::BatchProcessed { chain_id, epoch, - downloaded_blocks, result, }; sync_send.send(msg).unwrap_or_else(|_| { @@ -70,7 +57,7 @@ pub fn handle_chain_segment( ); }); } - // this a parent lookup request from the sync manager + // this is a parent lookup request from the sync manager ProcessId::ParentLookup(peer_id, chain_head) => { debug!( log, "Processing parent lookup"; @@ -81,7 +68,7 @@ pub fn handle_chain_segment( // reverse match process_blocks(chain, downloaded_blocks.iter().rev(), &log) { (_, Err(e)) => { - debug!(log, "Parent lookup failed"; "last_peer_id" => format!("{}", peer_id), "error" => e); + debug!(log, "Parent lookup failed"; "last_peer_id" => %peer_id, "error" => e); sync_send .send(SyncMessage::ParentLookupFailed{peer_id, chain_head}) .unwrap_or_else(|_| { @@ -114,13 +101,7 @@ fn 
process_blocks< match chain.process_chain_segment(blocks) { ChainSegmentResult::Successful { imported_blocks } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); - if imported_blocks == 0 { - debug!(log, "All blocks already known"); - } else { - debug!( - log, "Imported blocks from network"; - "count" => imported_blocks, - ); + if imported_blocks > 0 { // Batch completed successfully with at least one block, run fork choice. run_fork_choice(chain, log); } @@ -153,7 +134,7 @@ fn run_fork_choice(chain: Arc>, log: &slog:: Err(e) => error!( log, "Fork choice failed"; - "error" => format!("{:?}", e), + "error" => ?e, "location" => "batch import error" ), } @@ -219,7 +200,7 @@ fn handle_failed_chain_segment( warn!( log, "BlockProcessingFailure"; "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", e) + "outcome" => ?e, ); Err(format!("Internal error whilst processing block: {:?}", e)) @@ -228,7 +209,7 @@ fn handle_failed_chain_segment( debug!( log, "Invalid block received"; "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", other), + "outcome" => %other, ); Err(format!("Peer sent invalid block. Reason: {:?}", other)) diff --git a/beacon_node/network/src/beacon_processor/worker.rs b/beacon_node/network/src/beacon_processor/worker.rs index 6388962e595..79d5d53c5fd 100644 --- a/beacon_node/network/src/beacon_processor/worker.rs +++ b/beacon_node/network/src/beacon_processor/worker.rs @@ -535,9 +535,10 @@ impl Worker { /// /// Creates a log if there is an interal error. 
fn send_sync_message(&self, message: SyncMessage) { - self.sync_tx - .send(message) - .unwrap_or_else(|_| error!(self.log, "Could not send message to the sync service")); + self.sync_tx.send(message).unwrap_or_else(|e| { + error!(self.log, "Could not send message to the sync service"; + "error" => %e) + }); } /// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 9d83c55766d..36b799c8dc5 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -82,10 +82,11 @@ impl Processor { } fn send_to_sync(&mut self, message: SyncMessage) { - self.sync_send.send(message).unwrap_or_else(|_| { + self.sync_send.send(message).unwrap_or_else(|e| { warn!( self.log, "Could not send message to the sync service"; + "error" => %e, ) }); } @@ -691,9 +692,10 @@ impl HandlerNetworkContext { /// Sends a message to the network task. fn inform_network(&mut self, msg: NetworkMessage) { + let msg_r = &format!("{:?}", msg); self.network_send .send(msg) - .unwrap_or_else(|_| warn!(self.log, "Could not send message to the network service")) + .unwrap_or_else(|e| warn!(self.log, "Could not send message to the network service"; "error" => %e, "message" => msg_r)) } /// Disconnects and ban's a peer, sending a Goodbye request with the associated reason. diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 84f807007fa..a018750f3f4 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -35,6 +35,9 @@ pub enum NetworkMessage { Subscribe { subscriptions: Vec, }, + /// Subscribes the beacon node to the core gossipsub topics. We do this when we are either + /// synced or close to the head slot. + SubscribeCoreTopics, /// Send an RPC request to the libp2p service. 
SendRequest { peer_id: PeerId, @@ -278,6 +281,21 @@ fn spawn_service( warn!(service.log, "Validator subscription failed"; "error" => e); } } + NetworkMessage::SubscribeCoreTopics => { + let mut subscribed_topics: Vec = vec![]; + let already_subscribed = service.network_globals.gossipsub_subscriptions.read().clone(); + let already_subscribed = already_subscribed.iter().map(|x| x.kind()).collect::>(); + for topic_kind in eth2_libp2p::types::CORE_TOPICS.iter().filter(|topic| already_subscribed.get(topic).is_none()) { + if service.libp2p.swarm.subscribe_kind(topic_kind.clone()) { + subscribed_topics.push(topic_kind.clone()); + } else { + warn!(service.log, "Could not subscribe to topic"; "topic" => format!("{}",topic_kind)); + } + } + if !subscribed_topics.is_empty() { + info!(service.log, "Subscribed to topics"; "topics" => format!("{:?}", subscribed_topics)); + } + } } } // process any attestation service events diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 3aa5577d70c..a2f47929285 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -29,9 +29,9 @@ //! //! Block Lookup //! -//! To keep the logic maintained to the syncing thread (and manage the request_ids), when a block needs to be searched for (i.e -//! if an attestation references an unknown block) this manager can search for the block and -//! subsequently search for parents if needed. +//! To keep the logic maintained to the syncing thread (and manage the request_ids), when a block +//! needs to be searched for (i.e if an attestation references an unknown block) this manager can +//! search for the block and subsequently search for parents if needed. 
use super::network_context::SyncNetworkContext; use super::peer_sync_info::{PeerSyncInfo, PeerSyncType}; @@ -57,7 +57,11 @@ use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a /// fully sync'd peer. -pub const SLOT_IMPORT_TOLERANCE: usize = 20; +/// +/// This means that we consider ourselves synced (and hence subscribe to all subnets and block +/// gossip if no peers are further than this range ahead of us that we have not already downloaded +/// blocks for. +pub const SLOT_IMPORT_TOLERANCE: usize = 32; /// How many attempts we try to find a parent of a block before we give up trying . const PARENT_FAIL_TOLERANCE: usize = 5; /// The maximum depth we will search for a parent block. In principle we should have sync'd any @@ -102,7 +106,6 @@ pub enum SyncMessage { BatchProcessed { chain_id: ChainId, epoch: Epoch, - downloaded_blocks: Vec>, result: BatchProcessResult, }, @@ -119,12 +122,10 @@ pub enum SyncMessage { // TODO: When correct batch error handling occurs, we will include an error type. #[derive(Debug)] pub enum BatchProcessResult { - /// The batch was completed successfully. - Success, - /// The batch processing failed. - Failed, - /// The batch processing failed but managed to import at least one block. - Partial, + /// The batch was completed successfully. It carries whether the sent batch contained blocks. + Success(bool), + /// The batch processing failed. It carries whether the processing imported any block. + Failed(bool), } /// Maintains a sequential list of parents to lookup and the lookup's current state. @@ -137,7 +138,7 @@ struct ParentRequests { failed_attempts: usize, /// The peer who last submitted a block. If the chain ends or fails, this is the peer that is - /// downvoted. + /// penalized. 
last_submitted_peer: PeerId, /// The request ID of this lookup is in progress. @@ -271,21 +272,21 @@ impl SyncManager { match local_peer_info.peer_sync_type(&remote) { PeerSyncType::FullySynced => { trace!(self.log, "Peer synced to our head found"; - "peer" => format!("{:?}", peer_id), - "peer_head_slot" => remote.head_slot, - "local_head_slot" => local_peer_info.head_slot, + "peer" => %peer_id, + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local_peer_info.head_slot, ); self.synced_peer(&peer_id, remote); // notify the range sync that a peer has been added - self.range_sync.fully_synced_peer_found(); + self.range_sync.fully_synced_peer_found(&mut self.network); } PeerSyncType::Advanced => { trace!(self.log, "Useful peer for sync found"; - "peer" => format!("{:?}", peer_id), - "peer_head_slot" => remote.head_slot, - "local_head_slot" => local_peer_info.head_slot, - "peer_finalized_epoch" => remote.finalized_epoch, - "local_finalized_epoch" => local_peer_info.finalized_epoch, + "peer" => %peer_id, + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local_peer_info.head_slot, + "peer_finalized_epoch" => remote.finalized_epoch, + "local_finalized_epoch" => local_peer_info.finalized_epoch, ); // There are few cases to handle here: @@ -303,7 +304,7 @@ impl SyncManager { { self.synced_peer(&peer_id, remote); // notify the range sync that a peer has been added - self.range_sync.fully_synced_peer_found(); + self.range_sync.fully_synced_peer_found(&mut self.network); } else { // Add the peer to our RangeSync self.range_sync @@ -675,6 +676,10 @@ impl SyncManager { fn update_sync_state(&mut self) { if let Some((old_state, new_state)) = self.network_globals.update_sync_state() { info!(self.log, "Sync state updated"; "old_state" => format!("{}", old_state), "new_state" => format!("{}",new_state)); + // If we have become synced - Subscribe to all the core subnet topics + if new_state == eth2_libp2p::types::SyncState::Synced { + 
self.network.subscribe_core_topics(); + } } } @@ -900,14 +905,12 @@ impl SyncManager { SyncMessage::BatchProcessed { chain_id, epoch, - downloaded_blocks, result, } => { self.range_sync.handle_block_process_result( &mut self.network, chain_id, epoch, - downloaded_blocks, result, ); } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index caccce4e614..715344eb18c 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1,11 +1,14 @@ //! Provides network functionality for the Syncing thread. This fundamentally wraps a network //! channel and stores a global RPC ID to perform requests. +use super::range_sync::{BatchId, ChainId}; +use super::RequestId as SyncRequestId; use crate::router::processor::status_message; use crate::service::NetworkMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, RequestId}; use eth2_libp2p::{Client, NetworkGlobals, PeerAction, PeerId, Request}; +use fnv::FnvHashMap; use slog::{debug, trace, warn}; use std::sync::Arc; use tokio::sync::mpsc; @@ -21,7 +24,11 @@ pub struct SyncNetworkContext { network_globals: Arc>, /// A sequential ID for all RPC requests. - request_id: usize, + request_id: SyncRequestId, + + /// BlocksByRange requests made by range syncing chains. + range_requests: FnvHashMap, + /// Logger for the `SyncNetworkContext`. 
log: slog::Logger, } @@ -36,6 +43,7 @@ impl SyncNetworkContext { network_send, network_globals, request_id: 1, + range_requests: FnvHashMap::default(), log, } } @@ -50,24 +58,26 @@ impl SyncNetworkContext { .unwrap_or_default() } - pub fn status_peer( + pub fn status_peers( &mut self, chain: Arc>, - peer_id: PeerId, + peers: impl Iterator, ) { if let Some(status_message) = status_message(&chain) { - debug!( - self.log, - "Sending Status Request"; - "peer" => format!("{:?}", peer_id), - "fork_digest" => format!("{:?}", status_message.fork_digest), - "finalized_root" => format!("{:?}", status_message.finalized_root), - "finalized_epoch" => format!("{:?}", status_message.finalized_epoch), - "head_root" => format!("{}", status_message.head_root), - "head_slot" => format!("{}", status_message.head_slot), - ); - - let _ = self.send_rpc_request(peer_id, Request::Status(status_message)); + for peer_id in peers { + debug!( + self.log, + "Sending Status Request"; + "peer" => %peer_id, + "fork_digest" => ?status_message.fork_digest, + "finalized_root" => ?status_message.finalized_root, + "finalized_epoch" => ?status_message.finalized_epoch, + "head_root" => %status_message.head_root, + "head_slot" => %status_message.head_slot, + ); + + let _ = self.send_rpc_request(peer_id, Request::Status(status_message.clone())); + } } } @@ -75,15 +85,34 @@ impl SyncNetworkContext { &mut self, peer_id: PeerId, request: BlocksByRangeRequest, - ) -> Result { + chain_id: ChainId, + batch_id: BatchId, + ) -> Result<(), &'static str> { trace!( self.log, "Sending BlocksByRange Request"; "method" => "BlocksByRange", "count" => request.count, - "peer" => format!("{:?}", peer_id) + "peer" => %peer_id, ); - self.send_rpc_request(peer_id, Request::BlocksByRange(request)) + let req_id = self.send_rpc_request(peer_id, Request::BlocksByRange(request))?; + self.range_requests.insert(req_id, (chain_id, batch_id)); + Ok(()) + } + + pub fn blocks_by_range_response( + &mut self, + request_id: usize, + remove: 
bool, + ) -> Option<(ChainId, BatchId)> { + // NOTE: we can't guarantee that the request must be registered as it could receive more + // than an error, and be removed after receiving the first one. + // FIXME: https://github.com/sigp/lighthouse/issues/1634 + if remove { + self.range_requests.remove(&request_id) + } else { + self.range_requests.get(&request_id).cloned() + } } pub fn blocks_by_root_request( @@ -113,8 +142,8 @@ impl SyncNetworkContext { debug!(self.log, "Sync reporting peer"; "peer_id" => peer_id.to_string(), "action" => action.to_string()); self.network_send .send(NetworkMessage::ReportPeer { peer_id, action }) - .unwrap_or_else(|_| { - warn!(self.log, "Could not report peer, channel failed"); + .unwrap_or_else(|e| { + warn!(self.log, "Could not report peer, channel failed"; "error"=> e.to_string()); }); } @@ -133,6 +162,14 @@ impl SyncNetworkContext { Ok(request_id) } + pub fn subscribe_core_topics(&mut self) { + self.network_send + .send(NetworkMessage::SubscribeCoreTopics) + .unwrap_or_else(|e| { + warn!(self.log, "Could not subscribe to core topics."; "error" => e.to_string()); + }); + } + fn send_network_msg(&mut self, msg: NetworkMessage) -> Result<(), &'static str> { self.network_send.send(msg).map_err(|_| { debug!(self.log, "Could not send message to the network service"); diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 1fa312c579a..532dafd2e48 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,173 +1,342 @@ -use super::chain::EPOCHS_PER_BATCH; -use eth2_libp2p::rpc::methods::*; +use eth2_libp2p::rpc::methods::BlocksByRangeRequest; use eth2_libp2p::PeerId; -use fnv::FnvHashMap; use ssz::Encode; -use std::cmp::min; -use std::cmp::Ordering; -use std::collections::hash_map::Entry; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use std::hash::{Hash, Hasher}; use 
std::ops::Sub; use types::{Epoch, EthSpec, SignedBeaconBlock, Slot}; -/// A collection of sequential blocks that are requested from peers in a single RPC request. -#[derive(PartialEq, Debug)] -pub struct Batch { - /// The requested start epoch of the batch. - pub start_epoch: Epoch, - /// The requested end slot of batch, exclusive. - pub end_slot: Slot, - /// The `Attempts` that have been made to send us this batch. - pub attempts: Vec, - /// The peer that is currently assigned to the batch. - pub current_peer: PeerId, - /// The number of retries this batch has undergone due to a failed request. - /// This occurs when peers do not respond or we get an RPC error. - pub retries: u8, - /// The number of times this batch has attempted to be re-downloaded and re-processed. This - /// occurs when a batch has been received but cannot be processed. - pub reprocess_retries: u8, - /// The blocks that have been downloaded. - pub downloaded_blocks: Vec>, +/// The number of times to retry a batch before it is considered failed. +const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5; + +/// Invalid batches are attempted to be re-downloaded from other peers. If a batch cannot be processed +/// after `MAX_BATCH_PROCESSING_ATTEMPTS` times, it is considered faulty. +const MAX_BATCH_PROCESSING_ATTEMPTS: u8 = 3; + +/// A segment of a chain. +pub struct BatchInfo { + /// Start slot of the batch. + start_slot: Slot, + /// End slot of the batch. + end_slot: Slot, + /// The `Attempts` that have been made and failed to send us this batch. + failed_processing_attempts: Vec, + /// The number of download retries this batch has undergone due to a failed request. + failed_download_attempts: Vec, + /// State of the batch. + state: BatchState, } -/// Represents a peer's attempt and providing the result for this batch. -/// -/// Invalid attempts will downscore a peer. -#[derive(PartialEq, Debug)] -pub struct Attempt { - /// The peer that made the attempt. 
- pub peer_id: PeerId, - /// The hash of the blocks of the attempt. - pub hash: u64, +/// Current state of a batch +pub enum BatchState { + /// The batch has failed either downloading or processing, but can be requested again. + AwaitingDownload, + /// The batch is being downloaded. + Downloading(PeerId, Vec>), + /// The batch has been completely downloaded and is ready for processing. + AwaitingProcessing(PeerId, Vec>), + /// The batch is being processed. + Processing(Attempt), + /// The batch was successfully processed and is waiting to be validated. + /// + /// It is not sufficient to process a batch successfully to consider it correct. This is + /// because batches could be erroneously empty, or incomplete. Therefore, a batch is considered + /// valid, only if the next sequential batch imports at least a block. + AwaitingValidation(Attempt), + /// Intermediate state for inner state handling. + Poisoned, + /// The batch has maxed out the allowed attempts for either downloading or processing. It + /// cannot be recovered. + Failed, } -impl Eq for Batch {} +impl BatchState { + /// Helper function for poisoning a state. + pub fn poison(&mut self) -> BatchState { + std::mem::replace(self, BatchState::Poisoned) + } +} -impl Batch { - pub fn new(start_epoch: Epoch, end_slot: Slot, peer_id: PeerId) -> Self { - Batch { - start_epoch, +impl BatchInfo { + /// Batches are downloaded excluding the first block of the epoch assuming it has already been + /// downloaded. + /// + /// For example: + /// + /// Epoch boundary | | + /// ... | 30 | 31 | 32 | 33 | 34 | ... 
| 61 | 62 | 63 | 64 | 65 | + /// Batch 1 | Batch 2 | Batch 3 + pub fn new(start_epoch: &Epoch, num_of_epochs: u64) -> Self { + let start_slot = start_epoch.start_slot(T::slots_per_epoch()) + 1; + let end_slot = start_slot + num_of_epochs * T::slots_per_epoch(); + BatchInfo { + start_slot, end_slot, - attempts: Vec::new(), - current_peer: peer_id, - retries: 0, - reprocess_retries: 0, - downloaded_blocks: Vec::new(), + failed_processing_attempts: Vec::new(), + failed_download_attempts: Vec::new(), + state: BatchState::AwaitingDownload, } } - pub fn start_slot(&self) -> Slot { - // batches are shifted by 1 - self.start_epoch.start_slot(T::slots_per_epoch()) + 1 + /// Gives a list of peers from which this batch has had a failed download or processing + /// attempt. + pub fn failed_peers(&self) -> HashSet { + let mut peers = HashSet::with_capacity( + self.failed_processing_attempts.len() + self.failed_download_attempts.len(), + ); + + for attempt in &self.failed_processing_attempts { + peers.insert(attempt.peer_id.clone()); + } + + for download in &self.failed_download_attempts { + peers.insert(download.clone()); + } + + peers } - pub fn end_slot(&self) -> Slot { - self.end_slot + pub fn current_peer(&self) -> Option<&PeerId> { + match &self.state { + BatchState::AwaitingDownload | BatchState::Failed => None, + BatchState::Downloading(peer_id, _) + | BatchState::AwaitingProcessing(peer_id, _) + | BatchState::Processing(Attempt { peer_id, .. }) + | BatchState::AwaitingValidation(Attempt { peer_id, .. 
}) => Some(&peer_id), + BatchState::Poisoned => unreachable!("Poisoned batch"), + } } + pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest { - let start_slot = self.start_slot(); BlocksByRangeRequest { - start_slot: start_slot.into(), - count: min( - T::slots_per_epoch() * EPOCHS_PER_BATCH, - self.end_slot.sub(start_slot).into(), - ), + start_slot: self.start_slot.into(), + count: self.end_slot.sub(self.start_slot).into(), step: 1, } } - /// This gets a hash that represents the blocks currently downloaded. This allows comparing a - /// previously downloaded batch of blocks with a new downloaded batch of blocks. - pub fn hash(&self) -> u64 { - // the hash used is the ssz-encoded list of blocks - let mut hasher = std::collections::hash_map::DefaultHasher::new(); - self.downloaded_blocks.as_ssz_bytes().hash(&mut hasher); - hasher.finish() + pub fn state(&self) -> &BatchState { + &self.state } -} -impl Ord for Batch { - fn cmp(&self, other: &Self) -> Ordering { - self.start_epoch.cmp(&other.start_epoch) + pub fn attempts(&self) -> &[Attempt] { + &self.failed_processing_attempts } -} -impl PartialOrd for Batch { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) + /// Adds a block to a downloading batch. + pub fn add_block(&mut self, block: SignedBeaconBlock) { + match self.state.poison() { + BatchState::Downloading(peer, mut blocks) => { + blocks.push(block); + self.state = BatchState::Downloading(peer, blocks) + } + other => unreachable!("Add block for batch in wrong state: {:?}", other), + } } -} -/// A structure that contains a mapping of pending batch requests, that also keeps track of which -/// peers are currently making batch requests. -/// -/// This is used to optimise searches for idle peers (peers that have no outbound batch requests). -pub struct PendingBatches { - /// The current pending batches. - batches: FnvHashMap>, - /// A mapping of peers to the number of pending requests. 
- peer_requests: HashMap>, -} + /// Marks the batch as ready to be processed if the blocks are in the range. The number of + /// received blocks is returned, or the wrong batch end on failure + #[must_use = "Batch may have failed"] + pub fn download_completed( + &mut self, + ) -> Result< + usize, /* Received blocks */ + ( + Slot, /* expected slot */ + Slot, /* received slot */ + &BatchState, + ), + > { + match self.state.poison() { + BatchState::Downloading(peer, blocks) => { + // verify that blocks are in range + if let Some(last_slot) = blocks.last().map(|b| b.slot()) { + // the batch is non-empty + let first_slot = blocks[0].slot(); + + let failed_range = if first_slot < self.start_slot { + Some((self.start_slot, first_slot)) + } else if self.end_slot < last_slot { + Some((self.end_slot, last_slot)) + } else { + None + }; -impl PendingBatches { - pub fn new() -> Self { - PendingBatches { - batches: FnvHashMap::default(), - peer_requests: HashMap::new(), + if let Some(range) = failed_range { + // this is a failed download, register the attempt and check if the batch + // can be tried again + self.failed_download_attempts.push(peer); + self.state = if self.failed_download_attempts.len() + >= MAX_BATCH_DOWNLOAD_ATTEMPTS as usize + { + BatchState::Failed + } else { + // drop the blocks + BatchState::AwaitingDownload + }; + return Err((range.0, range.1, &self.state)); + } + } + + let received = blocks.len(); + self.state = BatchState::AwaitingProcessing(peer, blocks); + Ok(received) + } + other => unreachable!("Download completed for batch in wrong state: {:?}", other), } } - pub fn insert(&mut self, request_id: usize, batch: Batch) -> Option> { - let peer_request = batch.current_peer.clone(); - self.peer_requests - .entry(peer_request) - .or_insert_with(HashSet::new) - .insert(request_id); - self.batches.insert(request_id, batch) + #[must_use = "Batch may have failed"] + pub fn download_failed(&mut self) -> &BatchState { + match self.state.poison() { + 
BatchState::Downloading(peer, _) => { + // register the attempt and check if the batch can be tried again + self.failed_download_attempts.push(peer); + self.state = if self.failed_download_attempts.len() + >= MAX_BATCH_DOWNLOAD_ATTEMPTS as usize + { + BatchState::Failed + } else { + // drop the blocks + BatchState::AwaitingDownload + }; + &self.state + } + other => unreachable!("Download failed for batch in wrong state: {:?}", other), + } } - pub fn remove(&mut self, request_id: usize) -> Option> { - if let Some(batch) = self.batches.remove(&request_id) { - if let Entry::Occupied(mut entry) = self.peer_requests.entry(batch.current_peer.clone()) - { - entry.get_mut().remove(&request_id); + pub fn start_downloading_from_peer(&mut self, peer: PeerId) { + match self.state.poison() { + BatchState::AwaitingDownload => { + self.state = BatchState::Downloading(peer, Vec::new()); + } + other => unreachable!("Starting download for batch in wrong state: {:?}", other), + } + } - if entry.get().is_empty() { - entry.remove(); - } + pub fn start_processing(&mut self) -> Vec> { + match self.state.poison() { + BatchState::AwaitingProcessing(peer, blocks) => { + self.state = BatchState::Processing(Attempt::new(peer, &blocks)); + blocks } - Some(batch) - } else { - None + other => unreachable!("Start processing for batch in wrong state: {:?}", other), } } - /// The number of current pending batch requests. 
- pub fn len(&self) -> usize { - self.batches.len() + #[must_use = "Batch may have failed"] + pub fn processing_completed(&mut self, was_sucessful: bool) -> &BatchState { + match self.state.poison() { + BatchState::Processing(attempt) => { + self.state = if !was_sucessful { + // register the failed attempt + self.failed_processing_attempts.push(attempt); + + // check if the batch can be downloaded again + if self.failed_processing_attempts.len() + >= MAX_BATCH_PROCESSING_ATTEMPTS as usize + { + BatchState::Failed + } else { + BatchState::AwaitingDownload + } + } else { + BatchState::AwaitingValidation(attempt) + }; + &self.state + } + other => unreachable!("Processing completed for batch in wrong state: {:?}", other), + } } - /// Adds a block to the batches if the request id exists. Returns None if there is no batch - /// matching the request id. - pub fn add_block(&mut self, request_id: usize, block: SignedBeaconBlock) -> Option<()> { - let batch = self.batches.get_mut(&request_id)?; - batch.downloaded_blocks.push(block); - Some(()) + #[must_use = "Batch may have failed"] + pub fn validation_failed(&mut self) -> &BatchState { + match self.state.poison() { + BatchState::AwaitingValidation(attempt) => { + self.failed_processing_attempts.push(attempt); + + // check if the batch can be downloaded again + self.state = if self.failed_processing_attempts.len() + >= MAX_BATCH_PROCESSING_ATTEMPTS as usize + { + BatchState::Failed + } else { + BatchState::AwaitingDownload + }; + &self.state + } + other => unreachable!("Validation failed for batch in wrong state: {:?}", other), + } } +} - /// Returns true if there the peer does not exist in the peer_requests mapping. Indicating it - /// has no pending outgoing requests. - pub fn peer_is_idle(&self, peer_id: &PeerId) -> bool { - self.peer_requests.get(peer_id).is_none() +/// Represents a peer's attempt and providing the result for this batch. +/// +/// Invalid attempts will downscore a peer. 
+#[derive(PartialEq, Debug)] +pub struct Attempt { + /// The peer that made the attempt. + pub peer_id: PeerId, + /// The hash of the blocks of the attempt. + pub hash: u64, +} + +impl Attempt { + #[allow(clippy::ptr_arg)] + fn new(peer_id: PeerId, blocks: &Vec>) -> Self { + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + blocks.as_ssz_bytes().hash(&mut hasher); + let hash = hasher.finish(); + Attempt { peer_id, hash } + } +} + +impl slog::KV for &mut BatchInfo { + fn serialize( + &self, + record: &slog::Record, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + slog::KV::serialize(*self, record, serializer) } +} - /// Removes a batch for a given peer. - pub fn remove_batch_by_peer(&mut self, peer_id: &PeerId) -> Option> { - let request_ids = self.peer_requests.get(peer_id)?; +impl slog::KV for BatchInfo { + fn serialize( + &self, + record: &slog::Record, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + use slog::Value; + Value::serialize(&self.start_slot, record, "start_slot", serializer)?; + Value::serialize( + &(self.end_slot - 1), // NOTE: The -1 shows inclusive blocks + record, + "end_slot", + serializer, + )?; + serializer.emit_usize("downloaded", self.failed_download_attempts.len())?; + serializer.emit_usize("processed", self.failed_processing_attempts.len())?; + serializer.emit_str("state", &format!("{:?}", self.state))?; + slog::Result::Ok(()) + } +} - let request_id = *request_ids.iter().next()?; - self.remove(request_id) +impl std::fmt::Debug for BatchState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BatchState::Processing(_) => f.write_str("Processing"), + BatchState::AwaitingValidation(_) => f.write_str("AwaitingValidation"), + BatchState::AwaitingDownload => f.write_str("AwaitingDownload"), + BatchState::Failed => f.write_str("Failed"), + BatchState::AwaitingProcessing(ref peer, ref blocks) => { + write!(f, "AwaitingProcessing({}, {} blocks)", peer, 
blocks.len()) + } + BatchState::Downloading(peer, blocks) => { + write!(f, "Downloading({}, {} blocks)", peer, blocks.len()) + } + BatchState::Poisoned => f.write_str("Poisoned"), + } } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index d79dff469b2..4decdc212a5 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,40 +1,36 @@ -use super::batch::{Batch, PendingBatches}; +use super::batch::{BatchInfo, BatchState}; use crate::beacon_processor::ProcessId; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; -use crate::sync::RequestId; use crate::sync::{network_context::SyncNetworkContext, BatchProcessResult}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::{PeerAction, PeerId}; -use rand::prelude::*; -use slog::{crit, debug, error, warn}; -use std::collections::HashSet; +use fnv::FnvHashMap; +use rand::seq::SliceRandom; +use slog::{crit, debug, o, warn}; +use std::collections::{btree_map::Entry, BTreeMap, HashSet}; +use std::hash::{Hash, Hasher}; use std::sync::Arc; -use tokio::sync::mpsc; +use tokio::sync::mpsc::Sender; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of /// blocks per batch are requested _at most_. A batch may request less blocks to account for /// already requested slots. There is a timeout for each batch request. If this value is too high, -/// we will negatively report peers with poor bandwidth. This can be set arbitrarily high, in which case the -/// responder will fill the response up to the max request size, assuming they have the bandwidth -/// to do so. -pub const EPOCHS_PER_BATCH: u64 = 2; - -/// The number of times to retry a batch before the chain is considered failed and removed. 
-const MAX_BATCH_RETRIES: u8 = 5; +/// we will negatively report peers with poor bandwidth. This can be set arbitrarily high, in which +/// case the responder will fill the response up to the max request size, assuming they have the +/// bandwidth to do so. +pub const EPOCHS_PER_BATCH: u64 = 8; /// The maximum number of batches to queue before requesting more. const BATCH_BUFFER_SIZE: u8 = 5; -/// Invalid batches are attempted to be re-downloaded from other peers. If they cannot be processed -/// after `INVALID_BATCH_LOOKUP_ATTEMPTS` times, the chain is considered faulty and all peers will -/// be reported negatively. -const INVALID_BATCH_LOOKUP_ATTEMPTS: u8 = 3; - /// A return type for functions that act on a `Chain` which informs the caller whether the chain /// has been completed and should be removed or to be kept if further processing is /// required. #[derive(PartialEq)] +#[must_use = "Should be checked, since a failed chain must be removed. A chain that requested + being removed and continued is now in an inconsistent state"] + pub enum ProcessingResult { KeepChain, RemoveChain, @@ -42,6 +38,7 @@ pub enum ProcessingResult { /// A chain identifier pub type ChainId = u64; +pub type BatchId = Epoch; /// A chain of blocks that need to be downloaded. Peers who claim to contain the target head /// root are grouped into the peer pool and queried for batches when downloading the @@ -50,7 +47,7 @@ pub struct SyncingChain { /// A random id used to identify this chain. id: ChainId, - /// The original start slot when this chain was initialised. + /// The start of the chain segment. Any epoch previous to this one has been validated. pub start_epoch: Epoch, /// The target head slot. @@ -59,35 +56,37 @@ pub struct SyncingChain { /// The target head root. pub target_head_root: Hash256, - /// The batches that are currently awaiting a response from a peer. An RPC request for these - /// has been sent. 
- pub pending_batches: PendingBatches, - - /// The batches that have been downloaded and are awaiting processing and/or validation. - completed_batches: Vec>, - - /// Batches that have been processed and awaiting validation before being removed. - processed_batches: Vec>, + /// Sorted map of batches undergoing some kind of processing. + batches: BTreeMap>, /// The peers that agree on the `target_head_slot` and `target_head_root` as a canonical chain - /// and thus available to download this chain from. - pub peer_pool: HashSet, + /// and thus available to download this chain from, as well as the batches we are currently + /// requesting. + peers: FnvHashMap>, /// Starting epoch of the next batch that needs to be downloaded. - to_be_downloaded: Epoch, + to_be_downloaded: BatchId, /// Starting epoch of the batch that needs to be processed next. /// This is incremented as the chain advances. - processing_target: Epoch, + processing_target: BatchId, + + /// Optimistic head to sync. + /// If a block is imported for this batch, the chain advances to this point. + optimistic_start: Option, + + /// When a batch for an optimistic start fails processing, it is stored to avoid trying it + /// again due to chain stopping/re-starting on chain switching. + failed_optimistic_starts: HashSet, /// The current state of the chain. pub state: ChainSyncingState, /// The current processing batch, if any. - current_processing_batch: Option>, + current_processing_batch: Option, /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: mpsc::Sender>, + beacon_processor_send: Sender>, /// A reference to the underlying beacon chain. 
chain: Arc>, @@ -105,36 +104,85 @@ pub enum ChainSyncingState { } impl SyncingChain { + pub fn id(target_root: &Hash256, target_slot: &Slot) -> u64 { + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + (target_root, target_slot).hash(&mut hasher); + hasher.finish() + } + #[allow(clippy::too_many_arguments)] pub fn new( - id: u64, start_epoch: Epoch, target_head_slot: Slot, target_head_root: Hash256, peer_id: PeerId, - beacon_processor_send: mpsc::Sender>, + beacon_processor_send: Sender>, chain: Arc>, - log: slog::Logger, + log: &slog::Logger, ) -> Self { - let mut peer_pool = HashSet::new(); - peer_pool.insert(peer_id); + let mut peers = FnvHashMap::default(); + peers.insert(peer_id, Default::default()); + + let id = SyncingChain::::id(&target_head_root, &target_head_slot); SyncingChain { id, start_epoch, target_head_slot, target_head_root, - pending_batches: PendingBatches::new(), - completed_batches: Vec::new(), - processed_batches: Vec::new(), - peer_pool, + batches: BTreeMap::new(), + peers, to_be_downloaded: start_epoch, processing_target: start_epoch, + optimistic_start: None, + failed_optimistic_starts: HashSet::default(), state: ChainSyncingState::Stopped, current_processing_batch: None, beacon_processor_send, chain, - log, + log: log.new(o!("chain" => id)), + } + } + + /// Check if the chain has peers from which to process batches. + pub fn available_peers(&self) -> usize { + self.peers.len() + } + + /// Get the chain's id. + pub fn get_id(&self) -> ChainId { + self.id + } + + /// Removes a peer from the chain. + /// If the peer has active batches, those are considered failed and re-requested. 
+ pub fn remove_peer( + &mut self, + peer_id: &PeerId, + network: &mut SyncNetworkContext, + ) -> ProcessingResult { + if let Some(batch_ids) = self.peers.remove(peer_id) { + // fail the batches + for id in batch_ids { + if let BatchState::Failed = self + .batches + .get_mut(&id) + .expect("registered batch exists") + .download_failed() + { + return ProcessingResult::RemoveChain; + } + if let ProcessingResult::RemoveChain = self.retry_batch_download(network, id) { + // drop the chain early + return ProcessingResult::RemoveChain; + } + } + } + + if self.peers.is_empty() { + ProcessingResult::RemoveChain + } else { + ProcessingResult::KeepChain } } @@ -146,127 +194,213 @@ impl SyncingChain { .start_slot(T::EthSpec::slots_per_epoch()) } - /// A batch of blocks has been received. This function gets run on all chains and should - /// return Some if the request id matches a pending request on this chain, or None if it does - /// not. - /// - /// If the request corresponds to a pending batch, this function processes the completed - /// batch. + /// A block has been received for a batch on this chain. + /// If the block correctly completes the batch it will be processed if possible. pub fn on_block_response( &mut self, network: &mut SyncNetworkContext, - request_id: RequestId, - beacon_block: &Option>, - ) -> Option<()> { + batch_id: BatchId, + peer_id: PeerId, + beacon_block: Option>, + ) -> ProcessingResult { + // check if we have this batch + let batch = match self.batches.get_mut(&batch_id) { + None => { + debug!(self.log, "Received a block for unknown batch"; "epoch" => batch_id); + // A batch might get removed when the chain advances, so this is non fatal. + return ProcessingResult::KeepChain; + } + Some(batch) => { + // A batch could be retried without the peer failing the request (disconnecting/ + // sending an error /timeout) if the peer is removed from the chain for other + // reasons. 
Check that this block belongs to the expected peer + if Some(&peer_id) != batch.current_peer() { + return ProcessingResult::KeepChain; + } + batch + } + }; + if let Some(block) = beacon_block { // This is not a stream termination, simply add the block to the request - self.pending_batches.add_block(request_id, block.clone()) + batch.add_block(block); + ProcessingResult::KeepChain } else { // A stream termination has been sent. This batch has ended. Process a completed batch. - let batch = self.pending_batches.remove(request_id)?; - self.handle_completed_batch(network, batch); - Some(()) + // Remove the request from the peer's active batches + let peer = batch + .current_peer() + .expect("Batch is downloading from a peer"); + self.peers + .get_mut(peer) + .unwrap_or_else(|| panic!("Batch is registered for the peer")) + .remove(&batch_id); + + match batch.download_completed() { + Ok(received) => { + let awaiting_batches = batch_id.saturating_sub( + self.optimistic_start + .unwrap_or_else(|| self.processing_target), + ) / EPOCHS_PER_BATCH; + debug!(self.log, "Completed batch received"; "epoch" => batch_id, "blocks" => received, "awaiting_batches" => awaiting_batches); + + // pre-emptively request more blocks from peers whilst we process current blocks, + if let ProcessingResult::RemoveChain = self.request_batches(network) { + return ProcessingResult::RemoveChain; + } + self.process_completed_batches(network) + } + Err((expected, received, state)) => { + warn!(self.log, "Batch received out of range blocks"; + "epoch" => batch_id, "expected" => expected, "received" => received); + if let BatchState::Failed = state { + return ProcessingResult::RemoveChain; + } + // this batch can't be used, so we need to request it again. + self.retry_batch_download(network, batch_id) + } + } } } - /// A completed batch has been received, process the batch. 
- /// This will return `ProcessingResult::KeepChain` if the chain has not completed or - /// failed indicating that further batches are required. - fn handle_completed_batch( + /// Sends to process the batch with the given id. + /// The batch must exist and be ready for processing + fn process_batch( &mut self, network: &mut SyncNetworkContext, - batch: Batch, - ) { - // An entire batch of blocks has been received. This functions checks to see if it can be processed, - // remove any batches waiting to be verified and if this chain is syncing, request new - // blocks for the peer. - debug!(self.log, "Completed batch received"; "epoch" => batch.start_epoch, "blocks" => &batch.downloaded_blocks.len(), "awaiting_batches" => self.completed_batches.len()); - - // verify the range of received blocks - // Note that the order of blocks is verified in block processing - if let Some(last_slot) = batch.downloaded_blocks.last().map(|b| b.slot()) { - // the batch is non-empty - let first_slot = batch.downloaded_blocks[0].slot(); - if batch.start_slot() > first_slot || batch.end_slot() < last_slot { - warn!(self.log, "BlocksByRange response returned out of range blocks"; - "response_initial_slot" => first_slot, - "requested_initial_slot" => batch.start_slot()); - // this batch can't be used, so we need to request it again. - self.failed_batch(network, batch); - return; - } + batch_id: BatchId, + ) -> ProcessingResult { + // Only process batches if this chain is Syncing, and only one at a time + if self.state != ChainSyncingState::Syncing || self.current_processing_batch.is_some() { + return ProcessingResult::KeepChain; } - // Add this completed batch to the list of completed batches. This list will then need to - // be checked if any batches can be processed and verified for errors or invalid responses - // from peers. The logic is simpler to create this ordered batch list and to then process - // the list. 
- - let insert_index = self - .completed_batches - .binary_search(&batch) - .unwrap_or_else(|index| index); - self.completed_batches.insert(insert_index, batch); - - // We have a list of completed batches. It is not sufficient to process batch successfully - // to consider the batch correct. This is because batches could be erroneously empty, or - // incomplete. Therefore, a batch is considered valid, only if the next sequential batch is - // processed successfully. Therefore the `completed_batches` will store batches that have - // already be processed but not verified and therefore have Id's less than - // `self.to_be_processed_id`. + let batch = self.batches.get_mut(&batch_id).expect("Batch exists"); - // pre-emptively request more blocks from peers whilst we process current blocks, - self.request_batches(network); + // NOTE: We send empty batches to the processor in order to trigger the block processor + // result callback. This is done, because an empty batch could end a chain and the logic + // for removing chains and checking completion is in the callback. - // Try and process any completed batches. This will spawn a new task to process any blocks - // that are ready to be processed. - self.process_completed_batches(); - } + let blocks = batch.start_processing(); + let process_id = ProcessId::RangeBatchId(self.id, batch_id); + self.current_processing_batch = Some(batch_id); - /// Tries to process any batches if there are any available and we are not currently processing - /// other batches. 
- fn process_completed_batches(&mut self) { - // Only process batches if this chain is Syncing - if self.state != ChainSyncingState::Syncing { - return; + if let Err(e) = self + .beacon_processor_send + .try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) + { + crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch", + "error" => %e, "batch" => self.processing_target); + // This is unlikely to happen but it would stall syncing since the batch now has no + // blocks to continue, and the chain is expecting a processing result that won't + // arrive. To mitigate this, (fake) fail this processing so that the batch is + // re-downloaded. + // TODO: needs better handling + self.on_batch_process_result(network, batch_id, &BatchProcessResult::Failed(false)) + } else { + ProcessingResult::KeepChain } + } - // Only process one batch at a time - if self.current_processing_batch.is_some() { - return; + /// Processes the next ready batch, prioritizing optimistic batches over the processing target. + fn process_completed_batches( + &mut self, + network: &mut SyncNetworkContext, + ) -> ProcessingResult { + // Only process batches if this chain is Syncing and only process one batch at a time + if self.state != ChainSyncingState::Syncing || self.current_processing_batch.is_some() { + return ProcessingResult::KeepChain; } - // Check if there is a batch ready to be processed - if !self.completed_batches.is_empty() - && self.completed_batches[0].start_epoch == self.processing_target - { - let batch = self.completed_batches.remove(0); - - // Note: We now send empty batches to the processor in order to trigger the block - // processor result callback. This is done, because an empty batch could end a chain - // and the logic for removing chains and checking completion is in the callback. - - // send the batch to the batch processor thread - return self.process_batch(batch); - } - } + // Find the id of the batch we are going to process. 
+ // + // First try our optimistic start, if any. If this batch is ready, we process it. If the + // batch has not already been completed, check the current chain target. + let optimistic_id = if let Some(epoch) = self.optimistic_start { + if let Some(batch) = self.batches.get(&epoch) { + let state = batch.state(); + match state { + BatchState::AwaitingProcessing(..) => { + // this batch is ready + debug!(self.log, "Processing optimistic start"; "epoch" => epoch); + Some(epoch) + } + BatchState::Downloading(..) => { + // The optimistic batch is being downloaded. We wait for this before + // attempting to process other batches. + return ProcessingResult::KeepChain; + } + BatchState::Processing(_) + | BatchState::AwaitingDownload + | BatchState::Failed + | BatchState::Poisoned + | BatchState::AwaitingValidation(_) => { + // these are all inconsistent states: + // - Processing -> `self.current_processing_batch` is Some + // - Failed -> non recoverable batch. For a optimistic batch, it should + // have been removed + // - Poisoned -> this is an intermediate state that should never be reached + // - AwaitingDownload -> A recoverable failed batch should have been + // re-requested. + // - AwaitingValidation -> If an optimistic batch is successfully processed + // it is no longer considered an optimistic candidate. If the batch was + // empty the chain rejects it; if it was non empty the chain is advanced + // to this point (so that the old optimistic batch is now the processing + // target) + unreachable!( + "Optimistic batch indicates inconsistent chain state: {:?}", + state + ) + } + } + } else { + None + } + } else { + None + }; - /// Sends a batch to the beacon processor for async processing in a queue. 
- fn process_batch(&mut self, mut batch: Batch) { - let blocks = std::mem::replace(&mut batch.downloaded_blocks, Vec::new()); - let process_id = ProcessId::RangeBatchId(self.id, batch.start_epoch); - self.current_processing_batch = Some(batch); + // if the optimistic target can't be processed, check the processing target + let id = optimistic_id.or_else(|| { + if let Some(batch) = self.batches.get(&self.processing_target) { + let state = batch.state(); + match state { + BatchState::AwaitingProcessing(..) => Some(self.processing_target), + BatchState::Downloading(..) => { + // Batch is not ready, nothing to process + None + } + BatchState::Failed + | BatchState::AwaitingDownload + | BatchState::AwaitingValidation(_) + | BatchState::Processing(_) + | BatchState::Poisoned => { + // these are all inconsistent states: + // - Failed -> non recoverable batch. Chain should have beee removed + // - AwaitingDownload -> A recoverable failed batch should have been + // re-requested. + // - AwaitingValidation -> self.processing_target should have been moved + // forward + // - Processing -> `self.current_processing_batch` is Some + // - Poisoned -> Intermediate state that should never be reached + unreachable!( + "Robust target batch indicates inconsistent chain state: {:?}", + state + ) + } + } + } else { + crit!(self.log, "Batch not found for current processing target"; + "epoch" => self.processing_target); + None + } + }); - if let Err(e) = self - .beacon_processor_send - .try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) - { - error!( - self.log, - "Failed to send chain segment to processor"; - "msg" => "process_batch", - "error" => format!("{:?}", e) - ); + // we found a batch to process + if let Some(id) = id { + self.process_batch(network, id) + } else { + ProcessingResult::KeepChain } } @@ -275,92 +409,82 @@ impl SyncingChain { pub fn on_batch_process_result( &mut self, network: &mut SyncNetworkContext, - chain_id: ChainId, - batch_start_epoch: Epoch, - 
downloaded_blocks: &mut Option>>, + batch_id: BatchId, result: &BatchProcessResult, - ) -> Option { - if chain_id != self.id { - // the result does not belong to this chain - return None; - } + ) -> ProcessingResult { + // the first two cases are possible if the chain advances while waiting for a processing + // result match &self.current_processing_batch { - Some(current_batch) if current_batch.start_epoch != batch_start_epoch => { + Some(processing_id) if *processing_id != batch_id => { debug!(self.log, "Unexpected batch result"; - "batch_epoch" => batch_start_epoch, "expected_batch_epoch" => current_batch.start_epoch); - return None; + "batch_epoch" => batch_id, "expected_batch_epoch" => processing_id); + return ProcessingResult::KeepChain; } None => { debug!(self.log, "Chain was not expecting a batch result"; - "batch_epoch" => batch_start_epoch); - return None; + "batch_epoch" => batch_id); + return ProcessingResult::KeepChain; } _ => { - // chain_id and batch_id match, continue + // batch_id matches, continue + self.current_processing_batch = None; } } - // claim the result by consuming the option - let downloaded_blocks = downloaded_blocks.take().or_else(|| { - // if taken by another chain, we are no longer waiting on a result. 
- self.current_processing_batch = None; - crit!(self.log, "Processed batch taken by another chain"); - None - })?; - - // No longer waiting on a processing result - let mut batch = self.current_processing_batch.take().unwrap(); - // These are the blocks of this batch - batch.downloaded_blocks = downloaded_blocks; - - // double check batches are processed in order TODO: Remove for prod - if batch.start_epoch != self.processing_target { - crit!(self.log, "Batch processed out of order"; - "processed_starting_epoch" => batch.start_epoch, - "expected_epoch" => self.processing_target); - } - - let res = match result { - BatchProcessResult::Success => { - self.processing_target += EPOCHS_PER_BATCH; - - // If the processed batch was not empty, we can validate previous invalidated - // blocks including the current batch. - if !batch.downloaded_blocks.is_empty() { - self.mark_processed_batches_as_valid(network, &batch); + match result { + BatchProcessResult::Success(was_non_empty) => { + let batch = self + .batches + .get_mut(&batch_id) + .expect("Chain was expecting a known batch"); + let _ = batch.processing_completed(true); + // If the processed batch was not empty, we can validate previous unvalidated + // blocks. + if *was_non_empty { + self.advance_chain(network, batch_id); + } else if let Some(epoch) = self.optimistic_start { + // check if this batch corresponds to an optimistic batch. In this case, we + // reject it as an optimistic candidate since the batch was empty + if epoch == batch_id { + if let ProcessingResult::RemoveChain = self.reject_optimistic_batch( + network, + false, /* do not re-request */ + "batch was empty", + ) { + return ProcessingResult::RemoveChain; + }; + } } - // Add the current batch to processed batches to be verified in the future. 
- self.processed_batches.push(batch); + self.processing_target += EPOCHS_PER_BATCH; // check if the chain has completed syncing if self.current_processed_slot() >= self.target_head_slot { // chain is completed + debug!(self.log, "Chain is complete"); ProcessingResult::RemoveChain } else { // chain is not completed - // attempt to request more batches - self.request_batches(network); - + if let ProcessingResult::RemoveChain = self.request_batches(network) { + return ProcessingResult::RemoveChain; + } // attempt to process more batches - self.process_completed_batches(); - - // keep the chain - ProcessingResult::KeepChain + self.process_completed_batches(network) } } - BatchProcessResult::Partial => { - warn!(self.log, "Batch processing failed but at least one block was imported"; - "batch_epoch" => batch.start_epoch, "peer" => batch.current_peer.to_string() - ); - // At least one block was successfully verified and imported, so we can be sure all - // previous batches are valid and we only need to download the current failed - // batch. - self.mark_processed_batches_as_valid(network, &batch); - - // check that we have not exceeded the re-process retry counter - if batch.reprocess_retries > INVALID_BATCH_LOOKUP_ATTEMPTS { + BatchProcessResult::Failed(imported_blocks) => { + let batch = self + .batches + .get_mut(&batch_id) + .expect("Chain was expecting a known batch"); + let peer = batch + .current_peer() + .expect("batch is processing blocks from a peer"); + debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, + "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); + if let BatchState::Failed = batch.processing_completed(false) { + // check that we have not exceeded the re-process retry counter // If a batch has exceeded the invalid batch lookup attempts limit, it means // that it is likely all peers in this chain are are sending invalid batches // repeatedly and are either malicious or faulty. 
We drop the chain and @@ -370,109 +494,156 @@ impl SyncingChain { let action = PeerAction::LowToleranceError; warn!(self.log, "Batch failed to download. Dropping chain scoring peers"; "score_adjustment" => action.to_string(), - "batch_epoch"=> batch.start_epoch); - for peer_id in self.peer_pool.drain() { - network.report_peer(peer_id, action); + "batch_epoch"=> batch_id); + for (peer, _) in self.peers.drain() { + network.report_peer(peer, action); } ProcessingResult::RemoveChain } else { + // chain can continue. Check if it can be moved forward + if *imported_blocks { + // At least one block was successfully verified and imported, so we can be sure all + // previous batches are valid and we only need to download the current failed + // batch. + self.advance_chain(network, batch_id); + } // Handle this invalid batch, that is within the re-process retries limit. - self.handle_invalid_batch(network, batch); - ProcessingResult::KeepChain + self.handle_invalid_batch(network, batch_id) } } - BatchProcessResult::Failed => { - debug!(self.log, "Batch processing failed"; - "batch_epoch" => batch.start_epoch, "peer" => batch.current_peer.to_string(), "client" => network.client_type(&batch.current_peer).to_string()); - // The batch processing failed - // This could be because this batch is invalid, or a previous invalidated batch - // is invalid. We need to find out which and downvote the peer that has sent us - // an invalid batch. - - // check that we have not exceeded the re-process retry counter - if batch.reprocess_retries > INVALID_BATCH_LOOKUP_ATTEMPTS { - // If a batch has exceeded the invalid batch lookup attempts limit, it means - // that it is likely all peers in this chain are are sending invalid batches - // repeatedly and are either malicious or faulty. We drop the chain and - // downvote all peers. - let action = PeerAction::LowToleranceError; - warn!(self.log, "Batch failed to download. 
Dropping chain scoring peers"; - "score_adjustment" => action.to_string(), - "batch_epoch" => batch.start_epoch); - for peer_id in self.peer_pool.drain() { - network.report_peer(peer_id, action); - } - ProcessingResult::RemoveChain - } else { - // Handle this invalid batch, that is within the re-process retries limit. - self.handle_invalid_batch(network, batch); - ProcessingResult::KeepChain + } + } + + fn reject_optimistic_batch( + &mut self, + network: &mut SyncNetworkContext, + redownload: bool, + reason: &str, + ) -> ProcessingResult { + if let Some(epoch) = self.optimistic_start { + self.optimistic_start = None; + self.failed_optimistic_starts.insert(epoch); + // if this batch is inside the current processing range, keep it, otherwise drop + // it. NOTE: this is done to prevent non-sequential batches coming from optimistic + // starts from filling up the buffer size + if epoch < self.to_be_downloaded { + debug!(self.log, "Rejected optimistic batch left for future use"; "epoch" => %epoch, "reason" => reason); + // this batch is now treated as any other batch, and re-requested for future use + if redownload { + return self.retry_batch_download(network, epoch); } + } else { + debug!(self.log, "Rejected optimistic batch"; "epoch" => %epoch, "reason" => reason); + self.batches.remove(&epoch); } - }; + } - Some(res) + ProcessingResult::KeepChain } - /// Removes any batches awaiting validation. + /// Removes any batches previous to the given `validating_epoch` and updates the current + /// boundaries of the chain. /// - /// All blocks in `processed_batches` should be prior batches. As the `last_batch` has been - /// processed with blocks in it, all previous batches are valid. + /// The `validating_epoch` must align with batch boundaries. /// - /// If a previous batch has been validated and it had been re-processed, downvote - /// the original peer. 
- fn mark_processed_batches_as_valid( + /// If a previous batch has been validated and it had been re-processed, penalize the original + /// peer. + fn advance_chain( &mut self, network: &mut SyncNetworkContext, - last_batch: &Batch, + validating_epoch: Epoch, ) { - while !self.processed_batches.is_empty() { - let mut processed_batch = self.processed_batches.remove(0); - if processed_batch.start_epoch >= last_batch.start_epoch { - crit!(self.log, "A processed batch had a greater id than the current process id"; - "processed_start_epoch" => processed_batch.start_epoch, - "current_start_epoch" => last_batch.start_epoch); - } + // make sure this epoch produces an advancement + if validating_epoch <= self.start_epoch { + return; + } - // Go through passed attempts and downscore peers that returned invalid batches - while !processed_batch.attempts.is_empty() { - let attempt = processed_batch.attempts.remove(0); - // The validated batch has been re-processed - if attempt.hash != processed_batch.hash() { - // The re-downloaded version was different - if processed_batch.current_peer != attempt.peer_id { - // A different peer sent the correct batch, the previous peer did not - // We negatively score the original peer. - let action = PeerAction::LowToleranceError; - debug!( - self.log, "Re-processed batch validated. Scoring original peer"; - "batch_epoch" => processed_batch.start_epoch, - "score_adjustment" => action.to_string(), - "original_peer" => format!("{}",attempt.peer_id), - "new_peer" => format!("{}", processed_batch.current_peer) - ); - network.report_peer(attempt.peer_id, action); - } else { - // The same peer corrected it's previous mistake. There was an error, so we - // negative score the original peer. 
- let action = PeerAction::MidToleranceError; - debug!( - self.log, "Re-processed batch validated by the same peer."; - "batch_epoch" => processed_batch.start_epoch, - "score_adjustment" => action.to_string(), - "original_peer" => format!("{}",attempt.peer_id), - "new_peer" => format!("{}", processed_batch.current_peer) - ); - network.report_peer(attempt.peer_id, action); + // safety check for batch boundaries + if validating_epoch % EPOCHS_PER_BATCH != self.start_epoch % EPOCHS_PER_BATCH { + crit!(self.log, "Validating Epoch is not aligned"); + } + + // batches in the range [BatchId, ..) (not yet validated) + let remaining_batches = self.batches.split_off(&validating_epoch); + // batches less than `validating_epoch` + let removed_batches = std::mem::replace(&mut self.batches, remaining_batches); + + for (id, batch) in removed_batches.into_iter() { + // only for batches awaiting validation can we be sure the last attempt is + // right, and thus, that any different attempt is wrong + match batch.state() { + BatchState::AwaitingValidation(ref processed_attempt) => { + for attempt in batch.attempts() { + // The validated batch has been re-processed + if attempt.hash != processed_attempt.hash { + // The re-downloaded version was different + if processed_attempt.peer_id != attempt.peer_id { + // A different peer sent the correct batch, the previous peer did not + // We negatively score the original peer. + let action = PeerAction::LowToleranceError; + debug!(self.log, "Re-processed batch validated. Scoring original peer"; + "batch_epoch" => id, "score_adjustment" => %action, + "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id + ); + network.report_peer(attempt.peer_id.clone(), action); + } else { + // The same peer corrected it's previous mistake. There was an error, so we + // negative score the original peer. 
+ let action = PeerAction::MidToleranceError; + debug!(self.log, "Re-processed batch validated by the same peer"; + "batch_epoch" => id, "score_adjustment" => %action, + "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id + ); + network.report_peer(attempt.peer_id.clone(), action); + } + } + } + } + BatchState::Downloading(peer, ..) => { + // remove this batch from the peer's active requests + if let Some(active_batches) = self.peers.get_mut(peer) { + active_batches.remove(&id); } } + BatchState::Failed | BatchState::Poisoned | BatchState::AwaitingDownload => { + unreachable!("batch indicates inconsistent chain state while advancing chain") + } + BatchState::AwaitingProcessing(..) => { + // TODO: can we be sure the old attempts are wrong? + } + BatchState::Processing(_) => { + assert_eq!( + id, + self.current_processing_batch.expect( + "A batch in a processing state means the chain is processing it" + ) + ); + self.current_processing_batch = None; + } } } + + self.processing_target = self.processing_target.max(validating_epoch); + let old_start = self.start_epoch; + self.start_epoch = validating_epoch; + self.to_be_downloaded = self.to_be_downloaded.max(validating_epoch); + if self.batches.contains_key(&self.to_be_downloaded) { + // if a chain is advanced by Range beyond the previous `seld.to_be_downloaded`, we + // won't have this batch, so we need to request it. + self.to_be_downloaded += EPOCHS_PER_BATCH; + } + if let Some(epoch) = self.optimistic_start { + if epoch <= validating_epoch { + self.optimistic_start = None; + } + } + debug!(self.log, "Chain advanced"; "previous_start" => old_start, + "new_start" => self.start_epoch, "processing_target" => self.processing_target); } - /// An invalid batch has been received that could not be processed. + /// An invalid batch has been received that could not be processed, but that can be retried. 
/// - /// These events occur when a peer as successfully responded with blocks, but the blocks we + /// These events occur when a peer has successfully responded with blocks, but the blocks we /// have received are incorrect or invalid. This indicates the peer has not performed as /// intended and can result in downvoting a peer. // TODO: Batches could have been partially downloaded due to RPC size-limit restrictions. We @@ -481,10 +652,10 @@ impl SyncingChain { fn handle_invalid_batch( &mut self, network: &mut SyncNetworkContext, - batch: Batch, - ) { + batch_id: BatchId, + ) -> ProcessingResult { // The current batch could not be processed, indicating either the current or previous - // batches are invalid + // batches are invalid. // The previous batch could be incomplete due to the block sizes being too large to fit in // a single RPC request or there could be consecutive empty batches which are not supposed @@ -494,70 +665,51 @@ impl SyncingChain { // potentially be faulty. If a batch returns a different result than the original and // results in successful processing, we downvote the original peer that sent us the batch. - // If all batches return the same result, we try this process INVALID_BATCH_LOOKUP_ATTEMPTS - // times before considering the entire chain invalid and downvoting all peers. 
- - // Find any pre-processed batches awaiting validation - while !self.processed_batches.is_empty() { - let past_batch = self.processed_batches.remove(0); - self.processing_target = std::cmp::min(self.processing_target, past_batch.start_epoch); - self.reprocess_batch(network, past_batch); + if let Some(epoch) = self.optimistic_start { + // If this batch is an optimistic batch, we reject this epoch as an optimistic + // candidate and try to re download it + if epoch == batch_id { + if let ProcessingResult::RemoveChain = + self.reject_optimistic_batch(network, true, "batch was invalid") + { + return ProcessingResult::RemoveChain; + } else { + // since this is the optimistic batch, we can't consider previous batches as + // invalid. + return ProcessingResult::KeepChain; + } + } + } + // this is our robust `processing_target`. All previous batches must be awaiting + // validation + let mut redownload_queue = Vec::new(); + + for (id, batch) in self.batches.range_mut(..batch_id) { + if let BatchState::Failed = batch.validation_failed() { + // remove the chain early + return ProcessingResult::RemoveChain; + } + redownload_queue.push(*id); } - // re-process the current batch - self.reprocess_batch(network, batch); - } - - /// This re-downloads and marks the batch as being re-processed. - /// - /// If the re-downloaded batch is different to the original and can be processed, the original - /// peer will be downvoted. 
- fn reprocess_batch( - &mut self, - network: &mut SyncNetworkContext, - mut batch: Batch, - ) { - // marks the batch as attempting to be reprocessed by hashing the downloaded blocks - let attempt = super::batch::Attempt { - peer_id: batch.current_peer.clone(), - hash: batch.hash(), - }; - - // add this attempt to the batch - batch.attempts.push(attempt); - - // remove previously downloaded blocks - batch.downloaded_blocks.clear(); - - // increment the re-process counter - batch.reprocess_retries += 1; + // no batch maxed out it process attempts, so now the chain's volatile progress must be + // reset + self.processing_target = self.start_epoch; - // attempt to find another peer to download the batch from (this potentially doubles up - // requests on a single peer) - let current_peer = &batch.current_peer; - let new_peer = self - .peer_pool - .iter() - .find(|peer| *peer != current_peer) - .unwrap_or_else(|| current_peer); - - batch.current_peer = new_peer.clone(); - - debug!(self.log, "Re-requesting batch"; - "start_slot" => batch.start_slot(), - "end_slot" => batch.end_slot -1, // The -1 shows inclusive blocks - "batch_epoch" => batch.start_epoch, - "peer" => batch.current_peer.to_string(), - "retries" => batch.retries, - "re-processes" => batch.reprocess_retries); - self.send_batch(network, batch); + for id in redownload_queue { + if let ProcessingResult::RemoveChain = self.retry_batch_download(network, id) { + return ProcessingResult::RemoveChain; + } + } + // finally, re-request the failed batch. + self.retry_batch_download(network, batch_id) } pub fn stop_syncing(&mut self) { self.state = ChainSyncingState::Stopped; } - // Either a new chain, or an old one with a peer list + /// Either a new chain, or an old one with a peer list /// This chain has been requested to start syncing. /// /// This could be new chain, or an old chain that is being resumed. 
@@ -565,125 +717,179 @@ impl SyncingChain { &mut self, network: &mut SyncNetworkContext, local_finalized_epoch: Epoch, - ) { - // A local finalized slot is provided as other chains may have made - // progress whilst this chain was Stopped or paused. If so, update the `processed_batch_id` to - // accommodate potentially downloaded batches from other chains. Also prune any old batches - // awaiting processing - - // If the local finalized epoch is ahead of our current processed chain, update the chain - // to start from this point and re-index all subsequent batches starting from one - // (effectively creating a new chain). - - let local_finalized_slot = local_finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let current_processed_slot = self.current_processed_slot(); - - if local_finalized_slot > current_processed_slot { - // Advance the chain to account for already downloaded blocks. - self.start_epoch = local_finalized_epoch; - - debug!(self.log, "Updating chain's progress"; - "prev_completed_slot" => current_processed_slot, - "new_completed_slot" => self.current_processed_slot()); - // Re-index batches - self.to_be_downloaded = local_finalized_epoch; - self.processing_target = local_finalized_epoch; - - // remove any completed or processed batches - self.completed_batches.clear(); - self.processed_batches.clear(); + optimistic_start_epoch: Epoch, + ) -> ProcessingResult { + // to avoid dropping local progress, we advance the chain wrt its batch boundaries. This + let align = |epoch| { + // start_epoch + (number of batches in between)*length_of_batch + self.start_epoch + ((epoch - self.start_epoch) / EPOCHS_PER_BATCH) * EPOCHS_PER_BATCH + }; + // get the *aligned* epoch that produces a batch containing the `local_finalized_epoch` + let validating_epoch = align(local_finalized_epoch); + // align the optimistic_start too. 
+ let optimistic_epoch = align(optimistic_start_epoch); + + // advance the chain to the new validating epoch + self.advance_chain(network, validating_epoch); + if self.optimistic_start.is_none() + && optimistic_epoch > self.start_epoch + && !self.failed_optimistic_starts.contains(&optimistic_epoch) + { + self.optimistic_start = Some(optimistic_epoch); } + // update the state self.state = ChainSyncingState::Syncing; - // start processing batches if needed - self.process_completed_batches(); - // begin requesting blocks from the peer pool, until all peers are exhausted. - self.request_batches(network); + if let ProcessingResult::RemoveChain = self.request_batches(network) { + return ProcessingResult::RemoveChain; + } + + // start processing batches if needed + self.process_completed_batches(network) } /// Add a peer to the chain. /// /// If the chain is active, this starts requesting batches from this peer. - pub fn add_peer(&mut self, network: &mut SyncNetworkContext, peer_id: PeerId) { - self.peer_pool.insert(peer_id.clone()); - // do not request blocks if the chain is not syncing + pub fn add_peer( + &mut self, + network: &mut SyncNetworkContext, + peer_id: PeerId, + ) -> ProcessingResult { if let ChainSyncingState::Stopped = self.state { - debug!(self.log, "Peer added to a non-syncing chain"; - "peer_id" => format!("{}", peer_id)); - return; + debug!(self.log, "Peer added to non-syncing chain"; "peer" => %peer_id) + } + // add the peer without overwriting its active requests + if self.peers.entry(peer_id).or_default().is_empty() { + // Either new or not, this peer is idle, try to request more batches + self.request_batches(network) + } else { + ProcessingResult::KeepChain } - - // find the next batch and request it from any peers if we need to - self.request_batches(network); } /// Sends a STATUS message to all peers in the peer pool. 
pub fn status_peers(&self, network: &mut SyncNetworkContext) { - for peer_id in self.peer_pool.iter() { - network.status_peer(self.chain.clone(), peer_id.clone()); - } + network.status_peers(self.chain.clone(), self.peers.keys().cloned()); } /// An RPC error has occurred. /// - /// Checks if the request_id is associated with this chain. If so, attempts to re-request the - /// batch. If the batch has exceeded the number of retries, returns - /// Some(`ProcessingResult::RemoveChain)`. Returns `None` if the request isn't related to - /// this chain. + /// If the batch exists it is re-requested. pub fn inject_error( &mut self, network: &mut SyncNetworkContext, - peer_id: &PeerId, - request_id: RequestId, - ) -> Option { - if let Some(batch) = self.pending_batches.remove(request_id) { - debug!(self.log, "Batch failed. RPC Error"; - "batch_epoch" => batch.start_epoch, - "retries" => batch.retries, - "peer" => format!("{:?}", peer_id)); - - Some(self.failed_batch(network, batch)) + batch_id: BatchId, + peer_id: PeerId, + ) -> ProcessingResult { + if let Some(batch) = self.batches.get_mut(&batch_id) { + // A batch could be retried without the peer failing the request (disconnecting/ + // sending an error /timeout) if the peer is removed from the chain for other + // reasons. Check that this block belongs to the expected peer + if Some(&peer_id) != batch.current_peer() { + return ProcessingResult::KeepChain; + } + debug!(self.log, "Batch failed. RPC Error"; "batch_epoch" => batch_id); + let failed_peer = batch + .current_peer() + .expect("Batch is downloading from a peer"); + self.peers + .get_mut(failed_peer) + .expect("Peer belongs to the chain") + .remove(&batch_id); + if let BatchState::Failed = batch.download_failed() { + return ProcessingResult::RemoveChain; + } + self.retry_batch_download(network, batch_id) } else { - None + // this could be an error for an old batch, removed when the chain advances + ProcessingResult::KeepChain } } - /// A batch has failed. 
This occurs when a network timeout happens or the peer didn't respond. - /// These events do not indicate a malicious peer, more likely simple networking issues. - /// - /// Attempts to re-request from another peer in the peer pool (if possible) and returns - /// `ProcessingResult::RemoveChain` if the number of retries on the batch exceeds - /// `MAX_BATCH_RETRIES`. - pub fn failed_batch( + /// Sends and registers the request of a batch awaiting download. + pub fn retry_batch_download( &mut self, network: &mut SyncNetworkContext, - mut batch: Batch, + batch_id: BatchId, ) -> ProcessingResult { - batch.retries += 1; + let batch = match self.batches.get_mut(&batch_id) { + Some(batch) => batch, + None => return ProcessingResult::KeepChain, + }; - if batch.retries > MAX_BATCH_RETRIES || self.peer_pool.is_empty() { - // chain is unrecoverable, remove it - ProcessingResult::RemoveChain - } else { - // try to re-process the request using a different peer, if possible - let current_peer = &batch.current_peer; - let new_peer = self - .peer_pool + // Find a peer to request the batch + let failed_peers = batch.failed_peers(); + + let new_peer = { + let mut priorized_peers = self + .peers .iter() - .find(|peer| *peer != current_peer) - .unwrap_or_else(|| current_peer); - - batch.current_peer = new_peer.clone(); - debug!(self.log, "Re-Requesting batch"; - "start_slot" => batch.start_slot(), - "end_slot" => batch.end_slot -1, // The -1 shows inclusive blocks - "batch_epoch" => batch.start_epoch, - "peer" => batch.current_peer.to_string()); - self.send_batch(network, batch); - ProcessingResult::KeepChain + .map(|(peer, requests)| (failed_peers.contains(peer), requests.len(), peer)) + .collect::>(); + // Sort peers prioritizing unrelated peers with less active requests. 
+ priorized_peers.sort_unstable(); + priorized_peers.get(0).map(|&(_, _, peer)| peer.clone()) + }; + + if let Some(peer) = new_peer { + self.send_batch(network, batch_id, peer) + } else { + // If we are here the chain has no more peers + ProcessingResult::RemoveChain + } + } + + /// Requests the batch asigned to the given id from a given peer. + pub fn send_batch( + &mut self, + network: &mut SyncNetworkContext, + batch_id: BatchId, + peer: PeerId, + ) -> ProcessingResult { + if let Some(batch) = self.batches.get_mut(&batch_id) { + let request = batch.to_blocks_by_range_request(); + // inform the batch about the new request + batch.start_downloading_from_peer(peer.clone()); + match network.blocks_by_range_request(peer.clone(), request, self.id, batch_id) { + Ok(()) => { + if self + .optimistic_start + .map(|epoch| epoch == batch_id) + .unwrap_or(false) + { + debug!(self.log, "Requesting optimistic batch"; "epoch" => batch_id, &batch); + } else { + debug!(self.log, "Requesting batch"; "epoch" => batch_id, &batch); + } + // register the batch for this peer + self.peers + .get_mut(&peer) + .expect("peer belongs to the peer pool") + .insert(batch_id); + return ProcessingResult::KeepChain; + } + Err(e) => { + // NOTE: under normal conditions this shouldn't happen but we handle it anyway + warn!(self.log, "Could not send batch request"; + "batch_id" => batch_id, "error" => e, &batch); + // register the failed download and check if the batch can be retried + self.peers + .get_mut(&peer) + .expect("peer belongs to the peer pool") + .remove(&batch_id); + if let BatchState::Failed = batch.download_failed() { + return ProcessingResult::RemoveChain; + } else { + return self.retry_batch_download(network, batch_id); + } + } + } } + + ProcessingResult::KeepChain } /// Returns true if this chain is currently syncing. @@ -696,119 +902,142 @@ impl SyncingChain { /// Attempts to request the next required batches from the peer pool if the chain is syncing. 
It will exhaust the peer /// pool and left over batches until the batch buffer is reached or all peers are exhausted. - fn request_batches(&mut self, network: &mut SyncNetworkContext) { - if let ChainSyncingState::Syncing = self.state { - while self.send_range_request(network) {} + fn request_batches( + &mut self, + network: &mut SyncNetworkContext, + ) -> ProcessingResult { + if !matches!(self.state, ChainSyncingState::Syncing) { + return ProcessingResult::KeepChain; } - } - /// Requests the next required batch from a peer. Returns true, if there was a peer available - /// to send a request and there are batches to request, false otherwise. - fn send_range_request(&mut self, network: &mut SyncNetworkContext) -> bool { // find the next pending batch and request it from the peer - if let Some(peer_id) = self.get_next_peer() { - if let Some(batch) = self.get_next_batch(peer_id) { - debug!(self.log, "Requesting batch"; - "start_slot" => batch.start_slot(), - "end_slot" => batch.end_slot -1, // The -1 shows inclusive blocks - "batch_epoch" => batch.start_epoch, - "peer" => format!("{}", batch.current_peer)); - // send the batch - self.send_batch(network, batch); - return true; - } - } - false - } - /// Returns a peer if there exists a peer which does not currently have a pending request. - /// - /// This is used to create the next request. - fn get_next_peer(&self) -> Option { - // TODO: Optimize this by combining with above two functions. // randomize the peers for load balancing let mut rng = rand::thread_rng(); - let mut peers = self.peer_pool.iter().collect::>(); - peers.shuffle(&mut rng); - for peer in peers { - if self.pending_batches.peer_is_idle(peer) { - return Some(peer.clone()); + let mut idle_peers = self + .peers + .iter() + .filter_map(|(peer, requests)| { + if requests.is_empty() { + Some(peer.clone()) + } else { + None + } + }) + .collect::>(); + idle_peers.shuffle(&mut rng); + + // check if we have the batch for our optimistic start. 
If not, request it first. + // We wait for this batch before requesting any other batches. + if let Some(epoch) = self.optimistic_start { + if !self.batches.contains_key(&epoch) { + if let Some(peer) = idle_peers.pop() { + let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH); + self.batches.insert(epoch, optimistic_batch); + if let ProcessingResult::RemoveChain = self.send_batch(network, epoch, peer) { + return ProcessingResult::RemoveChain; + } + } } + return ProcessingResult::KeepChain; } - None - } - /// Returns the next required batch from the chain if it exists. If there are no more batches - /// required, `None` is returned. - /// - /// Batches are downloaded excluding the first block of the epoch assuming it has already been - /// downloaded. - /// - /// For example: - /// - /// - /// Epoch boundary | | - /// ... | 30 | 31 | 32 | 33 | 34 | ... | 61 | 62 | 63 | 64 | 65 | - /// Batch 1 | Batch 2 | Batch 3 - fn get_next_batch(&mut self, peer_id: PeerId) -> Option> { - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let blocks_per_batch = slots_per_epoch * EPOCHS_PER_BATCH; + while let Some(peer) = idle_peers.pop() { + if let Some(batch_id) = self.include_next_batch() { + // send the batch + if let ProcessingResult::RemoveChain = self.send_batch(network, batch_id, peer) { + return ProcessingResult::RemoveChain; + } + } else { + // No more batches, simply stop + return ProcessingResult::KeepChain; + } + } + ProcessingResult::KeepChain + } + + /// Creates the next required batch from the chain. If there are no more batches required, + /// `false` is returned. 
+ fn include_next_batch(&mut self) -> Option { + // don't request batches beyond the target head slot + if self + .to_be_downloaded + .start_slot(T::EthSpec::slots_per_epoch()) + > self.target_head_slot + { + return None; + } // only request batches up to the buffer size limit + // NOTE: we don't count batches in the AwaitingValidation state, to prevent stalling sync + // if the current processing window is contained in a long range of skip slots. + let in_buffer = |batch: &BatchInfo| { + matches!( + batch.state(), + BatchState::Downloading(..) | BatchState::AwaitingProcessing(..) + ) + }; if self - .completed_batches - .len() - .saturating_add(self.pending_batches.len()) + .batches + .iter() + .filter(|&(_epoch, batch)| in_buffer(batch)) + .count() > BATCH_BUFFER_SIZE as usize { return None; } - // don't request batches beyond the target head slot - if self.to_be_downloaded.start_slot(slots_per_epoch) > self.target_head_slot { - return None; + let batch_id = self.to_be_downloaded; + // this batch could have been included already being an optimistic batch + match self.batches.entry(batch_id) { + Entry::Occupied(_) => { + // this batch doesn't need downlading, let this same function decide the next batch + self.to_be_downloaded += EPOCHS_PER_BATCH; + self.include_next_batch() + } + Entry::Vacant(entry) => { + entry.insert(BatchInfo::new(&batch_id, EPOCHS_PER_BATCH)); + self.to_be_downloaded += EPOCHS_PER_BATCH; + Some(batch_id) + } } - - // truncate the batch to the epoch containing the target head of the chain - let batch_end_slot = std::cmp::min( - // request either a batch containing the max number of blocks per batch - self.to_be_downloaded.start_slot(slots_per_epoch) + blocks_per_batch + 1, - // or a batch of one epoch of blocks, which contains the `target_head_slot` - self.target_head_slot - .saturating_add(slots_per_epoch) - .epoch(slots_per_epoch) - .start_slot(slots_per_epoch), - ); - - let batch = Some(Batch::new(self.to_be_downloaded, batch_end_slot, 
peer_id)); - self.to_be_downloaded += EPOCHS_PER_BATCH; - batch } +} - /// Requests the provided batch from the provided peer. - fn send_batch( - &mut self, - network: &mut SyncNetworkContext, - batch: Batch, - ) { - let request = batch.to_blocks_by_range_request(); +impl slog::KV for &mut SyncingChain { + fn serialize( + &self, + record: &slog::Record, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + slog::KV::serialize(*self, record, serializer) + } +} - match network.blocks_by_range_request(batch.current_peer.clone(), request) { - Ok(request_id) => { - // add the batch to pending list - self.pending_batches.insert(request_id, batch); - } - Err(e) => { - warn!(self.log, "Batch request failed"; - "start_slot" => batch.start_slot(), - "end_slot" => batch.end_slot -1, // The -1 shows inclusive blocks - "start_epoch" => batch.start_epoch, - "peer" => batch.current_peer.to_string(), - "retries" => batch.retries, - "error" => e, - "re-processes" => batch.reprocess_retries); - self.failed_batch(network, batch); - } - } +impl slog::KV for SyncingChain { + fn serialize( + &self, + record: &slog::Record, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + use slog::Value; + serializer.emit_u64("id", self.id)?; + Value::serialize(&self.start_epoch, record, "from", serializer)?; + Value::serialize( + &self.target_head_slot.epoch(T::EthSpec::slots_per_epoch()), + record, + "to", + serializer, + )?; + serializer.emit_str("end_root", &self.target_head_root.to_string())?; + Value::serialize( + &self.processing_target, + record, + "current_target", + serializer, + )?; + serializer.emit_usize("batches", self.batches.len())?; + serializer.emit_usize("peers", self.peers.len())?; + slog::Result::Ok(()) } } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 3ccbb351b70..fdfa4b8ebdf 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ 
b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -1,15 +1,18 @@ //! This provides the logic for the finalized and head chains. //! -//! Each chain type is stored in it's own vector. A variety of helper functions are given along -//! with this struct to to simplify the logic of the other layers of sync. +//! Each chain type is stored in it's own map. A variety of helper functions are given along with +//! this struct to simplify the logic of the other layers of sync. -use super::chain::{ChainSyncingState, SyncingChain}; +use super::chain::{ChainId, ChainSyncingState, ProcessingResult, SyncingChain}; +use super::sync_type::RangeSyncType; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::sync::network_context::SyncNetworkContext; use crate::sync::PeerSyncInfo; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::{types::SyncState, NetworkGlobals, PeerId}; -use slog::{debug, error, info, o}; +use fnv::FnvHashMap; +use slog::{crit, debug, error, info, trace}; +use std::collections::hash_map::Entry; use std::sync::Arc; use tokio::sync::mpsc; use types::EthSpec; @@ -83,9 +86,9 @@ pub struct ChainCollection { /// A reference to the global network parameters. network_globals: Arc>, /// The set of finalized chains being synced. - finalized_chains: Vec>, + finalized_chains: FnvHashMap>, /// The set of head chains being synced. - head_chains: Vec>, + head_chains: FnvHashMap>, /// The current sync state of the process. state: RangeSyncState, /// Logger for the collection. @@ -101,8 +104,8 @@ impl ChainCollection { ChainCollection { beacon_chain, network_globals, - finalized_chains: Vec::new(), - head_chains: Vec::new(), + finalized_chains: FnvHashMap::default(), + head_chains: FnvHashMap::default(), state: RangeSyncState::Idle, log, } @@ -113,7 +116,7 @@ impl ChainCollection { } /// Updates the global sync state and logs any changes. 
- pub fn update_sync_state(&mut self) { + pub fn update_sync_state(&mut self, network: &mut SyncNetworkContext) { // if there is no range sync occurring, the state is either synced or not based on // connected peers. @@ -129,16 +132,19 @@ impl ChainCollection { .unwrap_or_else(|| SyncState::Stalled); let mut peer_state = self.network_globals.sync_state.write(); if new_state != *peer_state { - info!(self.log, "Sync state updated"; "old_state" => format!("{}",peer_state), "new_state" => format!("{}",new_state)); + info!(self.log, "Sync state updated"; "old_state" => %peer_state, "new_state" => %new_state); + if new_state == SyncState::Synced { + network.subscribe_core_topics(); + } + *peer_state = new_state; } - *peer_state = new_state; } else { // The state is based on a range sync state, update it let mut node_sync_state = self.network_globals.sync_state.write(); let new_state: SyncState = self.state.clone().into(); if *node_sync_state != new_state { // we are updating the state, inform the user - info!(self.log, "Sync state updated"; "old_state" => format!("{}",node_sync_state), "new_state" => format!("{}",new_state)); + info!(self.log, "Sync state updated"; "old_state" => %node_sync_state, "new_state" => %new_state); } *node_sync_state = new_state; } @@ -148,12 +154,12 @@ impl ChainCollection { /// /// We could be awaiting a head sync. If we are in the head syncing state, without any head /// chains, then update the state to idle. - pub fn fully_synced_peer_found(&mut self) { + pub fn fully_synced_peer_found(&mut self, network: &mut SyncNetworkContext) { if let RangeSyncState::Head { .. } = self.state { if self.head_chains.is_empty() { // Update the global network state to either synced or stalled. self.state = RangeSyncState::Idle; - self.update_sync_state(); + self.update_sync_state(network); } } } @@ -179,30 +185,67 @@ impl ChainCollection { } } - /// Finds any finalized chain if it exists. 
- pub fn get_finalized_mut( - &mut self, - target_head_root: Hash256, - target_head_slot: Slot, - ) -> Option<&mut SyncingChain> { - ChainCollection::get_chain( - self.finalized_chains.as_mut(), - target_head_root, - target_head_slot, - ) + /// Calls `func` on every chain of the collection. If the result is + /// `ProcessingResult::RemoveChain`, the chain is removed and returned. + pub fn call_all(&mut self, mut func: F) -> Vec<(SyncingChain, RangeSyncType)> + where + F: FnMut(&mut SyncingChain) -> ProcessingResult, + { + let mut to_remove = Vec::new(); + + for (id, chain) in self.finalized_chains.iter_mut() { + if let ProcessingResult::RemoveChain = func(chain) { + to_remove.push((*id, RangeSyncType::Finalized)); + } + } + + for (id, chain) in self.head_chains.iter_mut() { + if let ProcessingResult::RemoveChain = func(chain) { + to_remove.push((*id, RangeSyncType::Head)); + } + } + + let mut results = Vec::with_capacity(to_remove.len()); + for (id, sync_type) in to_remove.into_iter() { + let chain = match sync_type { + RangeSyncType::Finalized => self.finalized_chains.remove(&id), + RangeSyncType::Head => self.head_chains.remove(&id), + }; + results.push((chain.expect("Chain exits"), sync_type)); + } + results } - /// Finds any finalized chain if it exists. - pub fn get_head_mut( + /// Executes a function on the chain with the given id. + /// + /// If the function returns `ProcessingResult::RemoveChain`, the chain is removed and returned. + /// If the chain is found, its syncing type is returned, or an error otherwise. 
+ pub fn call_by_id( &mut self, - target_head_root: Hash256, - target_head_slot: Slot, - ) -> Option<&mut SyncingChain> { - ChainCollection::get_chain( - self.head_chains.as_mut(), - target_head_root, - target_head_slot, - ) + id: ChainId, + func: F, + ) -> Result<(Option>, RangeSyncType), ()> + where + F: FnOnce(&mut SyncingChain) -> ProcessingResult, + { + if let Entry::Occupied(mut entry) = self.finalized_chains.entry(id) { + // Search in our finalized chains first + if let ProcessingResult::RemoveChain = func(entry.get_mut()) { + Ok((Some(entry.remove()), RangeSyncType::Finalized)) + } else { + Ok((None, RangeSyncType::Finalized)) + } + } else if let Entry::Occupied(mut entry) = self.head_chains.entry(id) { + // Search in our head chains next + if let ProcessingResult::RemoveChain = func(entry.get_mut()) { + Ok((Some(entry.remove()), RangeSyncType::Head)) + } else { + Ok((None, RangeSyncType::Head)) + } + } else { + // Chain was not found in the finalized collection, nor the head collection + Err(()) + } } /// Updates the state of the chain collection. @@ -211,9 +254,8 @@ impl ChainCollection { /// updates the state of the collection. This starts head chains syncing if any are required to /// do so. pub fn update(&mut self, network: &mut SyncNetworkContext) { - let local_epoch = { - let local = match PeerSyncInfo::from_chain(&self.beacon_chain) { - Some(local) => local, + let (local_finalized_epoch, local_head_epoch) = + match PeerSyncInfo::from_chain(&self.beacon_chain) { None => { return error!( self.log, @@ -221,20 +263,21 @@ impl ChainCollection { "msg" => "likely due to head lock contention" ) } + Some(local) => ( + local.finalized_epoch, + local.head_slot.epoch(T::EthSpec::slots_per_epoch()), + ), }; - local.finalized_epoch - }; - // Remove any outdated finalized/head chains self.purge_outdated_chains(network); // Choose the best finalized chain if one needs to be selected. 
- self.update_finalized_chains(network, local_epoch); + self.update_finalized_chains(network, local_finalized_epoch, local_head_epoch); - if self.finalized_syncing_index().is_none() { + if self.finalized_syncing_chain().is_none() { // Handle head syncing chains if there are no finalized chains left. - self.update_head_chains(network, local_epoch); + self.update_head_chains(network, local_finalized_epoch, local_head_epoch); } } @@ -244,53 +287,57 @@ impl ChainCollection { &mut self, network: &mut SyncNetworkContext, local_epoch: Epoch, + local_head_epoch: Epoch, ) { - // Check if any chains become the new syncing chain - if let Some(index) = self.finalized_syncing_index() { - // There is a current finalized chain syncing - let _syncing_chain_peer_count = self.finalized_chains[index].peer_pool.len(); - - // search for a chain with more peers - if let Some((new_index, chain)) = - self.finalized_chains - .iter_mut() - .enumerate() - .find(|(_iter_index, _chain)| { - false - // && *iter_index != index - // && chain.peer_pool.len() > syncing_chain_peer_count - }) - { - // A chain has more peers. 
Swap the syncing chain - debug!(self.log, "Switching finalized chains to sync"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_epoch"=> local_epoch); - - // update the state to a new finalized state - let state = RangeSyncState::Finalized { - start_slot: chain.start_epoch.start_slot(T::EthSpec::slots_per_epoch()), - head_slot: chain.target_head_slot, - head_root: chain.target_head_root, - }; - self.state = state; - - // Stop the current chain from syncing - self.finalized_chains[index].stop_syncing(); - // Start the new chain - self.finalized_chains[new_index].start_syncing(network, local_epoch); - } - } else if let Some(chain) = self + // Find the chain with most peers and check if it is already syncing + if let Some((new_id, peers)) = self .finalized_chains - .iter_mut() - .max_by_key(|chain| chain.peer_pool.len()) + .iter() + .max_by_key(|(_, chain)| chain.available_peers()) + .map(|(id, chain)| (*id, chain.available_peers())) { - // There is no currently syncing finalization chain, starting the one with the most peers - debug!(self.log, "New finalized chain started syncing"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_epoch"=> chain.start_epoch); - chain.start_syncing(network, local_epoch); + let old_id = self.finalized_syncing_chain().map( + |(currently_syncing_id, currently_syncing_chain)| { + if *currently_syncing_id != new_id + && peers > currently_syncing_chain.available_peers() + { + currently_syncing_chain.stop_syncing(); + // we stop this chain and start syncing the one with more peers + Some(*currently_syncing_id) + } else { + // the best chain is already the syncing chain, advance it if possible + None + } + }, + ); + + let chain = self + .finalized_chains + .get_mut(&new_id) + .expect("Chain exists"); + + match old_id { + Some(Some(old_id)) => debug!(self.log, "Switching finalized chains"; + "old_id" => old_id, 
&chain), + None => debug!(self.log, "Syncing new chain"; &chain), + Some(None) => trace!(self.log, "Advancing currently syncing chain"), + // this is the same chain. We try to advance it. + } + // update the state to a new finalized state let state = RangeSyncState::Finalized { start_slot: chain.start_epoch.start_slot(T::EthSpec::slots_per_epoch()), head_slot: chain.target_head_slot, head_root: chain.target_head_root, }; self.state = state; + + if let ProcessingResult::RemoveChain = + chain.start_syncing(network, local_epoch, local_head_epoch) + { + // this happens only if sending a batch over the `network` fails a lot + error!(self.log, "Chain removed while switching chains"); + self.finalized_chains.remove(&new_id); + } } } @@ -299,6 +346,7 @@ impl ChainCollection { &mut self, network: &mut SyncNetworkContext, local_epoch: Epoch, + local_head_epoch: Epoch, ) { // There are no finalized chains, update the state. if self.head_chains.is_empty() { @@ -308,42 +356,41 @@ impl ChainCollection { let mut currently_syncing = self .head_chains - .iter() + .values() .filter(|chain| chain.is_syncing()) .count(); let mut not_syncing = self.head_chains.len() - currently_syncing; - // Find all head chains that are not currently syncing ordered by peer count. 
while currently_syncing <= PARALLEL_HEAD_CHAINS && not_syncing > 0 { // Find the chain with the most peers and start syncing - if let Some((_index, chain)) = self + if let Some((_id, chain)) = self .head_chains .iter_mut() - .filter(|chain| !chain.is_syncing()) - .enumerate() - .max_by_key(|(_index, chain)| chain.peer_pool.len()) + .filter(|(_id, chain)| !chain.is_syncing()) + .max_by_key(|(_id, chain)| chain.available_peers()) { // start syncing this chain - debug!(self.log, "New head chain started syncing"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_epoch"=> chain.start_epoch); - chain.start_syncing(network, local_epoch); + debug!(self.log, "New head chain started syncing"; &chain); + if let ProcessingResult::RemoveChain = + chain.start_syncing(network, local_epoch, local_head_epoch) + { + error!(self.log, "Chain removed while switching head chains") + } } - // update variables currently_syncing = self .head_chains .iter() - .filter(|chain| chain.is_syncing()) + .filter(|(_id, chain)| chain.is_syncing()) .count(); not_syncing = self.head_chains.len() - currently_syncing; } - // Start // for the syncing API, we find the minimal start_slot and the maximum // target_slot of all head chains to report back. - let (min_epoch, max_slot) = self .head_chains - .iter() + .values() .filter(|chain| chain.is_syncing()) .fold( (Epoch::from(0u64), Slot::from(0u64)), @@ -365,10 +412,9 @@ impl ChainCollection { /// chains and re-status their peers. 
pub fn clear_head_chains(&mut self, network: &mut SyncNetworkContext) { let log_ref = &self.log; - self.head_chains.retain(|chain| { - if !chain.is_syncing() - { - debug!(log_ref, "Removing old head chain"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); + self.head_chains.retain(|_id, chain| { + if !chain.is_syncing() { + debug!(log_ref, "Removing old head chain"; &chain); chain.status_peers(network); false } else { @@ -377,140 +423,20 @@ impl ChainCollection { }); } - /// Add a new finalized chain to the collection. - pub fn new_finalized_chain( - &mut self, - local_finalized_epoch: Epoch, - target_head: Hash256, - target_slot: Slot, - peer_id: PeerId, - beacon_processor_send: mpsc::Sender>, - ) { - let chain_id = rand::random(); - self.finalized_chains.push(SyncingChain::new( - chain_id, - local_finalized_epoch, - target_slot, - target_head, - peer_id, - beacon_processor_send, - self.beacon_chain.clone(), - self.log.new(o!("chain" => chain_id)), - )); - } - - /// Add a new finalized chain to the collection and starts syncing it. - #[allow(clippy::too_many_arguments)] - pub fn new_head_chain( - &mut self, - remote_finalized_epoch: Epoch, - target_head: Hash256, - target_slot: Slot, - peer_id: PeerId, - beacon_processor_send: mpsc::Sender>, - ) { - // remove the peer from any other head chains - - self.head_chains.iter_mut().for_each(|chain| { - chain.peer_pool.remove(&peer_id); - }); - self.head_chains.retain(|chain| !chain.peer_pool.is_empty()); - - let chain_id = rand::random(); - let new_head_chain = SyncingChain::new( - chain_id, - remote_finalized_epoch, - target_slot, - target_head, - peer_id, - beacon_processor_send, - self.beacon_chain.clone(), - self.log.clone(), - ); - self.head_chains.push(new_head_chain); - } - /// Returns if `true` if any finalized chains exist, `false` otherwise. 
pub fn is_finalizing_sync(&self) -> bool { !self.finalized_chains.is_empty() } - /// Given a chain iterator, runs a given function on each chain until the function returns - /// `Some`. This allows the `RangeSync` struct to loop over chains and optionally remove the - /// chain from the collection if the function results in completing the chain. - fn request_function<'a, F, I, U>(chain: I, mut func: F) -> Option<(usize, U)> - where - I: Iterator>, - F: FnMut(&'a mut SyncingChain) -> Option, - { - chain - .enumerate() - .find_map(|(index, chain)| Some((index, func(chain)?))) - } - - /// Given a chain iterator, runs a given function on each chain and return all `Some` results. - fn request_function_all<'a, F, I, U>(chain: I, mut func: F) -> Vec<(usize, U)> - where - I: Iterator>, - F: FnMut(&'a mut SyncingChain) -> Option, - { - chain - .enumerate() - .filter_map(|(index, chain)| Some((index, func(chain)?))) - .collect() - } - - /// Runs a function on finalized chains until we get the first `Some` result from `F`. - pub fn finalized_request(&mut self, func: F) -> Option<(usize, U)> - where - F: FnMut(&mut SyncingChain) -> Option, - { - ChainCollection::request_function(self.finalized_chains.iter_mut(), func) - } - - /// Runs a function on head chains until we get the first `Some` result from `F`. - pub fn head_request(&mut self, func: F) -> Option<(usize, U)> - where - F: FnMut(&mut SyncingChain) -> Option, - { - ChainCollection::request_function(self.head_chains.iter_mut(), func) - } - - /// Runs a function on finalized and head chains until we get the first `Some` result from `F`. - pub fn head_finalized_request(&mut self, func: F) -> Option<(usize, U)> - where - F: FnMut(&mut SyncingChain) -> Option, - { - ChainCollection::request_function( - self.finalized_chains - .iter_mut() - .chain(self.head_chains.iter_mut()), - func, - ) - } - - /// Runs a function on all finalized and head chains and collects all `Some` results from `F`. 
- pub fn head_finalized_request_all(&mut self, func: F) -> Vec<(usize, U)> - where - F: FnMut(&mut SyncingChain) -> Option, - { - ChainCollection::request_function_all( - self.finalized_chains - .iter_mut() - .chain(self.head_chains.iter_mut()), - func, - ) - } - /// Removes any outdated finalized or head chains. - /// /// This removes chains with no peers, or chains whose start block slot is less than our current /// finalized block slot. pub fn purge_outdated_chains(&mut self, network: &mut SyncNetworkContext) { // Remove any chains that have no peers self.finalized_chains - .retain(|chain| !chain.peer_pool.is_empty()); - self.head_chains.retain(|chain| !chain.peer_pool.is_empty()); + .retain(|_id, chain| chain.available_peers() > 0); + self.head_chains + .retain(|_id, chain| chain.available_peers() > 0); let local_info = match PeerSyncInfo::from_chain(&self.beacon_chain) { Some(local) => local, @@ -530,28 +456,28 @@ impl ChainCollection { let beacon_chain = &self.beacon_chain; let log_ref = &self.log; // Remove chains that are out-dated and re-status their peers - self.finalized_chains.retain(|chain| { + self.finalized_chains.retain(|_id, chain| { if chain.target_head_slot <= local_finalized_slot || beacon_chain .fork_choice .read() .contains_block(&chain.target_head_root) { - debug!(log_ref, "Purging out of finalized chain"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); + debug!(log_ref, "Purging out of finalized chain"; &chain); chain.status_peers(network); false } else { true } }); - self.head_chains.retain(|chain| { + self.head_chains.retain(|_id, chain| { if chain.target_head_slot <= local_finalized_slot || beacon_chain .fork_choice .read() .contains_block(&chain.target_head_root) { - debug!(log_ref, "Purging out of date head chain"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); + debug!(log_ref, "Purging out of date head chain"; &chain); chain.status_peers(network); false } else { @@ -560,63 +486,71 
@@ impl ChainCollection { }); } - /// Removes and returns a finalized chain from the collection. - pub fn remove_finalized_chain(&mut self, index: usize) -> SyncingChain { - self.finalized_chains.swap_remove(index) - } - - /// Removes and returns a head chain from the collection. - pub fn remove_head_chain(&mut self, index: usize) -> SyncingChain { - self.head_chains.swap_remove(index) - } - - /// Removes a chain from either finalized or head chains based on the index. Using a request - /// iterates of finalized chains before head chains. Thus an index that is greater than the - /// finalized chain length, indicates a head chain. - /// - /// This will re-status the chains peers on removal. The index must exist. - pub fn remove_chain(&mut self, network: &mut SyncNetworkContext, index: usize) { - let chain = if index >= self.finalized_chains.len() { - let index = index - self.finalized_chains.len(); - let chain = self.head_chains.swap_remove(index); - chain.status_peers(network); - chain + /// Adds a peer to a chain with the given target, or creates a new syncing chain if it doesn't + /// exits. 
+ #[allow(clippy::too_many_arguments)] + pub fn add_peer_or_create_chain( + &mut self, + start_epoch: Epoch, + target_head_root: Hash256, + target_head_slot: Slot, + peer: PeerId, + sync_type: RangeSyncType, + beacon_processor_send: &mpsc::Sender>, + network: &mut SyncNetworkContext, + ) { + let id = SyncingChain::::id(&target_head_root, &target_head_slot); + let collection = if let RangeSyncType::Finalized = sync_type { + if let Some(chain) = self.head_chains.get(&id) { + // sanity verification for chain duplication / purging issues + crit!(self.log, "Adding known head chain as finalized chain"; chain); + } + &mut self.finalized_chains } else { - let chain = self.finalized_chains.swap_remove(index); - chain.status_peers(network); - chain + if let Some(chain) = self.finalized_chains.get(&id) { + // sanity verification for chain duplication / purging issues + crit!(self.log, "Adding known finalized chain as head chain"; chain); + } + &mut self.head_chains }; - - debug!(self.log, "Chain was removed"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); - - // update the state - self.update(network); + match collection.entry(id) { + Entry::Occupied(mut entry) => { + let chain = entry.get_mut(); + debug!(self.log, "Adding peer to known chain"; "peer_id" => %peer, "sync_type" => ?sync_type, &chain); + assert_eq!(chain.target_head_root, target_head_root); + assert_eq!(chain.target_head_slot, target_head_slot); + if let ProcessingResult::RemoveChain = chain.add_peer(network, peer) { + debug!(self.log, "Chain removed after adding peer"; "chain" => id); + entry.remove(); + } + } + Entry::Vacant(entry) => { + let peer_rpr = peer.to_string(); + let new_chain = SyncingChain::new( + start_epoch, + target_head_slot, + target_head_root, + peer, + beacon_processor_send.clone(), + self.beacon_chain.clone(), + &self.log, + ); + assert_eq!(new_chain.get_id(), id); + debug!(self.log, "New chain added to sync"; "peer_id" => peer_rpr, "sync_type" => ?sync_type, 
&new_chain); + entry.insert(new_chain); + } + } } /// Returns the index of finalized chain that is currently syncing. Returns `None` if no /// finalized chain is currently syncing. - fn finalized_syncing_index(&self) -> Option { - self.finalized_chains - .iter() - .enumerate() - .find_map(|(index, chain)| { - if chain.state == ChainSyncingState::Syncing { - Some(index) - } else { - None - } - }) - } - - /// Returns a chain given the target head root and slot. - fn get_chain<'a>( - chain: &'a mut [SyncingChain], - target_head_root: Hash256, - target_head_slot: Slot, - ) -> Option<&'a mut SyncingChain> { - chain.iter_mut().find(|iter_chain| { - iter_chain.target_head_root == target_head_root - && iter_chain.target_head_slot == target_head_slot + fn finalized_syncing_chain(&mut self) -> Option<(&ChainId, &mut SyncingChain)> { + self.finalized_chains.iter_mut().find_map(|(id, chain)| { + if chain.state == ChainSyncingState::Syncing { + Some((id, chain)) + } else { + None + } }) } } diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index 85db6d378fe..07939569a35 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -7,6 +7,6 @@ mod chain_collection; mod range; mod sync_type; -pub use batch::Batch; -pub use chain::{ChainId, EPOCHS_PER_BATCH}; +pub use batch::BatchInfo; +pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; pub use range::RangeSync; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 59dfec16de6..6847838e042 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -39,7 +39,7 @@ //! Each chain is downloaded in batches of blocks. The batched blocks are processed sequentially //! and further batches are requested as current blocks are being processed. 
-use super::chain::{ChainId, ProcessingResult}; +use super::chain::ChainId; use super::chain_collection::{ChainCollection, RangeSyncState}; use super::sync_type::RangeSyncType; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; @@ -49,7 +49,7 @@ use crate::sync::PeerSyncInfo; use crate::sync::RequestId; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::{NetworkGlobals, PeerId}; -use slog::{debug, error, trace}; +use slog::{debug, error, trace, warn}; use std::collections::HashSet; use std::sync::Arc; use tokio::sync::mpsc; @@ -98,8 +98,8 @@ impl RangeSync { /// On re-status, a peer that has no head to download indicates that this state can be set to /// idle as there are in fact no head chains to download. This function notifies the chain /// collection that the state can safely be set to idle. - pub fn fully_synced_peer_found(&mut self) { - self.chains.fully_synced_peer_found() + pub fn fully_synced_peer_found(&mut self, network: &mut SyncNetworkContext) { + self.chains.fully_synced_peer_found(network) } /// A useful peer has been added. The SyncManager has identified this peer as needing either @@ -121,21 +121,15 @@ impl RangeSync { let local_info = match PeerSyncInfo::from_chain(&self.beacon_chain) { Some(local) => local, None => { - return error!( - self.log, - "Failed to get peer sync info"; - "msg" => "likely due to head lock contention" - ) + return error!(self.log, "Failed to get peer sync info"; + "msg" => "likely due to head lock contention") } }; - // convenience variables + // convenience variable let remote_finalized_slot = remote_info .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); - let local_finalized_slot = local_info - .finalized_epoch - .start_slot(T::EthSpec::slots_per_epoch()); // NOTE: A peer that has been re-status'd may now exist in multiple finalized chains. 
@@ -146,7 +140,7 @@ impl RangeSync { match RangeSyncType::new(&self.beacon_chain, &local_info, &remote_info) { RangeSyncType::Finalized => { // Finalized chain search - debug!(self.log, "Finalization sync peer joined"; "peer_id" => format!("{:?}", peer_id)); + debug!(self.log, "Finalization sync peer joined"; "peer_id" => %peer_id); // remove the peer from the awaiting_head_peers list if it exists self.awaiting_head_peers.remove(&peer_id); @@ -154,37 +148,19 @@ impl RangeSync { // Note: We keep current head chains. These can continue syncing whilst we complete // this new finalized chain. - // If a finalized chain already exists that matches, add this peer to the chain's peer - // pool. - if let Some(chain) = self - .chains - .get_finalized_mut(remote_info.finalized_root, remote_finalized_slot) - { - debug!(self.log, "Finalized chain exists, adding peer"; "peer_id" => peer_id.to_string(), "target_root" => chain.target_head_root.to_string(), "targe_slot" => chain.target_head_slot); - - // add the peer to the chain's peer pool - chain.add_peer(network, peer_id); + self.chains.add_peer_or_create_chain( + local_info.finalized_epoch, + remote_info.finalized_root, + remote_finalized_slot, + peer_id, + RangeSyncType::Finalized, + &self.beacon_processor_send, + network, + ); - // check if the new peer's addition will favour a new syncing chain. 
- self.chains.update(network); - // update the global sync state if necessary - self.chains.update_sync_state(); - } else { - // there is no finalized chain that matches this peer's last finalized target - // create a new finalized chain - debug!(self.log, "New finalized chain added to sync"; "peer_id" => format!("{:?}", peer_id), "start_slot" => local_finalized_slot, "end_slot" => remote_finalized_slot, "finalized_root" => format!("{}", remote_info.finalized_root)); - - self.chains.new_finalized_chain( - local_info.finalized_epoch, - remote_info.finalized_root, - remote_finalized_slot, - peer_id, - self.beacon_processor_send.clone(), - ); - self.chains.update(network); - // update the global sync state - self.chains.update_sync_state(); - } + self.chains.update(network); + // update the global sync state + self.chains.update_sync_state(network); } RangeSyncType::Head => { // This peer requires a head chain sync @@ -192,7 +168,7 @@ impl RangeSync { if self.chains.is_finalizing_sync() { // If there are finalized chains to sync, finish these first, before syncing head // chains. This allows us to re-sync all known peers - trace!(self.log, "Waiting for finalized sync to complete"; "peer_id" => format!("{:?}", peer_id)); + trace!(self.log, "Waiting for finalized sync to complete"; "peer_id" => %peer_id); // store the peer to re-status after all finalized chains complete self.awaiting_head_peers.insert(peer_id); return; @@ -203,33 +179,20 @@ impl RangeSync { // The new peer has the same finalized (earlier filters should prevent a peer with an // earlier finalized chain from reaching here). 
- debug!(self.log, "New peer added for recent head sync"; "peer_id" => format!("{:?}", peer_id)); - // search if there is a matching head chain, then add the peer to the chain - if let Some(chain) = self - .chains - .get_head_mut(remote_info.head_root, remote_info.head_slot) - { - debug!(self.log, "Adding peer to the existing head chain peer pool"; "head_root" => format!("{}",remote_info.head_root), "head_slot" => remote_info.head_slot, "peer_id" => format!("{:?}", peer_id)); - - // add the peer to the head's pool - chain.add_peer(network, peer_id); - } else { - // There are no other head chains that match this peer's status, create a new one, and - let start_epoch = std::cmp::min(local_info.head_slot, remote_finalized_slot) - .epoch(T::EthSpec::slots_per_epoch()); - debug!(self.log, "Creating a new syncing head chain"; "head_root" => format!("{}",remote_info.head_root), "start_epoch" => start_epoch, "head_slot" => remote_info.head_slot, "peer_id" => format!("{:?}", peer_id)); - - self.chains.new_head_chain( - start_epoch, - remote_info.head_root, - remote_info.head_slot, - peer_id, - self.beacon_processor_send.clone(), - ); - } + let start_epoch = std::cmp::min(local_info.head_slot, remote_finalized_slot) + .epoch(T::EthSpec::slots_per_epoch()); + self.chains.add_peer_or_create_chain( + start_epoch, + remote_info.head_root, + remote_info.head_slot, + peer_id, + RangeSyncType::Head, + &self.beacon_processor_send, + network, + ); self.chains.update(network); - self.chains.update_sync_state(); + self.chains.update_sync_state(network); } } } @@ -245,23 +208,27 @@ impl RangeSync { request_id: RequestId, beacon_block: Option>, ) { - // Find the request. Most likely the first finalized chain (the syncing chain). If there - // are no finalized chains, then it will be a head chain. At most, there should only be - // `connected_peers` number of head chains, which should be relatively small and this - // lookup should not be very expensive. 
However, we could add an extra index that maps the - // request id to index of the vector to avoid O(N) searches and O(N) hash lookups. - - let id_not_found = self - .chains - .head_finalized_request(|chain| { - chain.on_block_response(network, request_id, &beacon_block) - }) - .is_none(); - if id_not_found { - // The request didn't exist in any `SyncingChain`. Could have been an old request or - // the chain was purged due to being out of date whilst a request was pending. Log - // and ignore. - debug!(self.log, "Range response without matching request"; "peer" => format!("{:?}", peer_id), "request_id" => request_id); + // get the chain and batch for which this response belongs + if let Some((chain_id, batch_id)) = + network.blocks_by_range_response(request_id, beacon_block.is_none()) + { + // check if this chunk removes the chain + match self.chains.call_by_id(chain_id, |chain| { + chain.on_block_response(network, batch_id, peer_id, beacon_block) + }) { + Ok((removed_chain, sync_type)) => { + if let Some(removed_chain) = removed_chain { + debug!(self.log, "Chain removed after block response"; "sync_type" => ?sync_type, "chain_id" => chain_id); + removed_chain.status_peers(network); + // TODO: update & update_sync_state? 
+ } + } + Err(_) => { + debug!(self.log, "BlocksByRange response for removed chain"; "chain" => chain_id) + } + } + } else { + warn!(self.log, "Response/Error for non registered request"; "request_id" => request_id) } } @@ -269,76 +236,57 @@ impl RangeSync { &mut self, network: &mut SyncNetworkContext, chain_id: ChainId, - epoch: Epoch, - downloaded_blocks: Vec>, + batch_id: Epoch, result: BatchProcessResult, ) { - // build an option for passing the downloaded_blocks to each chain - let mut downloaded_blocks = Some(downloaded_blocks); - - match self.chains.finalized_request(|chain| { - chain.on_batch_process_result(network, chain_id, epoch, &mut downloaded_blocks, &result) + // check if this response removes the chain + match self.chains.call_by_id(chain_id, |chain| { + chain.on_batch_process_result(network, batch_id, &result) }) { - Some((index, ProcessingResult::RemoveChain)) => { - let chain = self.chains.remove_finalized_chain(index); - debug!(self.log, "Finalized chain removed"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); - // update the state of the collection - self.chains.update(network); - - // the chain is complete, re-status it's peers - chain.status_peers(network); - - // set the state to a head sync if there are no finalized chains, to inform the manager that we are awaiting a - // head chain. - self.chains.set_head_sync(); - // Update the global variables - self.chains.update_sync_state(); - - // if there are no more finalized chains, re-status all known peers awaiting a head - // sync - match self.chains.state() { - RangeSyncState::Idle | RangeSyncState::Head { .. 
} => { - for peer_id in self.awaiting_head_peers.drain() { - network.status_peer(self.beacon_chain.clone(), peer_id); + Ok((None, _sync_type)) => { + // Chain was found and not removed + } + Ok((Some(removed_chain), sync_type)) => { + debug!(self.log, "Chain removed after processing result"; "chain" => chain_id, "sync_type" => ?sync_type); + // Chain ended, re-status its peers + removed_chain.status_peers(network); + match sync_type { + RangeSyncType::Finalized => { + // update the state of the collection + self.chains.update(network); + // set the state to a head sync if there are no finalized chains, to inform + // the manager that we are awaiting a head chain. + self.chains.set_head_sync(); + // Update the global variables + self.chains.update_sync_state(network); + // if there are no more finalized chains, re-status all known peers + // awaiting a head sync + match self.chains.state() { + RangeSyncState::Idle | RangeSyncState::Head { .. } => { + network.status_peers( + self.beacon_chain.clone(), + self.awaiting_head_peers.drain(), + ); + } + RangeSyncState::Finalized { .. } => {} // Have more finalized chains to complete } } - RangeSyncState::Finalized { .. } => {} // Have more finalized chains to complete - } - } - Some((_, ProcessingResult::KeepChain)) => {} - None => { - match self.chains.head_request(|chain| { - chain.on_batch_process_result( - network, - chain_id, - epoch, - &mut downloaded_blocks, - &result, - ) - }) { - Some((index, ProcessingResult::RemoveChain)) => { - let chain = self.chains.remove_head_chain(index); - debug!(self.log, "Head chain completed"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); - // the chain is complete, re-status it's peers and remove it - chain.status_peers(network); - - // Remove non-syncing head chains and re-status the peers - // This removes a build-up of potentially duplicate head chains. 
Any - // legitimate head chains will be re-established + RangeSyncType::Head => { + // Remove non-syncing head chains and re-status the peers. This removes a + // build-up of potentially duplicate head chains. Any legitimate head + // chains will be re-established self.chains.clear_head_chains(network); // update the state of the collection self.chains.update(network); // update the global state and log any change - self.chains.update_sync_state(); - } - Some((_, ProcessingResult::KeepChain)) => {} - None => { - // This can happen if a chain gets purged due to being out of date whilst a - // batch process is in progress. - debug!(self.log, "No chains match the block processing id"; "batch_epoch" => epoch, "chain_id" => chain_id); + self.chains.update_sync_state(network); } } } + + Err(_) => { + debug!(self.log, "BlocksByRange response for removed chain"; "chain" => chain_id) + } } } @@ -352,39 +300,26 @@ impl RangeSync { // if the peer is in the awaiting head mapping, remove it self.awaiting_head_peers.remove(peer_id); - // remove the peer from any peer pool + // remove the peer from any peer pool, failing its batches self.remove_peer(network, peer_id); // update the state of the collection self.chains.update(network); // update the global state and inform the user - self.chains.update_sync_state(); + self.chains.update_sync_state(network); } - /// When a peer gets removed, both the head and finalized chains need to be searched to check which pool the peer is in. The chain may also have a batch or batches awaiting + /// When a peer gets removed, both the head and finalized chains need to be searched to check + /// which pool the peer is in. The chain may also have a batch or batches awaiting /// for this peer. If so we mark the batch as failed. The batch may then hit it's maximum /// retries. In this case, we need to remove the chain and re-status all the peers. 
fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { - for (index, result) in self.chains.head_finalized_request_all(|chain| { - if chain.peer_pool.remove(peer_id) { - // this chain contained the peer - while let Some(batch) = chain.pending_batches.remove_batch_by_peer(peer_id) { - if let ProcessingResult::RemoveChain = chain.failed_batch(network, batch) { - // a single batch failed, remove the chain - return Some(ProcessingResult::RemoveChain); - } - } - // peer removed from chain, no batch failed - Some(ProcessingResult::KeepChain) - } else { - None - } - }) { - if result == ProcessingResult::RemoveChain { - // the chain needed to be removed - debug!(self.log, "Chain being removed due to failed batch"); - self.chains.remove_chain(network, index); - } + for (removed_chain, sync_type) in self + .chains + .call_all(|chain| chain.remove_peer(peer_id, network)) + { + debug!(self.log, "Chain removed after removing peer"; "sync_type" => ?sync_type, "chain" => removed_chain.get_id()); + // TODO: anything else to do? 
} } @@ -398,17 +333,25 @@ impl RangeSync { peer_id: PeerId, request_id: RequestId, ) { - // check that this request is pending - match self - .chains - .head_finalized_request(|chain| chain.inject_error(network, &peer_id, request_id)) - { - Some((_, ProcessingResult::KeepChain)) => {} // error handled chain persists - Some((index, ProcessingResult::RemoveChain)) => { - debug!(self.log, "Chain being removed due to RPC error"); - self.chains.remove_chain(network, index) + // get the chain and batch for which this response belongs + if let Some((chain_id, batch_id)) = network.blocks_by_range_response(request_id, true) { + // check that this request is pending + match self.chains.call_by_id(chain_id, |chain| { + chain.inject_error(network, batch_id, peer_id) + }) { + Ok((removed_chain, sync_type)) => { + if let Some(removed_chain) = removed_chain { + debug!(self.log, "Chain removed on rpc error"; "sync_type" => ?sync_type, "chain" => removed_chain.get_id()); + removed_chain.status_peers(network); + // TODO: update & update_sync_state? + } + } + Err(_) => { + debug!(self.log, "BlocksByRange response for removed chain"; "chain" => chain_id) + } } - None => {} // request wasn't in the finalized chains, check the head chains + } else { + warn!(self.log, "Response/Error for non registered request"; "request_id" => request_id) } } } diff --git a/beacon_node/network/src/sync/range_sync/sync_type.rs b/beacon_node/network/src/sync/range_sync/sync_type.rs index 103ef77b8f1..d9f1d3f1721 100644 --- a/beacon_node/network/src/sync/range_sync/sync_type.rs +++ b/beacon_node/network/src/sync/range_sync/sync_type.rs @@ -6,6 +6,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use std::sync::Arc; /// The type of Range sync that should be done relative to our current state. +#[derive(Debug)] pub enum RangeSyncType { /// A finalized chain sync should be started with this peer. 
Finalized, diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 0ac95f7ca9b..ad2688bb0ff 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -192,12 +192,11 @@ pub fn get_all_validators( }; let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?; - state.update_pubkey_cache()?; - state - .validators + let validators = state.validators.clone(); + validators .iter() - .map(|validator| validator_response_by_pubkey(&state, validator.pubkey.clone())) + .map(|validator| validator_response_by_pubkey(&mut state, validator.pubkey.clone())) .collect::, _>>() } @@ -215,13 +214,14 @@ pub fn get_active_validators( }; let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?; - state.update_pubkey_cache()?; - state - .validators + let validators = state.validators.clone(); + let current_epoch = state.current_epoch(); + + validators .iter() - .filter(|validator| validator.is_active_at(state.current_epoch())) - .map(|validator| validator_response_by_pubkey(&state, validator.pubkey.clone())) + .filter(|validator| validator.is_active_at(current_epoch)) + .map(|validator| validator_response_by_pubkey(&mut state, validator.pubkey.clone())) .collect::, _>>() } @@ -279,11 +279,10 @@ fn validator_responses_by_pubkey( validator_pubkeys: Vec, ) -> Result, ApiError> { let mut state = get_state_from_root_opt(beacon_chain, state_root_opt)?; - state.update_pubkey_cache()?; validator_pubkeys .into_iter() - .map(|validator_pubkey| validator_response_by_pubkey(&state, validator_pubkey)) + .map(|validator_pubkey| validator_response_by_pubkey(&mut state, validator_pubkey)) .collect::, ApiError>>() } @@ -291,7 +290,7 @@ fn validator_responses_by_pubkey( /// /// The provided `state` must have a fully up-to-date pubkey cache. 
fn validator_response_by_pubkey( - state: &BeaconState, + state: &mut BeaconState, validator_pubkey: PublicKeyBytes, ) -> Result { let validator_index_opt = state diff --git a/beacon_node/rest_api/src/consensus.rs b/beacon_node/rest_api/src/consensus.rs index d82b05b7a7f..9df57f05528 100644 --- a/beacon_node/rest_api/src/consensus.rs +++ b/beacon_node/rest_api/src/consensus.rs @@ -92,10 +92,6 @@ pub fn post_individual_votes( let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; validator_statuses.process_attestations(&state, spec)?; - state.update_pubkey_cache().map_err(|e| { - ApiError::ServerError(format!("Unable to build pubkey cache: {:?}", e)) - })?; - body.pubkeys .into_iter() .map(|pubkey| { diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index e1c3c37dbf7..49342ddaa30 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -156,9 +156,6 @@ fn return_validator_duties( state .build_committee_cache(relative_epoch, &beacon_chain.spec) .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; - state - .update_pubkey_cache() - .map_err(|e| ApiError::ServerError(format!("Unable to build pubkey cache: {:?}", e)))?; // Get a list of all validators for this epoch. // diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index ff64a600cda..9f6ee79b15a 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -193,6 +193,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true), ) + /* + * Standard staking flags + */ + + .arg( + Arg::with_name("staking") + .long("staking") + .help("Standard option for a staking beacon node. Equivalent to \ + `lighthouse bn --http --eth1 `. 
This will enable the http server on localhost:5052 \ + and try connecting to an eth1 node on localhost:8545") + .takes_value(false) + ) + /* * Eth1 Integration */ diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 33cec45800f..aabdbb35ca4 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -5,7 +5,7 @@ use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use eth2_libp2p::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use eth2_testnet_config::Eth2TestnetConfig; -use slog::{crit, info, Logger}; +use slog::{crit, info, warn, Logger}; use ssz::Encode; use std::cmp; use std::fs; @@ -81,6 +81,16 @@ pub fn get_config( false, )?; + /* + * Staking flag + * Note: the config values set here can be overwritten by other more specific cli params + */ + + if cli_args.is_present("staking") { + client_config.rest_api.enabled = true; + client_config.sync_eth1_chain = true; + } + /* * Http server */ @@ -110,6 +120,15 @@ pub fn get_config( client_config.rest_api.allow_origin = allow_origin.to_string(); } + // Log a warning indicating an open HTTP server if it wasn't specified explicitly + // (e.g. using the --staking flag). + if cli_args.is_present("staking") { + warn!( + log, + "Running HTTP server on port {}", client_config.rest_api.port + ); + } + /* * Websocket server */ @@ -424,7 +443,7 @@ pub fn set_network_config( if cli_args.is_present("disable-discovery") { config.disable_discovery = true; - slog::warn!(log, "Discovery is disabled. New peers will not be found"); + warn!(log, "Discovery is disabled. 
New peers will not be found"); } Ok(()) diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 19931916013..a09f8c6cd32 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -7,7 +7,7 @@ mod config; pub use beacon_chain; pub use cli::cli_app; pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; -pub use config::{get_data_dir, get_eth2_testnet_config, set_network_config}; +pub use config::{get_config, get_data_dir, get_eth2_testnet_config, set_network_config}; pub use eth2_config::Eth2Config; use beacon_chain::events::TeeEventHandler; @@ -17,7 +17,6 @@ use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock, }; use clap::ArgMatches; -use config::get_config; use environment::RuntimeContext; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; @@ -54,7 +53,7 @@ impl ProductionBeaconNode { /// configurations hosted remotely. pub async fn new_from_cli( context: RuntimeContext, - matches: &ArgMatches<'_>, + matches: ArgMatches<'static>, ) -> Result { let client_config = get_config::( &matches, diff --git a/book/src/become-a-validator-source.md b/book/src/become-a-validator-source.md index adab86ce398..8b116985517 100644 --- a/book/src/become-a-validator-source.md +++ b/book/src/become-a-validator-source.md @@ -47,7 +47,7 @@ the internet and maintains a view of the chain. Start your beacon node with: ```bash - lighthouse --testnet medalla beacon --eth1 --http + lighthouse --testnet medalla beacon --staking ``` > The `--testnet` parameter is optional. Omitting it will default to the @@ -55,8 +55,11 @@ Start your beacon node with: > Current values are either `altona` or `medalla`. This is true for all the > following commands in this document. ->Note: the `--http` flag enables the HTTP API for the validator client. And the `--eth1` flag tells the beacon node that it should sync with an Ethereum1 node (e.g. Geth). These flags are only required if you wish to run a validator. 
+You can also pass an external http endpoint (e.g. Infura) for the Eth1 node using the `--eth1-endpoint` flag: +```bash + lighthouse --testnet medalla beacon --staking --eth1-endpoint +``` Your beacon node has started syncing when you see the following (truncated) log: @@ -88,10 +91,10 @@ validator](./validator-create.md). A two-step example follows: Create a wallet with: ```bash -lighthouse --testnet medalla account wallet create --name my-validators --password-file my-validators.pass +lighthouse --testnet medalla account wallet create ``` -The output will look like this: +You will be prompted for a wallet name and a password. The output will look like this: ``` Your wallet's 12-word BIP-39 mnemonic is: @@ -124,10 +127,10 @@ used to restore your validator if there is a data loss. Create a validator from the wallet with: ```bash -lighthouse --testnet medalla account validator create --wallet-name my-validators --wallet-password my-validators.pass --count 1 +lighthouse --testnet medalla account validator create --count 1 ``` -The output will look like this: +Enter your wallet's name and password when prompted. The output will look like this: ```bash 1/1 0x80f3dce8d6745a725d8442c9bc3ca0852e772394b898c95c134b94979ebb0af6f898d5c5f65b71be6889185c486918a7 diff --git a/book/src/cross-compiling.md b/book/src/cross-compiling.md index 7dee3320e96..837cc13a66d 100644 --- a/book/src/cross-compiling.md +++ b/book/src/cross-compiling.md @@ -18,15 +18,17 @@ project. The `Makefile` in the project contains four targets for cross-compiling: -- `build-x86_64`: builds an optimized version for x86_64 processors (suitable for most users). - Supports Intel Broadwell (2014) and newer, and AMD Ryzen (2017) and newer. -- `build-x86_64-portable`: builds a version for x86_64 processors which avoids using some modern CPU - instructions that are incompatible with older CPUs. Suitable for pre-Broadwell/Ryzen CPUs. 
-- `build-aarch64`: builds an optimized version for 64-bit ARM processors +- `build-x86_64`: builds an optimized version for x86_64 processors (suitable + for most users). +- `build-x86_64-portable`: builds a version x86_64 processors which avoids + using some modern CPU instructions that might cause an "illegal + instruction" error on older CPUs. +- `build-aarch64`: builds an optimized version for 64bit ARM processors (suitable for Raspberry Pi 4). -- `build-aarch64-portable`: builds a version for 64-bit ARM processors which avoids using some - modern CPU instructions. In practice, very few ARM processors lack the instructions necessary to - run the faster non-portable build. +- `build-aarch64-portable`: builds a version 64 bit ARM processors which avoids + using some modern CPU instructions that might cause an "illegal + instruction" error on older CPUs. + ### Example diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 2884db216b8..ea3a2acba80 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "0.2.11" +version = "0.2.12" authors = ["Sigma Prime "] edition = "2018" diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index 8af026641e4..77351a7b902 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -17,6 +17,8 @@ pub mod validator_definitions; pub use eth2_keystore; pub use eth2_wallet::PlainText; +/// The minimum number of characters required for a wallet password. +pub const MINIMUM_PASSWORD_LEN: usize = 12; /// The `Alphanumeric` crate only generates a-z, A-Z, 0-9, therefore it has a range of 62 /// characters. /// @@ -108,7 +110,7 @@ pub fn read_password_from_user(use_stdin: bool) -> Result } /// Reads a mnemonic phrase from TTY or stdin if `use_stdin == true`. 
-pub fn read_mnemonic_from_user(use_stdin: bool) -> Result { +pub fn read_input_from_user(use_stdin: bool) -> Result { let mut input = String::new(); if use_stdin { io::stdin() @@ -121,9 +123,33 @@ pub fn read_mnemonic_from_user(use_stdin: bool) -> Result { .read_line(&mut input) .map_err(|e| format!("Error reading from tty: {}", e))?; } + trim_newline(&mut input); Ok(input) } +fn trim_newline(s: &mut String) { + if s.ends_with('\n') { + s.pop(); + if s.ends_with('\r') { + s.pop(); + } + } +} + +/// Takes a string password and checks that it meets minimum requirements. +/// +/// The current minimum password requirement is a 12 character length character length. +pub fn is_password_sufficiently_complex(password: &[u8]) -> Result<(), String> { + if password.len() >= MINIMUM_PASSWORD_LEN { + Ok(()) + } else { + Err(format!( + "Please use at least {} characters for your password.", + MINIMUM_PASSWORD_LEN + )) + } +} + /// Provides a new-type wrapper around `String` that is zeroized on `Drop`. /// /// Useful for ensuring that password memory is zeroed-out on drop. 
@@ -146,6 +172,7 @@ impl AsRef<[u8]> for ZeroizeString { #[cfg(test)] mod test { + use super::is_password_sufficiently_complex; use super::strip_off_newlines; #[test] @@ -181,4 +208,20 @@ mod test { expected ); } + + #[test] + fn test_password_over_min_length() { + is_password_sufficiently_complex(b"TestPasswordLong").unwrap(); + } + + #[test] + fn test_password_exactly_min_length() { + is_password_sufficiently_complex(b"TestPassword").unwrap(); + } + + #[test] + #[should_panic] + fn test_password_too_short() { + is_password_sufficiently_complex(b"TestPass").unwrap(); + } } diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index ee3898ecff1..89b488d33a2 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -52,7 +52,6 @@ pub struct Eth2NetArchiveAndDirectory<'a> { pub name: &'a str, pub unique_id: &'a str, pub archive_name: &'a str, - pub commit: &'a str, pub genesis_is_known: bool, } @@ -76,26 +75,21 @@ impl<'a> Eth2NetArchiveAndDirectory<'a> { #[macro_export] macro_rules! unique_id { - ($name: tt, $commit: tt, $genesis_is_known: tt) => { - concat!("testnet_", $name, "_", $commit, "_", $genesis_is_known); - }; - - ($name: tt, $commit: tt) => { - concat!("testnet_", $name, "_", $commit, ".zip"); + ($name: tt) => { + concat!("testnet_", $name); }; } macro_rules! define_net { - ($title: ident, $macro_title: tt, $name: tt, $commit: tt, $genesis_is_known: tt) => { + ($title: ident, $macro_title: tt, $name: tt, $genesis_is_known: tt) => { #[macro_use] pub mod $title { use super::*; pub const ETH2_NET_DIR: Eth2NetArchiveAndDirectory = Eth2NetArchiveAndDirectory { name: $name, - unique_id: unique_id!($name, $commit, $genesis_is_known), - archive_name: unique_id!($name, $commit), - commit: $commit, + unique_id: unique_id!($name), + archive_name: concat!(unique_id!($name), ".zip"), genesis_is_known: $genesis_is_known, }; @@ -104,33 +98,18 @@ macro_rules! define_net { #[macro_export] macro_rules! 
$macro_title { ($base_dir: tt, $filename: tt) => { - include_bytes!(concat!( - $base_dir, - unique_id!($name, $commit, $genesis_is_known), - "/", - $filename - )) + include_bytes!(concat!($base_dir, unique_id!($name), "/", $filename)) }; } } }; } -define_net!( - altona, - include_altona_file, - "altona", - "a94e00c1a03df851f960fcf44a79f2a6b1d29af1", - true -); - -define_net!( - medalla, - include_medalla_file, - "medalla", - "09bbf2c9d108944ac934f94ec6a1d0684ca062a5", - true -); +define_net!(altona, include_altona_file, "altona", true); + +define_net!(medalla, include_medalla_file, "medalla", true); + +define_net!(spadina, include_spadina_file, "spadina", false); #[cfg(test)] mod tests { diff --git a/common/eth2_testnet_config/build.rs b/common/eth2_testnet_config/build.rs index eeda3180730..588ec90a04e 100644 --- a/common/eth2_testnet_config/build.rs +++ b/common/eth2_testnet_config/build.rs @@ -1,67 +1,55 @@ //! Downloads a testnet configuration from Github. -use eth2_config::{altona, medalla, Eth2NetArchiveAndDirectory}; +use eth2_config::{altona, medalla, spadina, Eth2NetArchiveAndDirectory}; use std::fs; use std::fs::File; use std::io; use zip::ZipArchive; -const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = - &[altona::ETH2_NET_DIR, medalla::ETH2_NET_DIR]; +const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[ + altona::ETH2_NET_DIR, + medalla::ETH2_NET_DIR, + spadina::ETH2_NET_DIR, +]; fn main() { for testnet in ETH2_NET_DIRS { - let testnet_dir = testnet.dir(); - let archive_fullpath = testnet.archive_fullpath(); - //no need to do anything if archives have already been uncompressed before - if !testnet_dir.exists() { - if archive_fullpath.exists() { - //uncompress archive and continue - let archive_file = match File::open(&archive_fullpath) { - Ok(f) => f, - Err(e) => panic!("Problem opening archive file: {}", e), - }; - - match uncompress(archive_file) { - Ok(_) => (), - Err(e) => panic!(e), - }; - } else { - panic!( - "Couldn't find 
testnet archive at this location: {:?}", - archive_fullpath - ); - } + match uncompress(testnet) { + Ok(()) => (), + Err(e) => panic!("Failed to uncompress testnet zip file: {}", e), } } } -fn uncompress(archive_file: File) -> Result<(), String> { +/// Uncompress the testnet configs archive into a testnet configs folder. +fn uncompress(testnet: &Eth2NetArchiveAndDirectory<'static>) -> Result<(), String> { + let archive_file = File::open(&testnet.archive_fullpath()) + .map_err(|e| format!("Failed to open archive file: {:?}", e))?; + let mut archive = ZipArchive::new(archive_file).map_err(|e| format!("Error with zip file: {}", e))?; + + // Create testnet dir + fs::create_dir_all(testnet.dir()) + .map_err(|e| format!("Failed to create testnet directory: {:?}", e))?; + + // Create empty genesis.ssz if genesis is unknown + if !testnet.genesis_is_known { + File::create(testnet.dir().join("genesis.ssz")) + .map_err(|e| format!("Failed to create genesis.ssz: {}", e))?; + } + for i in 0..archive.len() { let mut file = archive .by_index(i) .map_err(|e| format!("Error retrieving file {} inside zip: {}", i, e))?; - let outpath = file.sanitized_name(); + let path = testnet.dir().join(file.name()); - if file.name().ends_with('/') { - fs::create_dir_all(&outpath) - .map_err(|e| format!("Error creating testnet directories: {}", e))?; - } else { - if let Some(p) = outpath.parent() { - if !p.exists() { - fs::create_dir_all(&p) - .map_err(|e| format!("Error creating testnet directories: {}", e))?; - } - } - - let mut outfile = File::create(&outpath) - .map_err(|e| format!("Error while creating file {:?}: {}", outpath, e))?; - io::copy(&mut file, &mut outfile) - .map_err(|e| format!("Error writing file {:?}: {}", outpath, e))?; - } + let mut outfile = File::create(&path) + .map_err(|e| format!("Error while creating file {:?}: {}", path, e))?; + io::copy(&mut file, &mut outfile) + .map_err(|e| format!("Error writing file {:?}: {}", path, e))?; } Ok(()) diff --git 
a/common/eth2_testnet_config/src/lib.rs b/common/eth2_testnet_config/src/lib.rs index d4636e9873f..1b0d4a933a4 100644 --- a/common/eth2_testnet_config/src/lib.rs +++ b/common/eth2_testnet_config/src/lib.rs @@ -7,7 +7,7 @@ //! //! https://github.com/sigp/lighthouse/pull/605 //! -use eth2_config::{include_altona_file, include_medalla_file, unique_id}; +use eth2_config::{include_altona_file, include_medalla_file, include_spadina_file, unique_id}; use enr::{CombinedKey, Enr}; use ssz::{Decode, Encode}; @@ -53,8 +53,9 @@ macro_rules! define_net { const ALTONA: HardcodedNet = define_net!(altona, include_altona_file); const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file); +const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file); -const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA]; +const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA]; pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla"; /// Specifies an Eth2 testnet. diff --git a/common/eth2_testnet_config/testnet_altona.zip b/common/eth2_testnet_config/testnet_altona.zip new file mode 100644 index 00000000000..b2c264803aa Binary files /dev/null and b/common/eth2_testnet_config/testnet_altona.zip differ diff --git a/common/eth2_testnet_config/testnet_altona_a94e00c1a03df851f960fcf44a79f2a6b1d29af1.zip b/common/eth2_testnet_config/testnet_altona_a94e00c1a03df851f960fcf44a79f2a6b1d29af1.zip deleted file mode 100644 index 29b9cd0cef3..00000000000 Binary files a/common/eth2_testnet_config/testnet_altona_a94e00c1a03df851f960fcf44a79f2a6b1d29af1.zip and /dev/null differ diff --git a/common/eth2_testnet_config/testnet_medalla.zip b/common/eth2_testnet_config/testnet_medalla.zip new file mode 100644 index 00000000000..fdf8b4e1798 Binary files /dev/null and b/common/eth2_testnet_config/testnet_medalla.zip differ diff --git a/common/eth2_testnet_config/testnet_medalla_09bbf2c9d108944ac934f94ec6a1d0684ca062a5.zip 
b/common/eth2_testnet_config/testnet_medalla_09bbf2c9d108944ac934f94ec6a1d0684ca062a5.zip deleted file mode 100644 index a6cd0a8430a..00000000000 Binary files a/common/eth2_testnet_config/testnet_medalla_09bbf2c9d108944ac934f94ec6a1d0684ca062a5.zip and /dev/null differ diff --git a/common/eth2_testnet_config/testnet_spadina.zip b/common/eth2_testnet_config/testnet_spadina.zip new file mode 100644 index 00000000000..7b0cb3ef07d Binary files /dev/null and b/common/eth2_testnet_config/testnet_spadina.zip differ diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 0a6b6b57c60..19a7a6851dc 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -10,7 +10,7 @@ use target_info::Target; /// `Lighthouse/v0.2.0-1419501f2+` pub const VERSION: &str = git_version!( args = ["--always", "--dirty=+"], - prefix = "Lighthouse/v0.2.11-", + prefix = "Lighthouse/v0.2.12-", fallback = "unknown" ); diff --git a/consensus/safe_arith/src/lib.rs b/consensus/safe_arith/src/lib.rs index 2275682109b..ab5985a6e11 100644 --- a/consensus/safe_arith/src/lib.rs +++ b/consensus/safe_arith/src/lib.rs @@ -28,24 +28,24 @@ macro_rules! assign_method { } /// Trait providing safe arithmetic operations for built-in types. -pub trait SafeArith: Sized + Copy { +pub trait SafeArith: Sized + Copy { const ZERO: Self; const ONE: Self; /// Safe variant of `+` that guards against overflow. - fn safe_add(&self, other: Self) -> Result; + fn safe_add(&self, other: Rhs) -> Result; /// Safe variant of `-` that guards against overflow. - fn safe_sub(&self, other: Self) -> Result; + fn safe_sub(&self, other: Rhs) -> Result; /// Safe variant of `*` that guards against overflow. - fn safe_mul(&self, other: Self) -> Result; + fn safe_mul(&self, other: Rhs) -> Result; /// Safe variant of `/` that guards against division by 0. 
- fn safe_div(&self, other: Self) -> Result; + fn safe_div(&self, other: Rhs) -> Result; /// Safe variant of `%` that guards against division by 0. - fn safe_rem(&self, other: Self) -> Result; + fn safe_rem(&self, other: Rhs) -> Result; /// Safe variant of `<<` that guards against overflow. fn safe_shl(&self, other: u32) -> Result; @@ -53,18 +53,13 @@ pub trait SafeArith: Sized + Copy { /// Safe variant of `>>` that guards against overflow. fn safe_shr(&self, other: u32) -> Result; - assign_method!(safe_add_assign, safe_add, "+="); - assign_method!(safe_sub_assign, safe_sub, "-="); - assign_method!(safe_mul_assign, safe_mul, "*="); - assign_method!(safe_div_assign, safe_div, "/="); - assign_method!(safe_rem_assign, safe_rem, "%="); + assign_method!(safe_add_assign, safe_add, Rhs, "+="); + assign_method!(safe_sub_assign, safe_sub, Rhs, "-="); + assign_method!(safe_mul_assign, safe_mul, Rhs, "*="); + assign_method!(safe_div_assign, safe_div, Rhs, "/="); + assign_method!(safe_rem_assign, safe_rem, Rhs, "%="); assign_method!(safe_shl_assign, safe_shl, u32, "<<="); assign_method!(safe_shr_assign, safe_shr, u32, ">>="); - - /// Mutate `self` by adding 1, erroring on overflow. - fn increment(&mut self) -> Result<()> { - self.safe_add_assign(Self::ONE) - } } macro_rules! 
impl_safe_arith { @@ -136,8 +131,7 @@ mod test { #[test] fn mutate() { let mut x = 0u8; - x.increment().unwrap(); - x.increment().unwrap(); + x.safe_add_assign(2).unwrap(); assert_eq!(x, 2); x.safe_sub_assign(1).unwrap(); assert_eq!(x, 1); diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index bae0705c8e4..bd0de6c198f 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -27,14 +27,16 @@ log = "0.4.8" safe_arith = { path = "../safe_arith" } tree_hash = "0.1.0" tree_hash_derive = "0.2.0" -types = { path = "../types" } +types = { path = "../types", default-features = false } rayon = "1.3.0" eth2_hashing = "0.1.0" int_to_bytes = { path = "../int_to_bytes" } arbitrary = { version = "0.4.4", features = ["derive"], optional = true } [features] +default = ["legacy-arith"] fake_crypto = ["bls/fake_crypto"] +legacy-arith = ["types/legacy-arith"] arbitrary-fuzz = [ "arbitrary", "types/arbitrary-fuzz", diff --git a/consensus/state_processing/src/common/deposit_data_tree.rs b/consensus/state_processing/src/common/deposit_data_tree.rs index 319c437eeea..46f1ed8ccd0 100644 --- a/consensus/state_processing/src/common/deposit_data_tree.rs +++ b/consensus/state_processing/src/common/deposit_data_tree.rs @@ -47,7 +47,7 @@ impl DepositDataTree { /// Add a deposit to the merkle tree. 
pub fn push_leaf(&mut self, leaf: Hash256) -> Result<(), MerkleTreeError> { self.tree.push_leaf(leaf, self.depth)?; - self.mix_in_length.increment()?; + self.mix_in_length.safe_add_assign(1)?; Ok(()) } } diff --git a/consensus/state_processing/src/common/initiate_validator_exit.rs b/consensus/state_processing/src/common/initiate_validator_exit.rs index 00cd02de1ce..3d2638a35a7 100644 --- a/consensus/state_processing/src/common/initiate_validator_exit.rs +++ b/consensus/state_processing/src/common/initiate_validator_exit.rs @@ -1,3 +1,4 @@ +use safe_arith::SafeArith; use std::cmp::max; use types::{BeaconStateError as Error, *}; @@ -22,7 +23,7 @@ pub fn initiate_validator_exit( state.exit_cache.build(&state.validators, spec)?; // Compute exit queue epoch - let delayed_epoch = state.compute_activation_exit_epoch(state.current_epoch(), spec); + let delayed_epoch = state.compute_activation_exit_epoch(state.current_epoch(), spec)?; let mut exit_queue_epoch = state .exit_cache .max_epoch()? @@ -30,13 +31,13 @@ pub fn initiate_validator_exit( let exit_queue_churn = state.exit_cache.get_churn_at(exit_queue_epoch)?; if exit_queue_churn >= state.get_churn_limit(spec)? 
{ - exit_queue_epoch += 1; + exit_queue_epoch.safe_add_assign(1)?; } state.exit_cache.record_validator_exit(exit_queue_epoch)?; state.validators[index].exit_epoch = exit_queue_epoch; state.validators[index].withdrawable_epoch = - exit_queue_epoch + spec.min_validator_withdrawability_delay; + exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; Ok(()) } diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 754534f0c39..0b087481937 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -23,7 +23,7 @@ pub fn slash_validator( state.validators[slashed_index].slashed = true; state.validators[slashed_index].withdrawable_epoch = cmp::max( state.validators[slashed_index].withdrawable_epoch, - epoch + Epoch::from(T::EpochsPerSlashingsVector::to_u64()), + epoch.safe_add(T::EpochsPerSlashingsVector::to_u64())?, ); let validator_effective_balance = state.get_effective_balance(slashed_index, spec)?; state.set_slashings( diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index d6747b65e5d..e6c57beb24e 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -368,7 +368,7 @@ pub fn process_attestations( let pending_attestation = PendingAttestation { aggregation_bits: attestation.aggregation_bits.clone(), data: attestation.data.clone(), - inclusion_delay: (state.slot - attestation.data.slot).as_u64(), + inclusion_delay: state.slot.safe_sub(attestation.data.slot)?.as_u64(), proposer_index, }; @@ -444,11 +444,7 @@ pub fn process_deposit( .map_err(|e| e.into_with_index(deposit_index))?; } - state.eth1_deposit_index.increment()?; - - // Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the - // depositing validator already 
exists in the registry. - state.update_pubkey_cache()?; + state.eth1_deposit_index.safe_add_assign(1)?; // Get an `Option` where `u64` is the validator index if this deposit public key // already exists in the beacon_state. diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 3ab962e2e2b..678ba28e160 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -2,6 +2,7 @@ use super::errors::{AttestationInvalid as Invalid, BlockOperationError}; use super::VerifySignatures; use crate::common::get_indexed_attestation; use crate::per_block_processing::is_valid_indexed_attestation; +use safe_arith::SafeArith; use types::*; type Result = std::result::Result>; @@ -25,7 +26,7 @@ pub fn verify_attestation_for_block_inclusion( let data = &attestation.data; verify!( - data.slot + spec.min_attestation_inclusion_delay <= state.slot, + data.slot.safe_add(spec.min_attestation_inclusion_delay)? <= state.slot, Invalid::IncludedTooEarly { state: state.slot, delay: spec.min_attestation_inclusion_delay, @@ -33,7 +34,7 @@ pub fn verify_attestation_for_block_inclusion( } ); verify!( - state.slot <= data.slot + T::slots_per_epoch(), + state.slot <= data.slot.safe_add(T::slots_per_epoch())?, Invalid::IncludedTooLate { state: state.slot, attestation: data.slot, diff --git a/consensus/state_processing/src/per_block_processing/verify_deposit.rs b/consensus/state_processing/src/per_block_processing/verify_deposit.rs index 510b73b2a86..5e7e6f1ad10 100644 --- a/consensus/state_processing/src/per_block_processing/verify_deposit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_deposit.rs @@ -35,7 +35,7 @@ pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> /// /// Errors if the state's `pubkey_cache` is not current. 
pub fn get_existing_validator_index( - state: &BeaconState, + state: &mut BeaconState, pub_key: &PublicKeyBytes, ) -> Result> { let validator_index = state.get_validator_index(pub_key)?; diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index c77ffe53613..16c4db221d6 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -3,6 +3,7 @@ use crate::per_block_processing::{ signature_sets::{exit_signature_set, get_pubkey_from_state}, VerifySignatures, }; +use safe_arith::SafeArith; use types::*; type Result = std::result::Result>; @@ -77,11 +78,14 @@ fn verify_exit_parametric( ); // Verify the validator has been active long enough. + let earliest_exit_epoch = validator + .activation_epoch + .safe_add(spec.shard_committee_period)?; verify!( - state.current_epoch() >= validator.activation_epoch + spec.shard_committee_period, + state.current_epoch() >= earliest_exit_epoch, ExitInvalid::TooYoungToExit { current_epoch: state.current_epoch(), - earliest_exit_epoch: validator.activation_epoch + spec.shard_committee_period, + earliest_exit_epoch, } ); diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 0321bce35a1..19b87aa57b9 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -84,7 +84,7 @@ pub fn process_justification_and_finalization( state: &mut BeaconState, total_balances: &TotalBalances, ) -> Result<(), Error> { - if state.current_epoch() <= T::genesis_epoch() + 1 { + if state.current_epoch() <= T::genesis_epoch().safe_add(1)? { return Ok(()); } @@ -126,25 +126,25 @@ pub fn process_justification_and_finalization( // The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source. 
if (1..4).all(|i| bits.get(i).unwrap_or(false)) - && old_previous_justified_checkpoint.epoch + 3 == current_epoch + && old_previous_justified_checkpoint.epoch.safe_add(3)? == current_epoch { state.finalized_checkpoint = old_previous_justified_checkpoint; } // The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source. else if (1..3).all(|i| bits.get(i).unwrap_or(false)) - && old_previous_justified_checkpoint.epoch + 2 == current_epoch + && old_previous_justified_checkpoint.epoch.safe_add(2)? == current_epoch { state.finalized_checkpoint = old_previous_justified_checkpoint; } // The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3nd as source. if (0..3).all(|i| bits.get(i).unwrap_or(false)) - && old_current_justified_checkpoint.epoch + 2 == current_epoch + && old_current_justified_checkpoint.epoch.safe_add(2)? == current_epoch { state.finalized_checkpoint = old_current_justified_checkpoint; } // The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source. else if (0..2).all(|i| bits.get(i).unwrap_or(false)) - && old_current_justified_checkpoint.epoch + 1 == current_epoch + && old_current_justified_checkpoint.epoch.safe_add(1)? == current_epoch { state.finalized_checkpoint = old_current_justified_checkpoint; } @@ -160,10 +160,15 @@ pub fn process_final_updates( spec: &ChainSpec, ) -> Result<(), Error> { let current_epoch = state.current_epoch(); - let next_epoch = state.next_epoch(); + let next_epoch = state.next_epoch()?; // Reset eth1 data votes. - if (state.slot + 1) % T::SlotsPerEth1VotingPeriod::to_u64() == 0 { + if state + .slot + .safe_add(1)? + .safe_rem(T::SlotsPerEth1VotingPeriod::to_u64())? 
+ == 0 + { state.eth1_data_votes = VariableList::empty(); } diff --git a/consensus/state_processing/src/per_epoch_processing/apply_rewards.rs b/consensus/state_processing/src/per_epoch_processing/apply_rewards.rs index 18c946520b5..4115bfef3bb 100644 --- a/consensus/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/consensus/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -71,7 +71,10 @@ fn get_attestation_deltas( validator_statuses: &ValidatorStatuses, spec: &ChainSpec, ) -> Result, Error> { - let finality_delay = (state.previous_epoch() - state.finalized_checkpoint.epoch).as_u64(); + let finality_delay = state + .previous_epoch() + .safe_sub(state.finalized_checkpoint.epoch)? + .as_u64(); let mut deltas = vec![Delta::default(); state.validators.len()]; diff --git a/consensus/state_processing/src/per_epoch_processing/process_slashings.rs b/consensus/state_processing/src/per_epoch_processing/process_slashings.rs index 4901d303063..40d96f30ccd 100644 --- a/consensus/state_processing/src/per_epoch_processing/process_slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/process_slashings.rs @@ -11,17 +11,21 @@ pub fn process_slashings( ) -> Result<(), Error> { let epoch = state.current_epoch(); let sum_slashings = state.get_all_slashings().iter().copied().safe_sum()?; + let adjusted_total_slashing_balance = std::cmp::min( + sum_slashings.safe_mul(spec.proportional_slashing_multiplier)?, + total_balance, + ); for (index, validator) in state.validators.iter().enumerate() { if validator.slashed - && epoch + T::EpochsPerSlashingsVector::to_u64().safe_div(2)? + && epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)? == validator.withdrawable_epoch { let increment = spec.effective_balance_increment; let penalty_numerator = validator .effective_balance .safe_div(increment)? 
- .safe_mul(std::cmp::min(sum_slashings.safe_mul(3)?, total_balance))?; + .safe_mul(adjusted_total_slashing_balance)?; let penalty = penalty_numerator .safe_div(total_balance)? .safe_mul(increment)?; diff --git a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs index 79ece8c606d..26f055ba4f7 100644 --- a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs @@ -1,6 +1,6 @@ -use super::super::common::initiate_validator_exit; -use super::Error; +use crate::{common::initiate_validator_exit, per_epoch_processing::Error}; use itertools::Itertools; +use safe_arith::SafeArith; use types::*; /// Performs a validator registry update, if required. @@ -31,7 +31,7 @@ pub fn process_registry_updates( for index in indices_to_update { if state.validators[index].is_eligible_for_activation_queue(spec) { - state.validators[index].activation_eligibility_epoch = current_epoch + 1; + state.validators[index].activation_eligibility_epoch = current_epoch.safe_add(1)?; } if is_ejectable(&state.validators[index]) { initiate_validator_exit(state, index, spec)?; @@ -50,7 +50,7 @@ pub fn process_registry_updates( // Dequeue validators for activation up to churn limit let churn_limit = state.get_churn_limit(spec)? 
as usize; - let delayed_activation_epoch = state.compute_activation_exit_epoch(current_epoch, spec); + let delayed_activation_epoch = state.compute_activation_exit_epoch(current_epoch, spec)?; for index in activation_queue.into_iter().take(churn_limit) { let validator = &mut state.validators[index]; validator.activation_epoch = delayed_activation_epoch; diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 02acfc825a6..a818bde52bf 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,10 +1,18 @@ use crate::{per_epoch_processing::EpochProcessingSummary, *}; +use safe_arith::{ArithError, SafeArith}; use types::*; #[derive(Debug, PartialEq)] pub enum Error { BeaconStateError(BeaconStateError), EpochProcessingError(EpochProcessingError), + ArithError(ArithError), +} + +impl From for Error { + fn from(e: ArithError) -> Self { + Self::ArithError(e) + } } /// Advances a state forward by one slot, performing per-epoch processing if required. @@ -21,14 +29,15 @@ pub fn per_slot_processing( ) -> Result, Error> { cache_state(state, state_root)?; - let summary = if state.slot > spec.genesis_slot && (state.slot + 1) % T::slots_per_epoch() == 0 + let summary = if state.slot > spec.genesis_slot + && state.slot.safe_add(1)?.safe_rem(T::slots_per_epoch())? == 0 { Some(per_epoch_processing(state, spec)?) } else { None }; - state.slot += 1; + state.slot.safe_add_assign(1)?; Ok(summary) } @@ -48,7 +57,7 @@ fn cache_state( // // This is a bit hacky, however it gets the job safely without lots of code. let previous_slot = state.slot; - state.slot += 1; + state.slot.safe_add_assign(1)?; // Store the previous slot's post state transition root. 
state.set_state_root(previous_slot, previous_state_root)?; @@ -63,7 +72,7 @@ fn cache_state( state.set_block_root(previous_slot, latest_block_root)?; // Set the state slot back to what it should be. - state.slot -= 1; + state.slot.safe_sub_assign(1)?; Ok(()) } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 77dfaa2c206..80b4007b973 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -45,7 +45,9 @@ serde_json = "1.0.52" criterion = "0.3.2" [features] -default = ["sqlite"] +default = ["sqlite", "legacy-arith"] +# Allow saturating arithmetic on slots and epochs. Enabled by default, but deprecated. +legacy-arith = [] sqlite = ["rusqlite"] arbitrary-fuzz = [ "arbitrary", diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 9594a22175d..a2d923da9d3 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -100,10 +100,10 @@ enum AllowNextEpoch { } impl AllowNextEpoch { - fn upper_bound_of(self, current_epoch: Epoch) -> Epoch { + fn upper_bound_of(self, current_epoch: Epoch) -> Result { match self { - AllowNextEpoch::True => current_epoch + 1, - AllowNextEpoch::False => current_epoch, + AllowNextEpoch::True => Ok(current_epoch.safe_add(1)?), + AllowNextEpoch::False => Ok(current_epoch), } } } @@ -300,19 +300,12 @@ impl BeaconState { } } - /// If a validator pubkey exists in the validator registry, returns `Some(i)`, otherwise - /// returns `None`. - /// - /// Requires a fully up-to-date `pubkey_cache`, returns an error if this is not the case. 
- pub fn get_validator_index(&self, pubkey: &PublicKeyBytes) -> Result, Error> { - if self.pubkey_cache.len() == self.validators.len() { - Ok(self.pubkey_cache.get(pubkey)) - } else { - Err(Error::PubkeyCacheIncomplete { - cache_len: self.pubkey_cache.len(), - registry_len: self.validators.len(), - }) - } + /// This method ensures the state's pubkey cache is fully up-to-date before checking if the validator + /// exists in the registry. If a validator pubkey exists in the validator registry, returns `Some(i)`, + /// otherwise returns `None`. + pub fn get_validator_index(&mut self, pubkey: &PublicKeyBytes) -> Result, Error> { + self.update_pubkey_cache()?; + Ok(self.pubkey_cache.get(pubkey)) } /// The epoch corresponding to `self.slot`. @@ -330,7 +323,9 @@ impl BeaconState { pub fn previous_epoch(&self) -> Epoch { let current_epoch = self.current_epoch(); if current_epoch > T::genesis_epoch() { - current_epoch - 1 + current_epoch + .safe_sub(1) + .expect("current epoch greater than genesis implies greater than 0") } else { current_epoch } @@ -339,8 +334,8 @@ impl BeaconState { /// The epoch following `self.current_epoch()`. /// /// Spec v0.12.1 - pub fn next_epoch(&self) -> Epoch { - self.current_epoch() + 1 + pub fn next_epoch(&self) -> Result { + Ok(self.current_epoch().safe_add(1)?) } /// Compute the number of committees at `slot`. @@ -385,7 +380,7 @@ impl BeaconState { epoch: Epoch, spec: &ChainSpec, ) -> Result, Error> { - if epoch >= self.compute_activation_exit_epoch(self.current_epoch(), spec) { + if epoch >= self.compute_activation_exit_epoch(self.current_epoch(), spec)? 
{ Err(BeaconStateError::EpochOutOfBounds) } else { Ok(get_active_validator_indices(&self.validators, epoch)) @@ -482,7 +477,7 @@ impl BeaconState { { return Ok(candidate_index); } - i.increment()?; + i.safe_add_assign(1)?; } } @@ -560,7 +555,7 @@ impl BeaconState { /// /// Spec v0.12.1 fn get_latest_block_roots_index(&self, slot: Slot) -> Result { - if slot < self.slot && self.slot <= slot + self.block_roots.len() as u64 { + if slot < self.slot && self.slot <= slot.safe_add(self.block_roots.len() as u64)? { Ok(slot.as_usize().safe_rem(self.block_roots.len())?) } else { Err(BeaconStateError::SlotOutOfBounds) @@ -612,7 +607,9 @@ impl BeaconState { let current_epoch = self.current_epoch(); let len = T::EpochsPerHistoricalVector::to_u64(); - if current_epoch < epoch + len && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { + if current_epoch < epoch.safe_add(len)? + && epoch <= allow_next_epoch.upper_bound_of(current_epoch)? + { Ok(epoch.as_usize().safe_rem(len as usize)?) } else { Err(Error::EpochOutOfBounds) @@ -659,7 +656,7 @@ impl BeaconState { /// /// Spec v0.12.1 fn get_latest_state_roots_index(&self, slot: Slot) -> Result { - if slot < self.slot && self.slot <= slot + self.state_roots.len() as u64 { + if slot < self.slot && self.slot <= slot.safe_add(self.state_roots.len() as u64)? { Ok(slot.as_usize().safe_rem(self.state_roots.len())?) 
} else { Err(BeaconStateError::SlotOutOfBounds) @@ -679,7 +676,7 @@ impl BeaconState { /// Spec v0.12.1 pub fn get_oldest_state_root(&self) -> Result<&Hash256, Error> { let i = - self.get_latest_state_roots_index(self.slot - Slot::from(self.state_roots.len()))?; + self.get_latest_state_roots_index(self.slot.saturating_sub(self.state_roots.len()))?; Ok(&self.state_roots[i]) } @@ -687,7 +684,9 @@ impl BeaconState { /// /// Spec v0.12.1 pub fn get_oldest_block_root(&self) -> Result<&Hash256, Error> { - let i = self.get_latest_block_roots_index(self.slot - self.block_roots.len() as u64)?; + let i = self.get_latest_block_roots_index( + self.slot.saturating_sub(self.block_roots.len() as u64), + )?; Ok(&self.block_roots[i]) } @@ -719,8 +718,8 @@ impl BeaconState { // We allow the slashings vector to be accessed at any cached epoch at or before // the current epoch, or the next epoch if `AllowNextEpoch::True` is passed. let current_epoch = self.current_epoch(); - if current_epoch < epoch + T::EpochsPerSlashingsVector::to_u64() - && epoch <= allow_next_epoch.upper_bound_of(current_epoch) + if current_epoch < epoch.safe_add(T::EpochsPerSlashingsVector::to_u64())? + && epoch <= allow_next_epoch.upper_bound_of(current_epoch)? { Ok(epoch .as_usize() @@ -782,7 +781,10 @@ impl BeaconState { // Bypass the safe getter for RANDAO so we can gracefully handle the scenario where `epoch // == 0`. let mix = { - let i = epoch + T::EpochsPerHistoricalVector::to_u64() - spec.min_seed_lookahead - 1; + let i = epoch + .safe_add(T::EpochsPerHistoricalVector::to_u64())? + .safe_sub(spec.min_seed_lookahead)? + .safe_sub(1)?; self.randao_mixes[i.as_usize().safe_rem(self.randao_mixes.len())?] }; let domain_bytes = int_to_bytes4(spec.get_domain_constant(domain_type)); @@ -818,8 +820,12 @@ impl BeaconState { /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. 
/// /// Spec v0.12.1 - pub fn compute_activation_exit_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch { - epoch + 1 + spec.max_seed_lookahead + pub fn compute_activation_exit_epoch( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result { + Ok(epoch.safe_add(1)?.safe_add(spec.max_seed_lookahead)?) } /// Return the churn limit for the current epoch (number of validators who can leave per epoch). diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index f71ad2e893e..6ee24cd2bb2 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -3,6 +3,7 @@ use super::BeaconState; use crate::*; use core::num::NonZeroUsize; +use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::ops::Range; @@ -197,7 +198,7 @@ impl CommitteeCache { let epoch_start_slot = self.initialized_epoch?.start_slot(self.slots_per_epoch); let slot_offset = global_committee_index / self.committees_per_slot; let index = global_committee_index % self.committees_per_slot; - Some((epoch_start_slot + slot_offset, index)) + Some((epoch_start_slot.safe_add(slot_offset).ok()?, index)) } /// Returns the number of active validators in the initialized epoch. 
diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index ee2ca8eed06..e1256cb4859 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -53,8 +53,8 @@ fn initializes_with_the_right_epoch() { let cache = CommitteeCache::initialized(&state, state.previous_epoch(), &spec).unwrap(); assert_eq!(cache.initialized_epoch, Some(state.previous_epoch())); - let cache = CommitteeCache::initialized(&state, state.next_epoch(), &spec).unwrap(); - assert_eq!(cache.initialized_epoch, Some(state.next_epoch())); + let cache = CommitteeCache::initialized(&state, state.next_epoch().unwrap(), &spec).unwrap(); + assert_eq!(cache.initialized_epoch, Some(state.next_epoch().unwrap())); } #[test] @@ -81,7 +81,7 @@ fn shuffles_for_the_right_epoch() { .get_seed(state.current_epoch(), Domain::BeaconAttester, spec) .unwrap(); let next_seed = state - .get_seed(state.next_epoch(), Domain::BeaconAttester, spec) + .get_seed(state.next_epoch().unwrap(), Domain::BeaconAttester, spec) .unwrap(); assert!((previous_seed != current_seed) && (current_seed != next_seed)); @@ -114,7 +114,7 @@ fn shuffles_for_the_right_epoch() { assert_eq!(cache.shuffling, shuffling_with_seed(previous_seed)); assert_shuffling_positions_accurate(&cache); - let cache = CommitteeCache::initialized(&state, state.next_epoch(), spec).unwrap(); + let cache = CommitteeCache::initialized(&state, state.next_epoch().unwrap(), spec).unwrap(); assert_eq!(cache.shuffling, shuffling_with_seed(next_seed)); assert_shuffling_positions_accurate(&cache); } diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index 0f75e0f28ab..364c1daf0d5 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -44,7 +44,7 @@ impl ExitCache { self.exit_epoch_counts .entry(exit_epoch) 
.or_insert(0) - .increment()?; + .safe_add_assign(1)?; Ok(()) } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 5afe7db8fdd..c621acb81b8 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -52,6 +52,7 @@ pub struct ChainSpec { pub hysteresis_quotient: u64, pub hysteresis_downward_multiplier: u64, pub hysteresis_upward_multiplier: u64, + pub proportional_slashing_multiplier: u64, /* * Gwei values @@ -243,7 +244,7 @@ impl ChainSpec { /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. /// - /// Spec v0.12.1 + /// Spec v0.12.3 pub fn mainnet() -> Self { Self { /* @@ -267,6 +268,7 @@ impl ChainSpec { hysteresis_quotient: 4, hysteresis_downward_multiplier: 1, hysteresis_upward_multiplier: 5, + proportional_slashing_multiplier: 3, /* * Gwei values @@ -437,21 +439,15 @@ mod tests { } } -/// Union of a ChainSpec struct and an EthSpec struct that holds constants used for the configs -/// from the Ethereum 2 specs repo (https://github.com/ethereum/eth2.0-specs/tree/dev/configs) -/// -/// Doesn't include fields of the YAML that we don't need yet (e.g. Phase 1 stuff). +/// YAML config file as defined by the spec. /// -/// Spec v0.12.1 -// Yaml Config is declared here in order to access domain fields of ChainSpec which are private. 
+/// Spec v0.12.3 #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] #[serde(rename_all = "UPPERCASE")] -#[serde(default)] pub struct YamlConfig { + #[serde(default)] + config_name: String, // ChainSpec - far_future_epoch: u64, - base_rewards_per_epoch: u64, - deposit_contract_tree_depth: u64, max_committees_per_slot: usize, target_committee_size: usize, min_per_epoch_churn_limit: u64, @@ -467,7 +463,9 @@ pub struct YamlConfig { hysteresis_quotient: u64, hysteresis_downward_multiplier: u64, hysteresis_upward_multiplier: u64, - genesis_slot: u64, + // Proportional slashing multiplier defaults to 3 for compatibility with Altona and Medalla. + #[serde(default = "default_proportional_slashing_multiplier")] + proportional_slashing_multiplier: u64, #[serde( serialize_with = "fork_to_hex_str", deserialize_with = "fork_from_hex_str" @@ -524,14 +522,8 @@ pub struct YamlConfig { serialize_with = "u32_to_hex_str" )] domain_aggregate_and_proof: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] // EthSpec - justification_bits_length: u32, max_validators_per_committee: u32, - genesis_epoch: Epoch, slots_per_epoch: u64, epochs_per_eth1_voting_period: u64, slots_per_historical_root: usize, @@ -544,13 +536,22 @@ pub struct YamlConfig { max_attestations: u32, max_deposits: u32, max_voluntary_exits: u32, - // Validator eth1_follow_distance: u64, target_aggregators_per_committee: u64, random_subnets_per_validator: u64, epochs_per_random_subnet_subscription: u64, seconds_per_eth1_block: u64, + /* TODO: incorporate these into ChainSpec and turn on `serde(deny_unknown_fields)` + deposit_chain_id: u64, + deposit_network_id: u64, + deposit_contract_address: String, + */ +} + +// Compatibility shim for proportional slashing multpilier on Altona and Medalla. 
+fn default_proportional_slashing_multiplier() -> u64 { + 3 } impl Default for YamlConfig { @@ -565,10 +566,8 @@ impl YamlConfig { #[allow(clippy::integer_arithmetic)] pub fn from_spec(spec: &ChainSpec) -> Self { Self { + config_name: T::spec_name().to_string(), // ChainSpec - far_future_epoch: spec.far_future_epoch.into(), - base_rewards_per_epoch: spec.base_rewards_per_epoch, - deposit_contract_tree_depth: spec.deposit_contract_tree_depth, max_committees_per_slot: spec.max_committees_per_slot, target_committee_size: spec.target_committee_size, min_per_epoch_churn_limit: spec.min_per_epoch_churn_limit, @@ -584,7 +583,7 @@ impl YamlConfig { hysteresis_quotient: spec.hysteresis_quotient, hysteresis_downward_multiplier: spec.hysteresis_downward_multiplier, hysteresis_upward_multiplier: spec.hysteresis_upward_multiplier, - genesis_slot: spec.genesis_slot.into(), + proportional_slashing_multiplier: spec.proportional_slashing_multiplier, bls_withdrawal_prefix: spec.bls_withdrawal_prefix_byte, seconds_per_slot: spec.milliseconds_per_slot / 1000, min_attestation_inclusion_delay: spec.min_attestation_inclusion_delay, @@ -609,9 +608,7 @@ impl YamlConfig { domain_aggregate_and_proof: spec.domain_aggregate_and_proof, // EthSpec - justification_bits_length: T::JustificationBitsLength::to_u32(), max_validators_per_committee: T::MaxValidatorsPerCommittee::to_u32(), - genesis_epoch: T::genesis_epoch(), slots_per_epoch: T::slots_per_epoch(), epochs_per_eth1_voting_period: T::EpochsPerEth1VotingPeriod::to_u64(), slots_per_historical_root: T::slots_per_historical_root(), @@ -642,10 +639,8 @@ impl YamlConfig { } pub fn apply_to_chain_spec(&self, chain_spec: &ChainSpec) -> Option { - // Checking for EthSpec constants - if self.justification_bits_length != T::JustificationBitsLength::to_u32() - || self.max_validators_per_committee != T::MaxValidatorsPerCommittee::to_u32() - || self.genesis_epoch != T::genesis_epoch() + // Check that YAML values match type-level EthSpec constants + if 
self.max_validators_per_committee != T::MaxValidatorsPerCommittee::to_u32() || self.slots_per_epoch != T::slots_per_epoch() || self.epochs_per_eth1_voting_period != T::EpochsPerEth1VotingPeriod::to_u64() || self.slots_per_historical_root != T::slots_per_historical_root() @@ -664,25 +659,48 @@ impl YamlConfig { // Create a ChainSpec from the yaml config Some(ChainSpec { - far_future_epoch: Epoch::from(self.far_future_epoch), - base_rewards_per_epoch: self.base_rewards_per_epoch, - deposit_contract_tree_depth: self.deposit_contract_tree_depth, + /* + * Misc + */ + max_committees_per_slot: self.max_committees_per_slot, target_committee_size: self.target_committee_size, min_per_epoch_churn_limit: self.min_per_epoch_churn_limit, churn_limit_quotient: self.churn_limit_quotient, shuffle_round_count: self.shuffle_round_count, min_genesis_active_validator_count: self.min_genesis_active_validator_count, min_genesis_time: self.min_genesis_time, - min_deposit_amount: self.min_deposit_amount, - genesis_delay: self.genesis_delay, - max_effective_balance: self.max_effective_balance, hysteresis_quotient: self.hysteresis_quotient, hysteresis_downward_multiplier: self.hysteresis_downward_multiplier, hysteresis_upward_multiplier: self.hysteresis_upward_multiplier, + proportional_slashing_multiplier: self.proportional_slashing_multiplier, + /* + * Fork Choice + */ + safe_slots_to_update_justified: self.safe_slots_to_update_justified, + /* + * Validator + */ + eth1_follow_distance: self.eth1_follow_distance, + target_aggregators_per_committee: self.target_aggregators_per_committee, + random_subnets_per_validator: self.random_subnets_per_validator, + epochs_per_random_subnet_subscription: self.epochs_per_random_subnet_subscription, + seconds_per_eth1_block: self.seconds_per_eth1_block, + /* + * Gwei values + */ + min_deposit_amount: self.min_deposit_amount, + max_effective_balance: self.max_effective_balance, ejection_balance: self.ejection_balance, effective_balance_increment: 
self.effective_balance_increment, - genesis_slot: Slot::from(self.genesis_slot), + /* + * Initial values + */ + genesis_fork_version: self.genesis_fork_version, bls_withdrawal_prefix_byte: self.bls_withdrawal_prefix, + /* + * Time parameters + */ + genesis_delay: self.genesis_delay, milliseconds_per_slot: self.seconds_per_slot.saturating_mul(1000), min_attestation_inclusion_delay: self.min_attestation_inclusion_delay, min_seed_lookahead: Epoch::from(self.min_seed_lookahead), @@ -692,20 +710,43 @@ impl YamlConfig { ), shard_committee_period: self.shard_committee_period, min_epochs_to_inactivity_penalty: self.min_epochs_to_inactivity_penalty, + /* + * Reward and penalty quotients + */ base_reward_factor: self.base_reward_factor, whistleblower_reward_quotient: self.whistleblower_reward_quotient, proposer_reward_quotient: self.proposer_reward_quotient, inactivity_penalty_quotient: self.inactivity_penalty_quotient, min_slashing_penalty_quotient: self.min_slashing_penalty_quotient, + /* + * Signature domains + */ domain_beacon_proposer: self.domain_beacon_proposer, domain_beacon_attester: self.domain_beacon_attester, domain_randao: self.domain_randao, domain_deposit: self.domain_deposit, domain_voluntary_exit: self.domain_voluntary_exit, + domain_selection_proof: self.domain_selection_proof, + domain_aggregate_and_proof: self.domain_aggregate_and_proof, + /* + * Lighthouse-specific parameters + * + * These are paramaters that are present in the chain spec but aren't part of the YAML + * config. We avoid using `..chain_spec` so that changes to the set of fields don't + * accidentally get forgotten (explicit better than implicit, yada yada). 
+ */ boot_nodes: chain_spec.boot_nodes.clone(), - genesis_fork_version: self.genesis_fork_version, - eth1_follow_distance: self.eth1_follow_distance, - ..*chain_spec + network_id: chain_spec.network_id, + attestation_propagation_slot_range: chain_spec.attestation_propagation_slot_range, + maximum_gossip_clock_disparity_millis: chain_spec.maximum_gossip_clock_disparity_millis, + attestation_subnet_count: chain_spec.attestation_subnet_count, + /* + * Constants, not configurable. + */ + genesis_slot: chain_spec.genesis_slot, + far_future_epoch: chain_spec.far_future_epoch, + base_rewards_per_epoch: chain_spec.base_rewards_per_epoch, + deposit_contract_tree_depth: chain_spec.deposit_contract_tree_depth, }) } } @@ -768,7 +809,7 @@ mod yaml_tests { let yamlconfig = YamlConfig::from_spec::(&spec); // modifying the original spec - spec.deposit_contract_tree_depth += 1; + spec.max_committees_per_slot += 1; // Applying a yaml config with incorrect EthSpec should fail let res = yamlconfig.apply_to_chain_spec::(&spec); assert_eq!(res, None); diff --git a/consensus/types/src/relative_epoch.rs b/consensus/types/src/relative_epoch.rs index 381f1730851..e681ce15c20 100644 --- a/consensus/types/src/relative_epoch.rs +++ b/consensus/types/src/relative_epoch.rs @@ -1,9 +1,17 @@ use crate::*; +use safe_arith::{ArithError, SafeArith}; #[derive(Debug, PartialEq, Clone, Copy)] pub enum Error { EpochTooLow { base: Epoch, other: Epoch }, EpochTooHigh { base: Epoch, other: Epoch }, + ArithError(ArithError), +} + +impl From for Error { + fn from(e: ArithError) -> Self { + Self::ArithError(e) + } } #[cfg(feature = "arbitrary-fuzz")] @@ -32,8 +40,8 @@ impl RelativeEpoch { match self { // Due to saturating nature of epoch, check for current first. 
RelativeEpoch::Current => base, - RelativeEpoch::Previous => base - 1, - RelativeEpoch::Next => base + 1, + RelativeEpoch::Previous => base.saturating_sub(1u64), + RelativeEpoch::Next => base.saturating_add(1u64), } } @@ -46,12 +54,11 @@ impl RelativeEpoch { /// /// Spec v0.12.1 pub fn from_epoch(base: Epoch, other: Epoch) -> Result { - // Due to saturating nature of epoch, check for current first. if other == base { Ok(RelativeEpoch::Current) - } else if other == base - 1 { + } else if other.safe_add(1)? == base { Ok(RelativeEpoch::Previous) - } else if other == base + 1 { + } else if other == base.safe_add(1)? { Ok(RelativeEpoch::Next) } else if other < base { Err(Error::EpochTooLow { base, other }) diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index ea4e23375ca..ffb88c7ee6d 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -14,21 +14,23 @@ use crate::test_utils::TestRandom; use crate::SignedRoot; use rand::RngCore; +use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; -use std::cmp::{Ord, Ordering}; use std::fmt; -use std::hash::{Hash, Hasher}; +use std::hash::Hash; use std::iter::Iterator; + +#[cfg(feature = "legacy-arith")] use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)] +#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct Slot(u64); #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)] +#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct Epoch(u64); @@ -41,7 +43,9 @@ impl Slot { } pub fn epoch(self, slots_per_epoch: u64) -> 
Epoch { - Epoch::from(self.0) / Epoch::from(slots_per_epoch) + Epoch::new(self.0) + .safe_div(slots_per_epoch) + .expect("slots_per_epoch is not 0") } pub fn max_value() -> Slot { @@ -96,9 +100,6 @@ impl Epoch { } } -impl SignedRoot for Epoch {} -impl SignedRoot for Slot {} - pub struct SlotIter<'a> { current_iteration: u64, epoch: &'a Epoch, @@ -115,7 +116,7 @@ impl<'a> Iterator for SlotIter<'a> { let start_slot = self.epoch.start_slot(self.slots_per_epoch); let previous = self.current_iteration; self.current_iteration = self.current_iteration.checked_add(1)?; - Some(start_slot + previous) + start_slot.safe_add(previous).ok() } } } diff --git a/consensus/types/src/slot_epoch_macros.rs b/consensus/types/src/slot_epoch_macros.rs index 15263f654e8..26b80692c94 100644 --- a/consensus/types/src/slot_epoch_macros.rs +++ b/consensus/types/src/slot_epoch_macros.rs @@ -42,22 +42,84 @@ macro_rules! impl_from_into_usize { }; } -macro_rules! impl_math_between { - ($main: ident, $other: ident) => { - impl PartialOrd<$other> for $main { - /// Utilizes `partial_cmp` on the underlying `u64`. - fn partial_cmp(&self, other: &$other) -> Option { - Some(self.0.cmp(&(*other).into())) +macro_rules! impl_u64_eq_ord { + ($type: ident) => { + impl PartialEq for $type { + fn eq(&self, other: &u64) -> bool { + self.as_u64() == *other + } + } + + impl PartialOrd for $type { + fn partial_cmp(&self, other: &u64) -> Option { + self.as_u64().partial_cmp(other) } } + }; +} + +macro_rules! 
impl_safe_arith { + ($type: ident, $rhs_ty: ident) => { + impl safe_arith::SafeArith<$rhs_ty> for $type { + const ZERO: Self = $type::new(0); + const ONE: Self = $type::new(1); + + fn safe_add(&self, other: $rhs_ty) -> safe_arith::Result { + self.0 + .checked_add(other.into()) + .map(Self::new) + .ok_or(safe_arith::ArithError::Overflow) + } + + fn safe_sub(&self, other: $rhs_ty) -> safe_arith::Result { + self.0 + .checked_sub(other.into()) + .map(Self::new) + .ok_or(safe_arith::ArithError::Overflow) + } + + fn safe_mul(&self, other: $rhs_ty) -> safe_arith::Result { + self.0 + .checked_mul(other.into()) + .map(Self::new) + .ok_or(safe_arith::ArithError::Overflow) + } + + fn safe_div(&self, other: $rhs_ty) -> safe_arith::Result { + self.0 + .checked_div(other.into()) + .map(Self::new) + .ok_or(safe_arith::ArithError::DivisionByZero) + } + + fn safe_rem(&self, other: $rhs_ty) -> safe_arith::Result { + self.0 + .checked_rem(other.into()) + .map(Self::new) + .ok_or(safe_arith::ArithError::DivisionByZero) + } + + fn safe_shl(&self, other: u32) -> safe_arith::Result { + self.0 + .checked_shl(other) + .map(Self::new) + .ok_or(safe_arith::ArithError::Overflow) + } - impl PartialEq<$other> for $main { - fn eq(&self, other: &$other) -> bool { - let other: u64 = (*other).into(); - self.0 == other + fn safe_shr(&self, other: u32) -> safe_arith::Result { + self.0 + .checked_shr(other) + .map(Self::new) + .ok_or(safe_arith::ArithError::Overflow) } } + }; +} +// Deprecated: prefer `SafeArith` methods for new code. +#[cfg(feature = "legacy-arith")] +macro_rules! impl_math_between { + ($main: ident, $other: ident) => { impl Add<$other> for $main { type Output = $main; @@ -144,33 +206,17 @@ macro_rules! 
impl_math { ($type: ident) => { impl $type { pub fn saturating_sub>(&self, other: T) -> $type { - *self - other.into() + $type::new(self.as_u64().saturating_sub(other.into().as_u64())) } pub fn saturating_add>(&self, other: T) -> $type { - *self + other.into() - } - - pub fn checked_div>(&self, rhs: T) -> Option<$type> { - let rhs: $type = rhs.into(); - if rhs == 0 { - None - } else { - Some(*self / rhs) - } + $type::new(self.as_u64().saturating_add(other.into().as_u64())) } pub fn is_power_of_two(&self) -> bool { self.0.is_power_of_two() } } - - impl Ord for $type { - fn cmp(&self, other: &$type) -> Ordering { - let other: u64 = (*other).into(); - self.0.cmp(&other) - } - } }; } @@ -257,6 +303,8 @@ macro_rules! impl_ssz { } } + impl SignedRoot for $type {} + impl TestRandom for $type { fn random_for_test(rng: &mut impl RngCore) -> Self { $type::from(u64::random_for_test(rng)) @@ -265,29 +313,21 @@ macro_rules! impl_ssz { }; } -macro_rules! impl_hash { - ($type: ident) => { - // Implemented to stop clippy lint: - // https://rust-lang.github.io/rust-clippy/master/index.html#derive_hash_xor_eq - impl Hash for $type { - fn hash(&self, state: &mut H) { - ssz_encode(self).hash(state) - } - } - }; -} - macro_rules! impl_common { ($type: ident) => { impl_from_into_u64!($type); impl_from_into_usize!($type); + impl_u64_eq_ord!($type); + impl_safe_arith!($type, $type); + impl_safe_arith!($type, u64); + #[cfg(feature = "legacy-arith")] impl_math_between!($type, $type); + #[cfg(feature = "legacy-arith")] impl_math_between!($type, u64); impl_math!($type); impl_display!($type); impl_debug!($type); impl_ssz!($type); - impl_hash!($type); }; } @@ -335,6 +375,7 @@ macro_rules! math_between_tests { ($type: ident, $other: ident) => { #[test] fn partial_ord() { + use std::cmp::Ordering; let assert_partial_ord = |a: u64, partial_ord: Ordering, b: u64| { let other: $other = $type(b).into(); assert_eq!($type(a).partial_cmp(&other), Some(partial_ord)); @@ -518,7 +559,7 @@ macro_rules! 
math_tests { #[test] fn checked_div() { let assert_checked_div = |a: u64, b: u64, result: Option| { - let division_result_as_u64 = match $type(a).checked_div($type(b)) { + let division_result_as_u64 = match $type(a).safe_div($type(b)).ok() { None => None, Some(val) => Some(val.as_u64()), }; @@ -560,6 +601,7 @@ macro_rules! math_tests { #[test] fn ord() { + use std::cmp::Ordering; let assert_ord = |a: u64, ord: Ordering, b: u64| { assert_eq!($type(a).cmp(&$type(b)), ord); }; diff --git a/consensus/types/src/test_utils/builders/testing_attestation_data_builder.rs b/consensus/types/src/test_utils/builders/testing_attestation_data_builder.rs index 9ecef281586..56b3e3bbe01 100644 --- a/consensus/types/src/test_utils/builders/testing_attestation_data_builder.rs +++ b/consensus/types/src/test_utils/builders/testing_attestation_data_builder.rs @@ -1,5 +1,6 @@ use crate::test_utils::AttestationTestTask; use crate::*; +use safe_arith::SafeArith; /// Builds an `AttestationData` to be used for testing purposes. 
/// @@ -49,12 +50,19 @@ impl TestingAttestationDataBuilder { match test_task { AttestationTestTask::IncludedTooEarly => { - slot = state.slot - spec.min_attestation_inclusion_delay + 1 + slot = state + .slot + .safe_sub(spec.min_attestation_inclusion_delay) + .unwrap() + .safe_add(1u64) + .unwrap(); } - AttestationTestTask::IncludedTooLate => slot -= T::SlotsPerEpoch::to_u64(), + AttestationTestTask::IncludedTooLate => slot + .safe_sub_assign(Slot::new(T::SlotsPerEpoch::to_u64())) + .unwrap(), AttestationTestTask::TargetEpochSlotMismatch => { target = Checkpoint { - epoch: current_epoch + 1, + epoch: current_epoch.safe_add(1u64).unwrap(), root: Hash256::zero(), }; assert_ne!(target.epoch, slot.epoch(T::slots_per_epoch())); diff --git a/consensus/types/src/test_utils/builders/testing_beacon_block_builder.rs b/consensus/types/src/test_utils/builders/testing_beacon_block_builder.rs index c396d8c9666..97fe62780d8 100644 --- a/consensus/types/src/test_utils/builders/testing_beacon_block_builder.rs +++ b/consensus/types/src/test_utils/builders/testing_beacon_block_builder.rs @@ -9,6 +9,7 @@ use crate::{ use int_to_bytes::int_to_bytes32; use merkle_proof::MerkleTree; use rayon::prelude::*; +use safe_arith::SafeArith; use tree_hash::TreeHash; /// Builds a beacon block to be used for testing purposes. @@ -172,7 +173,10 @@ impl TestingBeaconBlockBuilder { num_attestations: usize, spec: &ChainSpec, ) -> Result<(), BeaconStateError> { - let mut slot = self.block.slot - spec.min_attestation_inclusion_delay; + let mut slot = self + .block + .slot + .safe_sub(spec.min_attestation_inclusion_delay)?; let mut attestations_added = 0; // Stores the following (in order): @@ -192,7 +196,7 @@ impl TestingBeaconBlockBuilder { // - The slot is too old to be included in a block at this slot. // - The `MAX_ATTESTATIONS`. loop { - if state.slot >= slot + T::slots_per_epoch() { + if state.slot >= slot.safe_add(T::slots_per_epoch())? 
{ break; } @@ -211,7 +215,7 @@ impl TestingBeaconBlockBuilder { attestations_added += 1; } - slot -= 1; + slot.safe_sub_assign(1u64)?; } // Loop through all the committees, splitting each one in half until we have diff --git a/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs b/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs index 7593a232349..922d4017fea 100644 --- a/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -131,11 +131,11 @@ impl TestingBeaconStateBuilder { state.slot = slot; - state.previous_justified_checkpoint.epoch = epoch - 3; - state.current_justified_checkpoint.epoch = epoch - 2; + state.previous_justified_checkpoint.epoch = epoch.saturating_sub(3u64); + state.current_justified_checkpoint.epoch = epoch.saturating_sub(2u64); state.justification_bits = BitVector::from_bytes(vec![0b0000_1111]).unwrap(); - state.finalized_checkpoint.epoch = epoch - 3; + state.finalized_checkpoint.epoch = state.previous_justified_checkpoint.epoch; } /// Creates a full set of attestations for the `BeaconState`. 
Each attestation has full diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index d7459ebf497..e1cb1fde319 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -17,7 +17,7 @@ eth2_hashing = "0.1.0" ethereum-types = "0.9.1" arbitrary = { version = "0.4.4", features = ["derive"], optional = true } zeroize = { version = "1.0.0", features = ["zeroize_derive"] } -blst = { git = "https://github.com/supranational/blst.git", rev = "a8398ed284b0d78858302ad1ceb25a80e7bbe535" } +blst = { git = "https://github.com/sigp/blst.git", rev = "284f7059642851c760a09fb1708bcb59c7ca323c" } [features] default = ["supranational"] @@ -25,4 +25,3 @@ fake_crypto = [] milagro = [] supranational = [] supranational-portable = ["supranational", "blst/portable"] -supranational-force-adx = ["supranational", "blst/force-adx"] diff --git a/crypto/eth2_key_derivation/src/derived_key.rs b/crypto/eth2_key_derivation/src/derived_key.rs index 74dfcfbf3d8..8ed6c9bd44d 100644 --- a/crypto/eth2_key_derivation/src/derived_key.rs +++ b/crypto/eth2_key_derivation/src/derived_key.rs @@ -2,6 +2,7 @@ use crate::{lamport_secret_key::LamportSecretKey, secret_bytes::SecretBytes, Zer use num_bigint_dig::BigUint; use ring::hkdf::{KeyType, Prk, Salt, HKDF_SHA256}; use sha2::{Digest, Sha256}; +use std::convert::TryFrom; use zeroize::Zeroize; /// The byte size of a SHA256 hash. @@ -21,7 +22,7 @@ pub const R: &str = "52435875175126190479447740508185965837690552500527637822603 /// /// In EIP-2333 this value is defined as: /// -/// `ceil((1.5 * ceil(log2(r))) / 8)` +/// `ceil((3 * ceil(log2(r))) / 16)` pub const MOD_R_L: usize = 48; /// A BLS secret key that is derived from some `seed`, or generated as a child from some other @@ -81,9 +82,30 @@ fn derive_child_sk(parent_sk: &[u8], index: u32) -> ZeroizeHash { /// /// Equivalent to `HKDF_mod_r` in EIP-2333. 
fn hkdf_mod_r(ikm: &[u8]) -> ZeroizeHash { - let prk = hkdf_extract(b"BLS-SIG-KEYGEN-SALT-", ikm); - let okm = &hkdf_expand(prk, MOD_R_L); - mod_r(okm.as_bytes()) + // ikm = ikm + I2OSP(0,1) + let mut ikm_with_postfix = SecretBytes::zero(ikm.len() + 1); + ikm_with_postfix.as_mut_bytes()[..ikm.len()].copy_from_slice(ikm); + + // info = "" + I2OSP(L, 2) + let info = u16::try_from(MOD_R_L) + .expect("MOD_R_L too large") + .to_be_bytes(); + + let mut output = ZeroizeHash::zero(); + let zero_hash = ZeroizeHash::zero(); + + let mut salt = b"BLS-SIG-KEYGEN-SALT-".to_vec(); + while output.as_bytes() == zero_hash.as_bytes() { + let mut hasher = Sha256::new(); + hasher.update(salt.as_slice()); + salt = hasher.finalize().to_vec(); + + let prk = hkdf_extract(&salt, ikm_with_postfix.as_bytes()); + let okm = &hkdf_expand(prk, &info, MOD_R_L); + + output = mod_r(okm.as_bytes()); + } + output } /// Interprets `bytes` as a big-endian integer and returns that integer modulo the order of the @@ -145,7 +167,7 @@ fn parent_sk_to_lamport_pk(ikm: &[u8], index: u32) -> ZeroizeHash { /// Equivalent to `IKM_to_lamport_SK` in EIP-2333. fn ikm_to_lamport_sk(salt: &[u8], ikm: &[u8]) -> LamportSecretKey { let prk = hkdf_extract(salt, ikm); - let okm = hkdf_expand(prk, HASH_SIZE * LAMPORT_ARRAY_SIZE as usize); + let okm = hkdf_expand(prk, &[], HASH_SIZE * LAMPORT_ARRAY_SIZE as usize); LamportSecretKey::from_bytes(okm.as_bytes()) } @@ -159,7 +181,7 @@ fn hkdf_extract(salt: &[u8], ikm: &[u8]) -> Prk { /// Peforms a `HKDF-Expand` on the `pkr` (pseudo-random key), returning `l` bytes. /// /// Defined in [RFC5869](https://tools.ietf.org/html/rfc5869). 
-fn hkdf_expand(prk: Prk, l: usize) -> SecretBytes { +fn hkdf_expand(prk: Prk, info: &[u8], l: usize) -> SecretBytes { struct ExpandLen(usize); impl KeyType for ExpandLen { @@ -169,7 +191,7 @@ fn hkdf_expand(prk: Prk, l: usize) -> SecretBytes { } let mut okm = SecretBytes::zero(l); - prk.expand(&[], ExpandLen(l)) + prk.expand(&[info], ExpandLen(l)) .expect("expand len is constant and cannot be too large") .fill(okm.as_mut_bytes()) .expect("fill len is constant and cannot be too large"); @@ -307,528 +329,528 @@ mod test { /// Returns the copy-paste values from the spec. fn get_raw_vector() -> RawTestVector { RawTestVector { - seed: "0xc55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", - master_sk: - "12513733877922233913083619867448865075222526338446857121953625441395088009793", - child_index: 0, - lamport_0: vec![ - "0x7b4a587eac94d7f56843e718a04965d4832ef826419b4001a3ad0ba77eb44a3b", - "0x90f45a712112122429412921ece5c30eb2a6daf739dc9034fc79424daeb5eff6", - "0xd061c2799de00b2be90eb1cc295f4c31e22d4b45c59a9b9b2554379bea7783cb", - "0x3ad17e4cda2913b5180557fbe7db04b5ba440ce8bb035ae27878d66fbfa50d2c", - "0xf5b954490933ad47f8bf612d4a4f329b3aa8914b1b83d59e15e271e2a087e002", - "0x95d68d505bf4ff3e5149bc5499cf4b2f00686c674a29a8d903f70e569557d867", - "0x1b59c76d9bb2170b220a87833582ede5970d4a336d91c99a812825afe963e056", - "0x4310ff73cfbbf7b81c39ecbf1412da33e9388c1a95d71a75e51fe12256551ceb", - "0xee696343f823e5716e16747f3bbae2fc6de233fe10eea8e45b4579018da0874f", - "0xae12a437aaa7ae59f7d8328944b6a2b973a43565c55d5807dc2faf223a33aa73", - "0x2a3ae0b47f145bab629452661ff7741f111272e33ec571030d0eb222e1ed1390", - "0x1a3ea396e8cbd1d97733ef4753d6840b42c0795d2d693f18e6f0e7b3fff2beb2", - "0x472429d0643c888bfdfe6e6ccfdeee6d345d60c6710859ac29fc289fd3656347", - "0xa32d4d955949b8bed0eb20f586d8fd516d6ddec84fbbc36998d692633c349822", - "0xe5ac8ac5ee1d40e53a7abf36e8269d5d5fce450a87feae8e59f432a44bcc7666", - 
"0xddf9e497ed78032fbd72d9b8abd5204d81c3475f29afa44cdf1ded8ea72dd1dc", - "0x945c62e88fb1e5f3c15ff57cd5eb1586ee93ec5ec80154c5a9c50241c5adae0a", - "0xc8868b50fc8423c96b7efa1ede4d3203a6b835dbeb6b2ababc58397e6b31d9dd", - "0x66de9bd86b50e2b6a755310520af655759c1753bff34b79a5cd63d6811fc8c65", - "0x5b13786c6068df7735343e5591393bea8aee92ac5826d6132bf4f5ebf1098776", - "0xa2038fc7d8e3cb2eda2bd303cfa76a9e5d8b88293918bec8b2fc03be75684f14", - "0x47a13f6b2308a50eded830fdee7c504bf49d1fe6a95e337b0825d0d77a520129", - "0xb534cdddcf1aa1c6b4cbba46d1db31b766d958e0a0306450bc031d1e3ed79d97", - "0x54aa051b754c31658377f7bff00b7deaa861e74cb12e1eb84216666e19b23d69", - "0x0220d57f63435948818eb376367b113c188e37451c216380f65d1ad55f73f527", - "0xf9dd2e391565534a4db84980433bf5a56250f45fe294fce2679bcf115522c081", - "0x1166591ee2ca59b9f4e525900f085141be8879c66ef18529968babeb87c44814", - "0xf4fa2e8de39bdbeb29b64d8b440d3a6c9a6ca5bdce543877eaee93c11bd70ab8", - "0x07f466d73b93db283b3f7bfaf9c39ae296adc376ab307ef12312631d0926790e", - "0xb2ecff93acb4fa44c1dbf8464b81734a863b6d7142b02f5c008907ea4dc9aaa1", - "0xa1d9c342f6c293ac6ef8b5013cba82c4bad6ed7024d782948cb23cd490039ba1", - "0xc7d04a639ba00517ece4dbc5ef4aaf20e0ccde6e4a24c28936fabe93dec594db", - "0xe3cbb9810472d9dd1cdb5eed2f74b67ea60e973d2d2e897bd64728c9b1aa0679", - "0xe36884703413958ff2aba7a1f138a26d0ac0a371270f0169219beb00a5add5f0", - "0xe5ea300a09895b3f98de5232d92a36d5611cbcf9aaf9e7bb20cf6d1696ad1cb4", - "0xc136cda884e18175ab45148ed4f9d0d1a3c5e11ad0275058e61ae48eb151a81f", - "0x3ee1101e944c040021187e93b6e0beb1048c75fb74f3fdd67756b1c8517a311f", - "0x016964fd6fc32b9ad07a630949596715dee84d78230640368ff0929a280cf3a2", - "0xe33865fc03120b94333bb754fd097dc0f90e69ff6fd221d6aae59fcf2d762d76", - "0xe80bb3515a09ac6ecb4ec59de22701cdf954b1ae8a677fd85508c5b041f28058", - "0x3889af7cd325141ec288021ede136652a0411d20364005b9d3ca9102cb368f57", - "0x18dad0bc975cf8800addd54c7867389d3f7fe1b97d348bd8412a6cbfb75c520a", - 
"0x09035218686061ee91bd2ad57dc6fb6da7243b8177a153484524b2b228da5314", - "0x688fd7a97551c64eae33f91abb073a46eafbbacd5595c6bac2e57dd536acdfe2", - "0x1fc164dce565a1d0da59cc8048b334cc5eb84bf04de2399ddb847c22a7e32ab7", - "0xa2a340ba05c8a30dd1cab886a926b761758eba0e41b5c4c5dfd4a42f249655c1", - "0xc43dffe01479db836a6a1a74564b297fad0d69c6b06cf593f6db9f26b4f307d5", - "0x73cef7f3ff724a30a79e1dca74cef74954afeefa2e476c4dec65afe50c16c5c4", - "0xa54002253ab7b95cc5b664b3f08976400475cc56f170b939f6792e730ff5170b", - "0x9ade43053d41afebc002f09476dffd1b13ecbf67f810791540b92ca56d5e63e4", - "0x234e7cbfbe45b22a871db26738fa05de09213a925439d7f3e5108132e521b280", - "0x066b712417332c7cfca871fb1bb5839f0341acf9266229603a3eddbc8a93b59f", - "0xb5857acdcf636330da2cfcc99c81d9fdbd20c506a3c0e4f4f6a139d2a64f051c", - "0xe119908a150a49704b6bbba2c470cd619a0ae10dd9736e8d491890e3c8509fff", - "0xb8a5c5dbb51e6cb73cca95b4ad63ea3c7399cd16b05ab6261535495b3af2ca51", - "0x05624a1d4d2d2a31160bc48a6314bbf13eaddf56cddb0f0aa4ed3fb87f8b479f", - "0x483daceff1c3baa0ed0f3be7e534eebf5f4aed424ecd804edfbf5c56b3476b50", - "0x424d04694e7ae673707c77eb1c6d0996d250cfab6832ee3506a12e0384a3c5c9", - "0xa11fed0ed8057966bfe7136a15a814d06a516fbc9d44aeef87c509137a26190e", - "0x3694d22d1bc64658f3adbe2cc9f1716aee889066e0950e0b7a2fd576ed36bb76", - "0x49a13000a87f39f93d0ae9c3a4cfccbf440c0a75cce4c9d70dac627b6d6958b3", - "0xb3ff0cdd878d5ac1cb12e7d0b300d649fdd008800d498ae4f9fbf9510c74249a", - "0xe52a867cfb87d2fe7102d23d8d64925f7b75ca3f7d6bb763f7337352c255e0be", - "0x6513b372e4e557cca59979e48ec27620e9d7cdb238fcf4a9f19c3ba502963be0", - "0x9f69d82d4d51736902a987c8b5c30c2b25a895f2af5d2c846667ff6768bcc774", - "0x049a220dbe3340749f94643a429cb3cba3c92b561dc756a733d652d838728ab3", - "0x4fa2cd877aa115b476082b11053309f3537fa03d9158085f5f3f4bab6083e6da", - "0xed12db4069eb9f347735816afcee3fe43d4a6999fef8240b91bf4b05447d734f", - "0x3ecbe5eda469278f68548c450836a05cc500864664c7dda9b7526f084a891032", - 
"0x690d8f928fc61949c22e18cceaa2a446f8e1b65bd2e7af9e0a8e8284134ab3d2", - "0x99e09167a09f8261e7e8571d19148b7d7a75990d0702d9d582a2e4a96ac34f8e", - "0x6d33931693ed7c2e1d080b6a37da52c279a06cec5f534305819f7adf7db0afe3", - "0xc4b735462a9a656e28a52b1d4992ea9dea826b858971d698453a4be534d6bb70", - "0xedf92b10302dc41f8d362b360f4c2ef551d50e2ded012312c964002d2afc46d7", - "0x58f6691cca081ae5c3661dd171b87cc49c90359bb03cc0e57e503f7fcf14aefc", - "0x5d29b8b4ee295a73c4a8618927b3d14b76c7da049133a2257192b10be8c17a6a", - "0x646802fa42801e0ae24011fb4f62e87219ef1da01f7fc14bf8d6bd2d9e7c21f1", - "0x23abf45eee65cc4c1e95ccab42ad280a00bb3b14d243e2021a684075f900141e", - "0x2b1ae95c975bf9c387eae506fdb5e58afd2d198f00a21cd3fddb5855e8021e4d", - "0x0ef9f6e1c0583493d343e75f9c0c557fa6da0dc12b17a96c5757292916b72ee3", - "0x04c7fc76195c64a3285af14161077c045ff6ddbb67c0ff91b080f98eb6781e5c", - "0xba12679b97027d0e7076e6d19086c07792eaa7f78350842fbef8ddf5bcd3ecc0", - "0xcead458e6799df4d2f6cbf7f13cb3afec3441a354816e3071856ed49cbdbb1a7", - "0xbe6c56256556bb5c6727a1d9cb641d969677f56bb5ad7f8f7a7c9cfd128427b4", - "0xc80f11963ff40cb1888054b83c0463d32f737f2e7d42098e639023db0dfc84d4", - "0xac80006c1296bcfde86697efebb87fb0fddfb70dd34dd2ee4c152482af4687eb", - "0xbb7d13ce184249df4576fc3d13351e1683500e48726cd4198423f14f9094068b", - "0x1b2d9c40c55bd7362664fa46c1268e094d56c8193e3d991c08dc7a6e4ca14fa1", - "0x9bd236254d0565f5b2d24552d4b4d732de43b0adaa64ecd8be3efc6508577591", - "0x38078cefccc04e8312d79e0636e0e3157434c50a2ad4e3e87cc6584c41eec8b5", - "0xb5d15a8527ff3fa254ba61ffceb02d2570b53361894f351a9e839c0bb716857d", - "0x6763dad684bf2e914f40ae0a7ee0cdf12c97f41fc05a485d5991b4daad21a3f8", - "0xc80363c20df589333ecbe05bd5f2c19942ebc2593626dc50d00835c40fb8d005", - "0x48502b56ae93acd2794f847cbe825525d5d5f59f0f75c67aff84e5338776b3af", - "0xfd8e033493ba8af264a855a78ab07f37d936351d2879b95928909ed8df1b4f91", - "0x11f75bee9eac7356e65ebc7f004ccdc1da80807380d69143293d1421f50b1c97", - 
"0x903a88a3ebe84ca1c52a752b1faffa9ca1daedac9cbf1aa70942efc9beb44b79", - "0x2c0dcd68837f32a69da651045ad836b8cd6b48f2c8c5d73a3bd3bba6148d345a", - "0x0aa0f49b3476f3fdb6393f2ab601e0009586090b72ee54a525734f51598960d5", - "0xf7a789f013f702731656c562caa15b04cb7c9957376c4d80b8839167bb7fa626", - "0x4e0be1b19e305d82db3fd8affd67b0d2559da3edbfb08d19632a5cc46a90ed07", - "0x3caaccfc546d84d543eaf4f4c50c9c8fd831c12a8de56fdb9dfd04cc082882fe", - "0x894f6a01fd34f0642077e22981752011678548eb70eb55e8072c1caffc16fe02", - "0xae7eb54adaa68679348ea3537a49be669d1d61001fbab9fac259ba727dbc9a1a", - "0x291a1cbdceff957b5a65440ab67fb8672de881230fe3108a15ca487c2662c2c7", - "0x891d43b867137bf8beb9df4da2d951b5984a266a8cd74ec1593801d005f83f08", - "0xc558407f6491b37a10835e0ad7ce74f4e368aa49157a28873f7229310cb2d7fd", - "0x9ce061b0a072e1fe645f3479dac089b5bfb78cfa6cfbe5fd603bcdb504711315", - "0xa8e30d07b09275115dd96472ecf9bc316581caf307735176ca226d4cd9022925", - "0x918ee6d2efba7757266577691203f973cf4f4cac10f7d5f86acd2a797ff66583", - "0xfa31ba95e15d1635d087522f3d0da9cf7acac4ed6d0ac672654032a3c39244a6", - "0xf2952b58f015d6733af06938cd1f82fbddb3b796823bee7a3dbffa04efc117c2", - "0x46f8f742d3683de010ede528128d1181e8819f4252474f51371a177bfa518fa4", - "0x4ca1cc80094f2910cf83a9e65ad70e234690ffb9142793911ec7cf71663545b3", - "0x381965037b5725c71bfa6989d4c432f6611de8e8ec387f3cfc0dcb1a15191b73", - "0x2562b88ed3b86ba188be056805a3b7a47cb1a3f630d0e2f39647b0792ec6b7d8", - "0x565f6d14e7f22724f06d40f54465ad40d265b6de072b34a09d6e37a97a118cd8", - "0xc2982c861ad3278063b4a5f584eaf866db684cc4e712d64230fc9ee33bb4253b", - "0xfd806c91927e549d8d400ab7aa68dbe60af988fbabf228483ab0c8de7dab7eee", - "0xafae6ff16c168a3a3b5c2f1742d3f89fa4777c4bd0108f174014debf8f4d629c", - "0xaf5a4be694de5e53632be9f1a49bd582bf76002259460719197079c8c4be7e66", - "0xa8df4a4b4c5bf7a4498a11186f8bb7679137395f28e5c2179589e1c1f26504b5", - "0xce8b77c64c646bb6023f3efaed21ca2e928e21517422b124362cf8f4d9667405", - 
"0x62e67a8c423bc6c6c73e6cd8939c5c1b110f1a38b2ab75566988823762087693", - "0x7e778f29937daaa272d06c62d6bf3c9c0112d45a3df1689c602d828b5a315a9f", - "0xe9b5abd46c2377e602ff329050afa08afe152f4b0861db8a887be910ff1570bf", - "0xa267b1b2ccd5d96ae8a916b0316f06fafb886b3bb41286b20763a656e3ca0052", - "0xb8ed85a67a64b3453888a10dedf4705bd27719664deff0996a51bb82bc07194f", - "0x57907c3c88848f9e27bc21dd8e7b9d61de48765f64d0e943e7a6bb94cc2021ab", - "0xd2f6f1141a3b76bf9bf581d49091142944c7f9f323578f5bdd5522ba32291243", - "0xc89f104200ed4c5d5f7046d99e68ae6f8ec31e2eeceb568eb05087e3aa546a74", - "0xc9f367fae45c39299693b134229bb6dd0da112fd1a7d19b7f4772c01e5cbe479", - "0x64e2d4ad51948764dd578d26357e29e8e4d076d65c05cffdf8211b624fefe9ac", - "0xf9a9b4e6d5be7fc051df8ecd9c389d16b1af86c749308e6a23f7ff4871f0ba9a", - "0x0d2b2a228b86ebf9499e1bf7674335087ced2eb35ce0eb90954a0f75751a2bf4", - "0xff8531b45420a960d6e48ca75d77758c25733abde83cd4a6160beae978aa735e", - "0xd6d412bd1cb96a2b568d30e7986b7e8994ca92fd65756a758295499e11ea52b6", - "0xad8533fccbecdd4a0b00d648bfe992360d265f7be70c41d9631cefad5d4fe2f6", - "0x31fbf2afb8d5cc896d517cfc5201ee24527e8d283f9c37ca10233bef01000a20", - "0x2fd67b7365efc258131eb410f46bf3b1cbd3e9c76fd6e9c3e86c9ff1054116ff", - "0xab6aa29f33d18244be26b23abadb39679a8aa56dafc0dd7b87b672df5f5f5db6", - "0xbad3b0f401ca0a53a3d465de5cecd57769ec9d4df2c04b78f8c342a7ed35bbee", - "0xbdc24d46e471835d83ce8c5b9ecbe675aab2fd8f7831c548e8efd268c2ee2232", - "0x87265fabd7397d08f0729f13a2f3a25bbc8c874b6b50f65715c92b62f665f925", - "0xa379fd268e7ff392c067c2dd823996f72714bf3f936d5eeded71298859f834cb", - "0xf3ab452c9599ebfbb234f72a86f3062aed12ae1f634abbe542ff60f5cefc1fcf", - "0x2b17ebb053a3034c07da36ed2ba42c25ad8e61dec87b5527f5e1c755eb55405a", - "0x305b40321bd67bf48bfd121ee4d5d347268578bd4b8344560046594771a11129", - "0xe7029c9bea020770d77fe06ca53b521b180ad6a9e747545aadc1c74beef7241c", - "0xabc357cec0f4351a5ada22483d3b103890392f8d8f9cb8073a61969ed1be4e08", - 
"0x97f88c301946508428044d05584dc41af2e6a0de946de7d7f5269c05468afe20", - "0xbdc08fe8d6f9a05ad8350626b622ad8eec80c52331d154a3860c98676719cfbd", - "0x161590fc9f7fcf4eaba2f950cf588e6da79e921f139d3c2d7ebe017003a4799e", - "0x91b658db75bc3d1954bfde2ef4bc12980ff1688e09d0537f170c9ab47c162320", - "0x76d995f121406a63ce26502e7ec2b653c221cda357694a8d53897a99e6ce731e", - "0x3d6b2009586aceb7232c01259bb9428523c02b0f42c2100ec0d392418260c403", - "0x14ca74ecbc8ec0c67444c6cb661a2bce907aa2a1453b11f16002b815b94a1c49", - "0x553b4dc88554ebe7b0a3bd0813104fd1165a1f950ceace11f5841aa74b756d85", - "0x4025bf4ad86751a156d447ce3cabafde9b688efcdafd8aa4be69e670f8a06d9e", - "0x74260cf266997d19225e9a0351a9acfa17471fccdf5edc9ccc3bb0d23ef551c5", - "0xf9dbca3e16d234e448cf03877746baeb62a8a25c261eff42498b1813565c752a", - "0x2652ec98e05c1b6920fb6ddc3b57e366d514ffa4b35d068f73b5603c47f68f2f", - "0x83f090efeb36db91eb3d4dfbb17335c733fce7c64317d0d3324d7caaaf880af5", - "0x1e86257f1151fb7022ed9ed00fb961a9a9989e58791fb72043bb63ed0811791c", - "0xd59e4dcc97cba88a48c2a9a2b29f79125099a39f74f4fb418547de8389cd5d15", - "0x875a19b152fe1eb3fe1de288fa9a84864a84a79bac30b1dbd70587b519a9770e", - "0x9c9dc2d3c8f2f6814cfc61b42ee0852bbaf3f523e0409dd5df3081b750a5b301", - "0xf6f7f81c51581c2e5861a00b66c476862424151dd750efeb20b7663d552a2e94", - "0x723fcb7ca43a42483b31443d4be9b756b34927176f91a391c71d0b774c73a299", - "0x2b02d8acf63bc8f528706ed4d5463a58e9428d5b71d577fd5daa13ba48ac56cf", - "0x2ff6911f574c0f0498fc6199da129446b40fca35ccbf362bc76534ba71c7ca22", - "0x1ef4b959b11bc87b11e4a5f84b4d757c6bdcfad874acec9a6c9eee23dc4bbe1b", - "0x68e2df9f512be9f64b7e3a2dee462149dac50780073d78b569a20256aea5f751", - "0xd1a3682e12b90ae1eab27fc5dc2aef3b8e4dbb813925e9a91e58d6c9832767b6", - "0x75778ccc102d98c5e0b4b83f7d4ef7fe8bc7263cc3317723001cb0b314d1e9e8", - "0xc7f44e2cead108dc167f0036ac8a278d3549cc3dd5cc067d074ccad9b1d9f8d4", - "0x4cba0223c5df2796b0ee9fbc084d69f10e6aedda8f0cf86171bebb156ede676c", - 
"0x628deda825661f586a5713e43c806fdd55e1a53fbe90a4ddb5f3786570740954", - "0xfc82a253bc7e0ac96252b238fbb411a54e0adf78d089f804a7fc83a4959b401e", - "0x72a6491f5daae0ceb85b61a5ed69009dd2a167c64cb35cabf38b846e27268e9d", - "0xee139a913d4fcf25ba54bb36fc8051b91f2ec73ba820cc193c46fb2f7c37a106", - "0x7f75021f2b1d0c78859478e27f6f40646b5776c060f1a5f6f0944c840a0121f8", - "0x5b60a1b78feca1d2602ac8110d263ad6b3663cbf49e6bdc1077b4b80af2feb6f", - "0xd61f15d80b1e88469b6a76ed6a6a2b94143b6acc3bd717357264818f9f2d5c6d", - "0xea85da1780b3879a4d81b685ba40b91c060866abd5080b30fbbb41730724a7dd", - "0xb9b9da9461e83153f3ae0af59fbd61febfde39eb6ac72db5ed014797495d4c26", - "0xf737762fe8665df8475ff341b3762aaeb90e52974fe5612f5efd0fc1c409d7f8", - "0xaaa25d934a1d5aa6b2a1863704d7a7f04794ed210883582c1f798be5ca046cf7", - "0x932f46d0b6444145221b647f9d3801b6cb8b1450a1a531a959abdaacf2b5656b", - "0xf4a8b0e52f843ad27635c4f5a467fbf98ba06ba9a2b93a8a97170b5c41bf4958", - "0x196ed380785ee2925307ec904161dc02a4596a55499e5b0a3897f95485b3e74a", - "0x772e829a405219e4f8cd93a1ef15c250be85c828c1e29ef6b3f7b46958a85b44", - "0xd66cfc9af9941515d788f9f5e3b56fddb92464173ddb67b83bf265e7ea502170", - "0xf5b040bfc246425278e2423b1953d8ad518de911cf04d16c67d8580a09f90e62", - "0xd2d18b2ae8a53dde14b4000e5e7e414505825f50401a3797dd8820cf510dc448", - "0xc01dcc064e644266739cd0ec7edf92fc2ef8e92e0beedf0e8aa30efcff1644fe", - "0x24720d325913ba137daf031924ad3bfaa1c8c00a53a2d048fe5667aef45efce3", - "0x70a24e1c89b3ea78d76ef458d498dcb5b8561d484853b2a8b2adcd61869857df", - "0x0ff3313997f14e1b1dcd80f1d62c58aaefb19efd7c0ea15dde21aa4e2a516e80", - "0x960c1f50062a4df851638f42c0259b6e0a0217300884f13a3c5c8d94adb34f21", - "0xb71ca7cc8578149da556131268f4625b51620dfc3a6e9fbd47f5df03afbd410e", - "0xa1a3eeec0addec7b9e15f416a07608a1b5d94f0b42d5c203b8ced03a07484f5b", - "0xa4bb8b059aa122ca4652115b83b17af80cfbea0d3e1e8979a396a667f94e85f3", - "0x31c4d2f252167fe2a4d41944224a80b2f1afaf76f8dd6a3d52d71751849e44bb", - 
"0x79642dd6a255f96c9efe569304d58c327a441448db0431aa81fe072d0d359b52", - "0x42a4b504714aba1b67defe9458fff0c8cb1f216dcab28263cef67a65693b2036", - "0xe3d2f6a9d882d0f026ef316940dfcbf131342060ea28944475fe1f56392c9ad2", - "0x986af9aeff236394a0afa83823e643e76f7624e9bfd47d5468f9b83758a86caa", - "0xafe2de6ede50ee351d63ed38d1f2ae5203174c731f41bbed95db467461ad5492", - "0x9ad40f0785fe1c8a5e4c3342b3c91987cd47a862ece6573674b52fa0456f697a", - "0xde4cde6d0fc6def3a89b79da0e01accdbec049f1c9471d13a5d59286bd679af1", - "0xecd0d1f70116d6b3ae21c57fb06ad90eed33d040e2c5c3d12714b3be934fa5ce", - "0x3c53c5bf2d1b1d4038e1f0e8a2e6d12e0d4613d5cd12562578b6909921224c10", - "0x36087382b37e9e306642cc6e867e0fb2971b6b2b28b6caf2f9c96b790e8db70a", - "0xa957496d6a4218a19998f90282d05bd93e6baabf55e55e8a5f74a933a4dec045", - "0x077d6f094e8467a21f02c67753565ec5755156015d4e86f1f82a22f9cf21c869", - "0x12dd3b1f29e1462ca392c12388a77c58044151154cf86f23873f92a99b6bb762", - "0x7fdbcdedcc02ecf16657792bd8ef4fa4adeee497f30207d4cc060eb0d528b26b", - "0x245554b12bf8edf9e9732d6e2fa50958376e355cb695515c94676e64c6e97009", - "0xccd3b1841b517f7853e35f85471710777e437a8665e352a0b61c7d7083c3babc", - "0xd970545a326dcd92e31310d1fdce3703dff8ef7c0f3411dfa74fab8b4b0763ac", - "0xd24163068918e2783f9e79c8f2dcc1c5ebac7796ce63070c364837aac91ee239", - "0x256a330055357e20691e53ca5be846507c2f02cfde09cafb5809106f0af9180e", - "0xfa446a5d1876c2051811af2a341a35dbcd3f7f8e2e4f816f501139d27dd7cd82", - "0xbafbc7a8f871d95736a41e5721605d37e7532e41eb1426897e33a72ed2f0bf1d", - "0x8055af9a105b6cf17cfeb3f5320e7dab1a6480500ff03a16c437dfec0724c290", - "0x1de6ee3e989497c1cc7ca1d16b7b01b2f336524aa2f75a823eaa1716c3a1a294", - "0x12bb9508d646dda515745d104199f71276d188b3e164083ad27dfdcdc68e290b", - "0x7ea9f9939ad4f3b44fe7b780e0587da4417c34459b2996b3a449bb5b3ff8c8cb", - "0xa88d2f8f35bc669aa6480ce82571df65fea366834670b4084910c7bb6a735dde", - "0x9486e045adb387a550b3c7a603c30e07ed8625d322d1158f4c424d30befe4a65", - 
"0xb283a70ba539fe1945be096cb90edb993fac77e8bf53616bde35cdcaa04ab732", - "0xab39a81558e9309831a2caf03e9df22e8233e20b1769f16e613debcdb8e2610f", - "0x1fc12540473fbbad97c08770c41f517ce19dc7106aa2be2e9b77867046627509", - "0xec33dbec9d655c4c581e07d1c40a587cf3217bc8168a81521b2d0021bd0ec133", - "0xc8699e3b41846bc291209bbb9c06f565f66c6ccecbf03ebc27593e798c21fe94", - "0x240d7eae209c19d453b666c669190db22db06279386aa30710b6edb885f6df94", - "0xb181c07071a750fc7638dd67e868dddbeeee8e8e0dcbc862539ee2084674a89e", - "0xb8792555c891b3cbfddda308749122a105938a80909c2013637289e115429625", - "0xfe3e9e5b4a5271d19a569fee6faee31814e55f156ba843b6e8f8dc439d60e67a", - "0x912e9ba3b996717f89d58f1e64243d9cca133614394e6ae776e2936cf1a9a859", - "0xa0671c91a21fdfd50e877afa9fe3974aa3913855a2a478ae2c242bcdb71c73d7", - "0x5b55d171b346db9ba27b67105b2b4800ca5ba06931ed6bd1bafb89d31e6472e6", - "0x68438458f1af7bd0103ef33f8bc5853fa857b8c1f84b843882d8c328c595940d", - "0x21fe319fe8c08c1d00f977d33d4a6f18aecaa1fc7855b157b653d2d3cbd8357f", - "0x23cce560bc31f68e699ece60f21dd7951c53c292b3f5522b9683eb2b3c85fc53", - "0x917fa32d172c352e5a77ac079df84401cdd960110c93aa9df51046d1525a9b49", - "0x3fc397180b65585305b88fe500f2ec17bc4dccb2ec254dbb72ffb40979f14641", - "0xf35fb569e7a78a1443b673251ac70384abea7f92432953ca9c0f31c356be9bd9", - "0x7955afa3cd34deb909cd031415e1079f44b76f3d6b0aaf772088445aaff77d08", - "0x45c0ca029356bf6ecfc845065054c06024977786b6fbfaea74b773d9b26f0e6c", - "0xe5c1dac2a6181f7c46ab77f2e99a719504cb1f3e3c89d720428d019cb142c156", - "0x677b0e575afcccf9ddefc9470e96a6cfff155e626600b660247b7121b17b030a", - "0xbeed763e9a38277efe57b834a946d05964844b1f51dba2c92a5f3b8d0b7c67d0", - "0x962b17ed1a9343d8ebfae3873162eef13734985f528ca06c90b0c1e68adfdd89", - ], - lamport_1: vec![ - "0xb3a3a79f061862f46825c00fec4005fb8c8c3462a1eb0416d0ebe9028436d3a9", - "0x6692676ce3b07f4c5ad4c67dc2cf1dfa784043a0e95dd6965e59dc00b9eaff2d", - "0xbf7b849feb312db230e6e2383681b9e35c064e2d037cbc3c9cc9cd49220e80c9", - 
"0xa54e391dd3b717ea818f5954eec17b4a393a12830e28fabd62cbcecf509c17dc", - "0x8d26d800ac3d4453c211ef35e9e5bb23d3b9ede74f26c1c417d6549c3110314d", - "0xbb8153e24a52398d92480553236850974576876c7da561651bc551498f184d10", - "0x0d30e0e203dc4197f01f0c1aba409321fbf94ec7216e47ab89a66fb45e295eff", - "0x01dc81417e36e527776bf37a3f9d74a4cf01a7fb8e1f407f6bd525743865791d", - "0xa6318e8a57bec438245a6834f44eb9b7fb77def1554d137ea12320fc572f42c9", - "0xd25db9df4575b595130b6159a2e8040d3879c1d877743d960bf9aa88363fbf9f", - "0x61bb8baeb2b92a4f47bb2c8569a1c68df31b3469e634d5e74221bc7065f07a96", - "0xb18962aee4db140c237c24fec7fd073b400b2e56b0d503f8bc74a9114bf183bf", - "0x205473cc0cdab4c8d0c6aeceda9262c225b9db2b7033babfe48b7e919751a2c6", - "0xc5aa7df7552e5bb17a08497b82d8b119f93463ccb67282960aee306e0787f228", - "0x36da99e7d38ce6d7eab90ea109ba26615ad75233f65b3ae5056fba79c0c6682a", - "0xd68b71bba6266b68aec0df39b7c2311e54d46a3eab35f07a9fe60d70f52eec58", - "0xbbe56f1274ada484277add5cb8c90ef687d0b69a4c95da29e32730d90a2d059f", - "0x0982d1d1c15a560339d9151dae5c05e995647624261022bbedce5dce8a220a31", - "0x8ef54ad546d2c6144fc26e1e2ef92919c676d7a76cfdfb5c6a64f09a54e82e71", - "0x1e3ac0133eef9cdbeb590f14685ce86180d02b0eea3ef600fd515c38992b1f26", - "0x642e6b1c4bec3d4ba0ff2f15fbd69dcb57e4ba8785582e1bc2b452f0c139b590", - "0xca713c8cf4afa9c5d0c2db4fc684a8a233b3b01c219b577f0a053548bedf8201", - "0xd0569ba4e1f6c02c69018b9877d6a409659cb5e0aa086df107c2cc57aaba62da", - "0x4ebe68755e14b74973e7f0fa374b87cee9c370439318f5783c734f00bb13e4b5", - "0x788b5292dc5295ae4d0ea0be345034af97a61eec206fda885bbc0f049678c574", - "0x0ebd88acd4ae195d1d3982038ced5af1b6f32a07349cf7fffbff3ce410c10df2", - "0xc7faf0a49234d149036c151381d38427b74bae9bd1601fc71663e603bc15a690", - "0xc5247bf09ebe9fa4e1013240a1f88c703f25a1437196c71ee02ca3033a61f946", - "0x719f8c68113d9f9118b4281e1f42c16060def3e3eeef15f0a10620e886dc988f", - "0x28da4f8d9051a8b4d6158503402bdb6c49ba2fb1174344f97b569c8f640504e6", - 
"0x96f6773576af69f7888b40b0a15bc18cc9ec8ca5e1bb88a5de58795c6ddf678e", - "0x8d80d188a4e7b85607deccf654a58616b6607a0299dd8c3f1165c453fd33d2e4", - "0x9c08dcc4f914486d33aa24d10b89fd0aabcc635aa2f1715dfb1a18bf4e66692a", - "0x0ff7045b5f6584cc22c140f064dec0692762aa7b9dfa1defc7535e9a76a83e35", - "0x8e2dae66fa93857b39929b8fc531a230a7cfdd2c449f9f52675ab5b5176461d5", - "0xf449017c5d429f9a671d9cc6983aafd0c70dd39b26a142a1d7f0773de091ac41", - "0xed3d4cab2d44fec0d5125a97b3e365a77620db671ecdda1b3c429048e2ebdae6", - "0x836a332a84ee2f4f5bf24697df79ed4680b4f3a9d87c50665f46edaeed309144", - "0x7a79278754a4788e5c1cf3b9145edb55a2ba0428ac1c867912b5406bb7c4ce96", - "0x51e6e2ba81958328b38fd0f052208178cec82a9c9abd403311234e93aff7fa70", - "0x217ec3ec7021599e4f34410d2c14a8552fff0bc8f6894ebb52ec79bf6ec80dc9", - "0x8a95bf197d8e359edabab1a77f5a6d04851263352aa46830f287d4e0564f0be0", - "0x60d0cbfb87340b7c92831872b48997ce715da91c576296df215070c6c20046d4", - "0x1739fbca476c540d081b3f699a97387b68af5d14be52a0768d5185bc9b26961b", - "0xac277974f945a02d89a0f8275e02de9353e960e319879a4ef137676b537a7240", - "0x959b7640821904ba10efe8561e442fbdf137ccb030aee7472d10095223e320ba", - "0xdba61c8785a64cb332342ab0510126c92a7d61f6a8178c5860d018d3dad571c6", - "0xc191fb6a92eb1f1fb9e7eb2bdecd7ec3b2380dd79c3198b3620ea00968f2bd74", - "0x16ef4e88e182dfc03e17dc9efaa4a9fbf4ff8cb143304a4a7a9c75d306729832", - "0x39080e4124ca577ff2718dfbcb3415a4220c5a7a4108729e0d87bd05adda5970", - "0xa29a740eef233956baff06e5b11c90ed7500d7947bada6da1c6b5d9336fc37b6", - "0x7fda7050e6be2675251d35376bacc895813620d245397ab57812391d503716ee", - "0x401e0bf36af9992deb87efb6a64aaf0a4bc9f5ad7b9241456b3d5cd650418337", - "0x814e70c57410e62593ebc351fdeb91522fe011db310fcf07e54ac3f6fefe6be5", - "0x03c1e52ecbef0d79a4682af142f012dc6b037a51f972a284fc7973b1b2c66dcf", - "0x57b22fb091447c279f8d47bdcc6a801a946ce78339e8cd2665423dfcdd58c671", - "0x53aeb39ab6d7d4375dc4880985233cba6a1be144289e13cf0bd04c203257d51b", - 
"0x795e5d1af4becbca66c8f1a2e751dcc8e15d7055b6fc09d0e053fa026f16f48f", - "0x1cd02dcd183103796f7961add835a7ad0ba636842f412643967c58fe9545bee4", - "0x55fc1550be9abf92cacb630acf58bad11bf734114ebe502978a261cc38a4dd70", - "0x6a044e0ea5c361d3fb2ca1ba795301e7eb63db4e8a0314638f42e358ea9cfc3e", - "0x57d9f15d4db199cbcb7cbd6524c52a1b799d52b0277b5a270d2985fcee1e2acb", - "0x66c78c412e586bd01febc3e4d909cc278134e74d51d6f60e0a55b35df6fb5b09", - "0x1076799e15a49d6b15c2486032f5e0b50f43c11bc076c401e0779d224e33f6fc", - "0x5f70e3a2714d8b4483cf3155865ba792197e957f5b3a6234e4c408bf2e55119d", - "0x9b105b0f89a05eb1ff7caed74cf9573dc55ac8bc4881529487b3700f5842de16", - "0x1753571b3cfadca4277c59aee89f607d1b1e3a6aa515d9051bafb2f0d8ce0daa", - "0x4014fff940b0950706926a19906a370ccbd652836dab678c82c539c00989201a", - "0x0423fa59ee58035a0beb9653841036101b2d5903ddeabddabf697dbc6f168e61", - "0x78f6781673d991f9138aa1f5142214232d6e3d6986acb6cc7fb000e1a055f425", - "0x21b8a1f6733b5762499bf2de90c9ef06af1c6c8b3ddb3a04cce949caad723197", - "0x83847957e909153312b5bd9a1a37db0bd6c72a417024a69df3e18512973a18b4", - "0x948addf423afd0c813647cfe32725bc55773167d5065539e6a3b50e6ebbdab38", - "0x0b0485d1bec07504a2e5e3a89addd6f25d497cd37a0c04bc38355f8bdb01cd48", - "0x31be8bda5143d39ea2655e9eca6a294791ca7854a829904d8574bedc5057ddc4", - "0x16a0d2d657fadce0d81264320e42e504f4d39b931dff9888f861f3cc78753f99", - "0xb43786061420c5231bf1ff638cb210f89bf4cd2d3e8bafbf34f497c9a298a13b", - "0x1f5986cbd7107d2a3cbc1826ec6908d976addbf9ae78f647c1d159cd5397e1bd", - "0xa883ccdbfd91fad436be7a4e2e74b7796c0aadfe03b7eea036d492eaf74a1a6f", - "0x5bc9eb77bbbf589db48bca436360d5fc1d74b9195237f11946349951f2a9f7f6", - "0xb6bc86de74a887a5dceb012d58c62399897141cbcc51bad9cb882f53991f499c", - "0xa6c3260e7c2dd13f26cf22bf4cd667688142ff7a3511ec895bc8f92ebfa694b6", - "0xb97da27e17d26608ef3607d83634d6e55736af10cc7e4744940a3e35d926c2ad", - "0x9df44067c2dc947c2f8e07ecc90ba54db11eac891569061a8a8821f8f9773694", - 
"0x865cc98e373800825e2b5ead6c21ac9112ff25a0dc2ab0ed61b16dc30a4a7cd7", - "0xe06a5b157570c5e010a52f332cacd4e131b7aed9555a5f4b5a1c9c4606caca75", - "0x824eccb5cf079b5943c4d17771d7f77555a964a106245607cedac33b7a14922e", - "0xe86f721d7a3b52524057862547fc72de58d88728868f395887057153bccaa566", - "0x3344e76d79f019459188344fb1744c93565c7a35799621d7f4505f5b6119ac82", - "0x401b3589bdd1b0407854565329e3f22251657912e27e1fb2d978bf41c435c3ac", - "0xb12fd0b2567eb14a562e710a6e46eef5e280187bf1411f5573bb86ecbe05e328", - "0xe6dc27bab027cbd9fbb5d80054a3f25b576bd0b4902527a0fc6d0de0e45a3f9f", - "0x1de222f0e731001c60518fc8d2be7d7a48cc84e0570f03516c70975fdf7dc882", - "0xb8ff6563e719fc182e15bbe678cf045696711244aacc7ce4833c72d2d108b1b9", - "0x53e28ac2df219bcbbc9b90272e623d3f6ca3221e57113023064426eff0e2f4f2", - "0x8a4e0776f03819e1f35b3325f20f793d026ccae9a769d6e0f987466e00bd1ce7", - "0x2f65f20089a31f79c2c0ce668991f4440b576ecf05776c1f6abea5e9b14b570f", - "0x448e124079a48f62d0d79b96d5ed1ffb86610561b10d5c4236280b01f8f1f406", - "0x419b34eca1440c847f7bff9e948c9913075d8e13c270e67f64380a3f31de9bb2", - "0x2f6e4fee667acaa81ba8e51172b8329ed936d57e9756fb31f635632dbc2709b7", - "0xdd5afc79e8540fcee6a896c43887bd59c9de5d61b3d1b86539faeb41a14b251d", - "0xc707bed926a46cc451a6b05e642b6098368dbdbf14528c4c28733d5d005af516", - "0x153e850b606eb8a05eacecc04db4b560d007305e664bbfe01595cb69d26b8597", - "0x1b91cc07570c812bb329d025e85ef520132981337d7ffc3d84003f81a90bf7a7", - "0x4ca32e77a12951a95356ca348639ebc451170280d979e91b13316844f65ed42a", - "0xe49ea1998e360bd68771bd69c3cd4cf406b41ccca4386378bec66ea210c40084", - "0x01aaffbde1a672d253e0e317603c2dc1d0f752100d9e853f840bca96e57f314c", - "0x170d0befcbbaafb317c8684213a4989368332f66e889824cc4becf148f808146", - "0x56f973308edf5732a60aa3e7899ae1162c7a2c7b528c3315237e20f9125b34e0", - "0x66c54fd5f6d480cab0640e9f3ec1a4eafbafc0501528f57bb0d5c78fd03068ef", - "0xaca6c83f665c64d76fbc4858da9f264ead3b6ecdc3d7437bb800ef7240abffb9", - 
"0xf1d4e02e7c85a92d634d16b12dc99e1d6ec9eae3d8dfbca77e7c609e226d0ce7", - "0x094352545250e843ced1d3c6c7957e78c7d8ff80c470974778930adbe9a4ed1a", - "0x76efa93070d78b73e12eb1efa7f36d49e7944ddcc3a043b916466ee83dca52ce", - "0x1772a2970588ddb584eadf02178cdb52a98ab6ea8a4036d29e59f179d7ba0543", - "0xe4bbf2d97d65331ac9f680f864208a9074d1def3c2433458c808427e0d1d3167", - "0x8ccfb5252b22c77ea631e03d491ea76eb9b74bc02072c3749f3e9d63323b44df", - "0x9e212a9bdf4e7ac0730a0cecd0f6cc49afc7e3eca7a15d0f5f5a68f72e45363b", - "0x52e548ea6445aae3f75509782a7ab1f4f02c2a85cdd0dc928370f8c76ae8802d", - "0xb62e7d73bf76c07e1a6f822a8544b78c96a6ba4f5c9b792546d94b56ca12c8b9", - "0x595cb0e985bae9c59af151bc748a50923921a195bbec226a02157f3b2e066f5b", - "0x1c7aa6b36f402cec990bafefbdbb845fc6c185c7e08b6114a71dd388fe236d32", - "0x01ee2ff1a1e88858934a420258e9478585b059c587024e5ec0a77944821f798c", - "0x420a963a139637bffa43cb007360b9f7d305ee46b6a694b0db91db09618fc2e5", - "0x5a8e2ad20f8da35f7c885e9af93e50009929357f1f4b38a6c3073e8f58fae49e", - "0x52a405fdd84c9dd01d1da5e9d1c4ba95cb261b53bf714c651767ffa2f9e9ad81", - "0xa1a334c901a6d5adc8bac20b7df025e906f7c4cfc0996bfe2c62144691c21990", - "0xb789a00252f0b34bded3cb14ae969effcf3eb29d97b05a578c3be8a9e479c213", - "0xb9dbf7e9ddb638a515da245845bea53d07becdf3f8d1ec17de11d495624c8eab", - "0xaf566b41f5ed0c026fa8bc709533d3fa7a5c5d69b03c39971f32e14ab523fa3d", - "0x8121e0b2d9b106bb2aefd364fd6a450d88b88ee1f5e4aad7c0fcd8508653a112", - "0x8581c1be74279216b93e0a0d7272f4d6385f6f68be3eef3758d5f68b62ee7b6c", - "0x85386f009278f9a1f828404fa1bbfa02dfb9d896554f0a52678eb6ec8feadc55", - "0xf483ed167d92a0035ac65a1cfdb7906e4952f74ae3a1d86324d21f241daffcb7", - "0x3872485e2a520a350884accd990a1860e789dd0d0664ad14f50186a92c7be7be", - "0xc6c1a3301933019105f5650cabcb22bfbf221965ffcfc1329315b24ea3d77fd4", - "0xcee901330a60d212a867805ce0c28f53c6cc718f52156c9e74390d18f5df6280", - "0xa67ae793b1cd1a828a607bae418755c84dbb61adf00833d4c61a94665363284f", - 
"0x80d8159873b517aa6815ccd7c8ed7cfb74f84298d703a6c5a2f9d7d4d984ddde", - "0x1de5a8b915f2d9b45c97a8e134871e2effb576d05f4922b577ade8e3cd747a79", - "0x6ea17c5ece9b97dddb8b2101b923941a91e4b35e33d536ab4ff15b647579e1f5", - "0xcb78631e09bc1d79908ce1d3e0b6768c54b272a1a5f8b3b52485f98d6bba9245", - "0xd7c38f9d3ffdc626fe996218c008f5c69498a8a899c7fd1d63fbb03e1d2a073f", - "0x72cdef54267088d466244a92e4e6f10742ae5e6f7f6a615eef0da049a82068f9", - "0x60b3c490ba8c502656f9c0ed37c47283e74fe1bc7f0e9f651cbc76552a0d88eb", - "0x56bd0c66987a6f3761d677097be9440ea192c1cb0f5ec38f42789abe347e0ea9", - "0x3caac3e480f62320028f6f938ee147b4c78e88a183c464a0c9fb0df937ae30c1", - "0x7a4d2f11bddda1281aba5a160df4b814d23aef07669affe421a861fac2b4ec0f", - "0x9bb4d11299922dc309a4523959298a666ebe4063a9ee3bad1b93988ed59fb933", - "0x957323fffbaf8f938354662452115ae5acba1290f0d3f7b2a671f0359c109292", - "0x877624e31497d32e83559e67057c7a605fb888ed8e31ba68e89e02220eac7096", - "0x8456546ae97470ff6ea98daf8ae632e59b309bd3ff8e9211f7d21728620ed1e5", - "0xbacb26f574a00f466ce354e846718ffe3f3a64897d14d5ffb01afcf22f95e72b", - "0x0228743a6e543004c6617bf2c9a7eba1f92ebd0072fb0383cb2700c3aed38ba0", - "0x04f093f0f93c594549436860058371fb44e8daf78d6e5f563ba63a46b61ddbf0", - "0x0ba17c1ec93429ceaff08eb81195c9844821b64f2b5363926c2a6662f83fb930", - "0xd71605d8446878c677f146837090797e888416cfc9dc4e79ab11776cc6639d3f", - "0x33dde958dc5a6796138c453224d4d6e7f2ae740cceef3b52a8b669eb4b9691a1", - "0x3c39838295d1495e90e61ce59f6fcc693b31c292d02d31759719df6fe3214559", - "0x8aecc66f38644296cf0e6693863d57a243a31a4929130e22ab44cb6157b1af41", - "0xdf7153a7eab9521f2b37124067166c72de8f342249ac0e0f5350bd32f1251053", - "0xa498840b58897cf3bed3981b94c86d85536dfebbc437d276031ebd9352e171eb", - "0xb1df15a081042ab665458223a0449ffc71a10f85f3d977beb20380958fd92262", - "0x15d3bdbdee2a61b01d7a6b72a5482f6714358eedf4bece7bb8458e100caf8fba", - "0x0c96b7a0ea09c3ef758424ffb93654ce1520571e32e1f83aecbeded2388c3a7a", - 
"0xb4a3a8023266d141ecd7c8a7ca5282a825410b263bc11c7d6cab0587c9b5446e", - "0xf38f535969d9592416d8329932b3a571c6eacf1763de10fb7b309d3078b9b8d4", - "0x5a1e7b1c3b3943158341ce6d7f9f74ae481975250d89ae4d69b2fcd4c092eb4e", - "0xdad31e707d352f6cca78840f402f2ac9292094b51f55048abf0d2badfeff5463", - "0x097e290170068e014ceda3dd47b28ede57ff7f916940294a13c9d4aa2dc98aad", - "0x22e2dcedb6bb7f8ace1e43facaa502daa7513e523be98daf82163d2a76a1e0be", - "0x7ef2b211ab710137e3e8c78b72744bf9de81c2adde007aef6e9ce92a05e7a2c5", - "0x49b427805fc5186f31fdd1df9d4c3f51962ab74e15229e813072ec481c18c717", - "0xe60f6caa09fa803d97613d58762e4ff7f22f47d5c30b9d0116cdc6a357de4464", - "0xab3507b37ee92f026c72cc1559331630bc1c7335b374e4418d0d02687df1a9dd", - "0x50825ae74319c9adebc8909ed7fc461702db8230c59975e8add09ad5e7a647ab", - "0x0ee8e9c1d8a527a42fb8c2c8e9e51faf727cffc23ee22b5a95828f2790e87a29", - "0x675c21c290ddb40bec0302f36fbcd2d1832717a4bc05d113c6118a62bc8f9aca", - "0x580bafab24f673317b533148d7226d485e211eaa3d6e2be2529a83ca842b58a7", - "0x540e474776cae597af24c147dc1ae0f70a6233e98cf5c3ce31f38b830b75c99a", - "0x36eaf9f286e0f356eaaf8d81f71cc52c81d9ebc838c3b4859009f8567a224d16", - "0x0e2cbbb40954be047d02b1450a3dbd2350506448425dc25fd5faf3a66ee8f5c4", - "0x7eb0390cfe4c4eb120bbe693e87adc8ecab51d5fd8ce8f911c8ff07fad8cbe20", - "0xbf77589f5c2ebb465b8d7936f6260a18a243f59bd87390ee22cf579f6f020285", - "0x695b96bb28693f6928777591ef64146466d27521280a295936a52ec60707c565", - "0x22a0d018cbd4274caa8b9e7fb132e0a7ed787874046ca683a7d81d1c7c8b8f15", - "0x84092b122bb35e5ad85407b4b55f33707b86e0238c7970a8583f3c44308ed1d9", - "0xea346067ca67255235f9cae949f06e4b6c93846a7abc7c8c8cd786e9c4b3e4bc", - "0xa6df0716b125dc696b5d0e520cb49c1c089397c754efc146792e95bc58cc7159", - "0x7377b5d3953029fc597fb10bb6479ee34133d38f08783fbb61c7d070f34ea66f", - "0x7d79b00ffb976a10cd24476a394c8ed22f93837c51a58a3ddc7418153a5a8ea1", - "0x01e55182e80dff26cc3e06bb736b4a63745bde8ae28c604fa7fb97d99de5f416", - 
"0x062a2d5a207f8d540764d09648afecbf5033b13aec239f722b9033a762acf18b", - "0x48be60a3221d98b4d62f0b89d3bef74c70878dd65c6f79b34c2c36d0ddaa1da0", - "0x41e11f33543cf045c1a99419379ea31523d153bdf664549286b16207b9648c85", - "0xeef4d30b4700813414763a199e7cc6ab0faec65ef8b514faa01c6aa520c76334", - "0xea7cfe990422663417715e7859fc935ca47f47c943a1254044b6bc5934c94bc8", - "0xbbd3c834e5403b98a0ca346c915a23310f3d58880786628bc6cfbe05ba29c3c5", - "0xe216379f385bc9995ae0f37f1409a78d475c56b8aeb4ee434326724ec20124f7", - "0xdd328a1eee19d09b6fef06e252f8ad0ae328fbf900ef745f5950896803a3899d", - "0xa16fde34b0d743919feb0781eca0c525a499d279119af823cb3a8817000335db", - "0x7a28d108c59b83b12c85cd9aabc1d1d994a9a0329ae7b64a32aadcd61ebe50e3", - "0xb28bc82fceae74312eb837a805f0a8a01c0f669b99bb03fde31c4d58bedff89b", - "0x1b0d8f37d349781e846900b51a90c828aa384afe9b8ee1f88aeb8dba4b3168f2", - "0xbfd0301ff964c286c3331a30e09e0916da6f484e9c9596dbf1cae3cc902dbf9e", - "0xbb8254cb9ef6b485b8fb6caeafe45f920affc30f6b9d671e9a454530536f4fef", - "0xcad2317cf63dfa7147ded5c7e15f5f72e78f42d635e638f1ece6bc722ca3638b", - "0xb6c6e856fd45117f54775142f2b38f31114539d8943bcbcf823f6c7650c001e4", - "0x869f1baa35684c8f67a5bc99b294187852e6c85243a2f36481d0891d8b043020", - "0x14c6ccf145ee40ff56e3810058d2fba9a943ffc7c7087c48a08b2451c13dc788", - "0x263c1bcb712890f155b7e256cefa4abf92fe4380f3ffc11c627d5e4e30864d18", - "0x69f4eaf655e31ad7f7a725cd415ce7e45dd4a8396ac416950d42ed33155c3487", - "0x47e8eec2c5e33c9a54fe1f9b09e7744b614fb16531c36b862aa899424be13b05", - "0x5c985de270e62c44f0b49157882e8e83641b906ce47959e337fe8423e125a2eb", - "0x4e13b11e13202439bb5de5eea3bb75d2d7bf90f91411163ade06161a9cf424db", - "0x583a8fa159bb74fa175d72f4e1705e9a3b8ffe26ec5ad6e720444b99288f1213", - "0x903d2a746a98dfe2ee2632606d57a9b0fa6d8ccd895bb18c2245fd91f8a43676", - "0xa35a51330316012d81ec7249e3f2b0c9d7fcbb99dd98c62fe880d0a152587f51", - "0x33818a7beb91730c7b359b5e23f68a27b429967ea646d1ea99c314353f644218", - 
"0x183650af1e0b67f0e7acb59f8c72cc0e60acc13896184db2a3e4613f65b70a8b", - "0x857ff2974bef960e520937481c2047938a718cea0b709282ed4c2b0dbe2ef8fa", - "0x95a367ecb9a401e98a4f66f964fb0ece783da86536410a2082c5dbb3fc865799", - "0x56c606a736ac8268aedadd330d2681e7c7919af0fe855f6c1c3d5c837aa92338", - "0x5c97f7abf30c6d0d4c23e762c026b94a6052a444df4ed942e91975419f68a3a4", - "0x0b571de27d2022158a3128ae44d23a8136e7dd2dee74421aa4d6ed15ee1090a0", - "0xa17f6bc934a2f3c33cea594fee8c96c1290feec934316ebbbd9efab4937bf9f9", - "0x9ff57d70f27aad7281841e76435285fd27f10dad256b3f5cabde4ddc51b70eff", - "0xafa3071a847215b3ccdf51954aa7cb3dd2e6e2a39800042fc42009da705508b2", - "0x5e3bea33e4ac6f7c50a077d19571b1796e403549b1ce7b15e09905a0cc5a4acf", - "0x0dc7ba994e632ab95f3ecb7848312798810cf761d1c776181882d17fd6dda075", - "0xb4f7158679dad9f7370a2f64fbe617a40092849d17453b4f50a93ca8c6885844", - "0x094564b00f53c6f27c121fd8adfe1685b258b259e585a67b57c85efb804c57b2", - "0x9cd21a4249ba3fccffad550cdb8409dc12d8b74a7192874b6bafe2363886f318", - "0xbb22e0dad55cb315c564c038686419d40ef7f13af2143a28455bf445f6e10393", - "0x2a71d5e00821178c2cd39e7501e07da5cca6680eb7cdbe996f52dccafadb3735", - "0x9619406093b121e044a5b403bb1713ae160aeb52ad441f82dc6c63e4b323b969", - "0x3b8bd1d82c6d67ae707e19b889f1cb1f7bba912f12ae4284298f3a70c3644c79", - "0xd7a70c50d47d48785b299dbea01bf03ef18b8495de3c35cb265bc8f3295c4e15", - "0x8802ecce8dd6b6190af8ac79aafda3479c29f548d65e5798c0ca51a529b19108", - "0x4b630e1df52ec5fd650f4a4e76b3eeddda39e1e9eab996f6d3f02eefdf690990", - "0x0bfbff60fcf7f411d469f7f6f0a58ca305fd84eb529ee3ac73c00174793d723e", - "0x535f78b5f3a99a1c498e2c19dc1acb0fbbaba8972ba1d7d66936c28ab3667ebe", - "0x06ba92d8129db98fec1b75f9489a394022854f22f2e9b9450b187a6fc0d94a86", - "0xb7ae275ba10f80fb618a2cf949d5ad2e3ae24eb2eb37dcf1ec8c8b148d3ba27f", - "0xb275579bcf2584d9794dd3fc7f999902b13d33a9095e1980d506678e9c263de1", - "0x843ccd52a81e33d03ad2702b4ef68f07ca0419d4495df848bff16d4965689e48", - 
"0xde8b779ca7250f0eb867d5abdffd1d28c72a5a884d794383fc93ca40e5bf6276", - "0x6b789a2befccb8788941c9b006e496b7f1b03dbb8e530ba339db0247a78a2850", - "0xfccd4dca80bc52f9418f26b0528690255e320055327a34b50caf088235d2f660", - "0x18479ebfbe86c1e94cd05c70cb6cace6443bd9fdac7e01e9c9535a9e85141f2f", - "0x5350c8f3296441db954a261238c88a3a0c51ab418a234d566985f2809e211148", - "0xa5636614135361d03a381ba9f6168e2fd0bd2c1105f9b4e347c414df8759dea3", - "0xe7bb69e600992e6bd41c88a714f50f450153f1a05d0ddb4213a3fc4ba1f48c3f", - "0x17b42e81bae19591e22aa2510be06803bcb5c39946c928c977d78f346d3ca86b", - "0x30a10c07dc9646b7cbb3e1ab722a94d2c53e04c0c19efaaea7dccba1b00f2a20", - ], - compressed_lamport_pk: - "0x672ba456d0257fe01910d3a799c068550e84881c8d441f8f5f833cbd6c1a9356", - child_sk: - "7419543105316279183937430842449358701327973165530407166294956473095303972104" + seed: "0xc55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", + master_sk: + "6083874454709270928345386274498605044986640685124978867557563392430687146096", + child_index: 0, + lamport_0: vec![ + "0xe345d0ad7be270737de05cf036f688f385d5f99c7fddb054837658bdd2ebd519", + "0x65050bd4db9c77c051f67dcc801bf1cdf33d81131e608505bb3e4523868eb76c", + "0xc4f8e8d251fbdaed41bdd9c135b9ed5f83a614f49c38fffad67775a16575645a", + "0x638ad0feace7567255120a4165a687829ca97e0205108b8b73a204fba6a66faa", + "0xb29f95f64d0fcd0f45f265f15ff7209106ab5f5ce6a566eaa5b4a6f733139936", + "0xbcfbdd744c391229f340f02c4f2d092b28fe9f1201d4253b9045838dd341a6bf", + "0x8b9cf3531bfcf0e4acbfd4d7b4ed614fa2be7f81e9f4eaef53bedb509d0b186f", + "0xb32fcc5c4e2a95fb674fa629f3e2e7d85335f6a4eafe7f0e6bb83246a7eced5f", + "0xb4fe80f7ac23065e30c3398623b2761ac443902616e67ce55649aaa685d769ce", + "0xb99354f04cfe5f393193c699b8a93e5e11e6be40ec16f04c739d9b58c1f55bf3", + "0x93963f58802099ededb7843219efc66a097fab997c1501f8c7491991c780f169", + "0x430f3b027dbe9bd6136c0f0524a0848dad67b253a11a0e4301b44074ebf82894", + 
"0xd635c39b4a40ad8a54d9d49fc8111bd9d11fb65c3b30d8d3eaef7d7556aac805", + "0x1f7253a6474cf0b2c05b02a7e91269137acddedcb548144821f9a90b10eccbab", + "0x6e3bdb270b00e7b6eb8b044dbfae07b51ea7806e0d24218c59a807a7fd099c18", + "0x895488ad2169d8eaae332ce5b0fe1e60ffab70e62e1cb15a2a1487544af0a6e8", + "0x32d45a99d458c90e173a3087ea3661ab62d429b285089e92806a9663ba825342", + "0xc15c52106c3177f5848a173076a20d46600ca65958a1e3c7d45a593aaa9670ed", + "0xd8180c550fbe4cd6d5b676ff75e0728729d8e28a3b521d56152594ac6959d563", + "0x58fe153fac8f4213aaf175e458435e06304548024bcb845844212c774bdffb2a", + "0x10fff610a50f4bee5c978f512efa6ab4fafacb65929606951ba5b93eeb617b5a", + "0x78ac9819799b52eba329f13dd52cf0f6148a80bf04f93341814c4b47bb4aa5ec", + "0xa5c3339caa433fc11e74d1765bec577a13b054381a44b23c2482e750696876a9", + "0x9f716640ab5cdc2a5eb016235cddca2dc41fa4ec5acd7e58af628dade99ec376", + "0x2544364320e67577c4fed8c7c7c839deed93c24076d5343c5b8faca4cc6dc2d8", + "0x62553e782541f822c589796be5d5c83bfc814819100b2be0710b246f5aa7149c", + "0x229fb761c46c04b22ba5479f2696be0f936fded68d54dd74bcd736b8ba512afb", + "0x0af23996a65b98a0ebaf19f3ec0b3ef20177d1bfd6eb958b3bd36e0bdbe04c8c", + "0x6f0954f9deab52fd4c8d2daba69f73a80dea143dd49d9705c98db3d653adf98c", + "0xfa9221dd8823919a95b35196c1faeb59713735827f3e84298c25c83ac700c480", + "0x70c428e3ff9e5e3cda92d6bb85018fb89475c19f526461cca7cda64ebb2ff544", + "0xdcaac3413e22314f0f402f8058a719b62966b3a7429f890d947be952f2e314ba", + "0xb6b383cb5ec25afa701234824491916bfe6b09d28cf88185637e2367f0cf6edc", + "0x7b0d91488fc916aba3e9cb61a5a5645b9def3b02e4884603542f679f602afb8d", + "0xe9c20abca284acfde70c59584b9852b85c52fa7c263bb981389ff8d638429cd7", + "0x838524f798daee6507652877feb9597f5c47e9bb5f9aa52a35fb6fff796813b9", + "0xbe1ca18faf9bf322474fad1b3d9b4f1bc76ae9076e38e6dd2b16e2faf487742b", + "0xbf02d70f1a8519343a16d24bade7f7222912fd57fe4f739f367dfd99d0337e8e", + "0xc979eb67c107ff7ab257d1c0f4871adf327a4f2a69e01c42828ea27407caf058", + 
"0xf769123d3a3f19eb7b5c3fd4f467a042944a7c5ff8834cebe427f47dbd71460c", + "0xaefc8edc23257e1168a35999fe3832bcbc25053888cc89c38667482d6748095b", + "0x8ff399f364d3a2428b1c92213e4fdc5341e7998007da46a5a2f671929b42aaab", + "0xcf2a3d9e6963b24c5001fbba1e5ae7f45dd6cf520fd24861f745552db86bab48", + "0xb380e272d7f3091e5c887fa2e7c690c67d59f4d95f8376d150e555da8c738559", + "0xc006a749b091d91204dbb64f59059d284899de5986a7f84f8877afd5e0e4c253", + "0x818d8bb9b7da2dafa2ef059f91975e7b6257f5e199d217320de0a576f020de5c", + "0x7aabf4a1297d2e550a2ee20acb44c1033569e51b6ec09d95b22a8d131e30fd32", + "0xdd01c80964a5d682418a616fb10810647c9425d150df643c8ddbbe1bfb2768b7", + "0x1e2354e1d97d1b06eb6cfe9b3e611e8d75b5c57a444523e28a8f72a767eff115", + "0x989c9a649dca0580256113e49ea0dd232bbfd312f68c272fe7c878acc5da7a2c", + "0x14ee1efe512826fff9c028f8c7c86708b841f9dbf47ce4598298b01134ebdc1a", + "0x6f861dba4503f85762d9741fa8b652ce441373f0ef2b7ebbd5a794e48cdab51b", + "0xda110c9492ffdb87efe790214b7c9f707655a5ec08e5af19fb2ab2acc428e7dc", + "0x5576aa898f6448d16e40473fcb24c46c609a3fc46a404559faa2d0d34d7d49ce", + "0x9bd9a35675f2857792bc45893655bfdf905ffeaee942d93ad39fbcadd4ca9e11", + "0xfa95e4c37db9303d5213890fd984034089cbc9c6d754741625da0aa59cc45ccf", + "0xfef7d2079713f17b47239b76c8681bf7f800b1bfeac7a53265147579572ddf29", + "0x39aa7c0fecf9a1ed037c685144745fda16da36f6d2004844cf0e2d608ef6ed0e", + "0x5530654d502d6ba30f2b16f49cc5818279697308778fd8d40db8e84938144fb6", + "0xb1beaa36397ba1521d7bf7df16536969d8a716e63510b1b82a715940180eb29f", + "0x21abe342789f7c15a137afa373f686330c0db8c861572935a3cd8dcf9e4e1d45", + "0x27b5a1acda55b4e0658887bd884d3203696fcae0e94f19e31bfe931342b1c257", + "0x58401a02502d7708a812c0c72725f768f5a556480517258069f2d72543cda888", + "0x4b38f291548f51bee7e4cf8cc5c8aa8f4ad3ec2461dba4ccbab70f1c1bfd7feb", + "0x9b39a53fdafaaf1d23378e0aa8ae65d38480de69821de2910873eefc9f508568", + "0x932200566a3563ee9141913d12fd1812cb008cb735724e8610890e101ec10112", + 
"0x6a72f70b4ec5491f04780b17c4776a335fcc5bff5073d775150e08521dc74c91", + "0x86d5c60e627a4b7d5d075b0ba33e779c45f3f46d22ed51f31360afd140851b67", + "0x5ca2a736bb642abc4104faa781c9aff13d692a400d91dc961aec073889836946", + "0xa14bca5a262ac46ceac21388a763561fc85fb9db343148d786826930f3e510cd", + "0x87be03a87a9211504aa70ec149634ee1b97f7732c96377a3c04e98643dcba915", + "0x8fe283bc19a377823377e9c326374ebb3f29527c12ea77bfb809c18eef8943b0", + "0x8f519078b39a3969f7e4caeca9839d4e0eccc883b89e4a86d0e1731bfc5e33fc", + "0x33d7c28c3d26fdfc015a8c2131920e1392ef0aea55505637b54ea63069c7858e", + "0xe57de7c189fcc9170320c7acedb38798562a48dbc9943b2a8cd3441d58431128", + "0x513dac46017050f82751a07b6c890f14ec43cadf687f7d202d2369e35b1836b4", + "0xfd967d9f805bb7e78f7b7caa7692fdd3d6b5109c41ad239a08ad0a38eeb0ac4c", + "0xf2013e4da9abcc0f03ca505ed94ec097556dbfd659088cd24ec223e02ac43329", + "0xe0dcfac50633f7417f36231df2c81fa1203d358d5f57e896e1ab4b512196556b", + "0xf022848130e73fe556490754ef0ecfcdaaf3b9ff16ae1eda7d38c95c4f159ded", + "0x2147163a3339591ec7831d2412fb2d0588c38da3cd074fa2a4d3e5d21f9f1d2d", + "0x11ee2404731962bf3238dca0d9759e06d1a5851308b4e6321090886ec5190b69", + "0xf7679ecd07143f8ac166b66790fa09aed39352c09c0b4766bbe500b1ebace5a5", + "0xc7a0e95f09076472e101813a95e6ea463c35bd5ee9cfda3e5d5dbccb35888ef0", + "0xde625d3b547eb71bea5325a0191a592fa92a72e4b718a499fdba32e245ddf37e", + "0x7e5bdccd95df216e8c59665073249072cb3c9d0aef6b341afc0ca90456942639", + "0xc27f65fd9f797ede374e06b4ddb6e8aa59c7d6f36301f18b42c48b1889552fe3", + "0x8175730a52ea571677b035f8e2482239dda1cfbff6bc5cde00603963511a81af", + "0x09e440f2612dad1259012983dc6a1e24a73581feb1bd69d8a356eea16ba5fd0e", + "0x59dcc81d594cbe735a495e38953e8133f8b3825fd84767af9e4ea06c49dbabfa", + "0x6c8480b59a1a958c434b9680edea73b1207077fb9a8a19ea5f9fbbf6f47c4124", + "0x81f5c89601893b7a5a231a7d37d6ab9aa4c57f174fcfc6b40002fa808714c3a1", + "0x41ba4d6b4da141fcc1ee0f4b47a209cfd143d34e74fc7016e9956cedeb2db329", + 
"0x5e0b5b404c60e9892040feacfb4a84a09c2bc4a8a5f54f3dad5dca4acdc899dc", + "0xe922eebf1f5f15000d8967d16862ed274390cde808c75137d2fb9c2c0a80e391", + "0xbf49d31a59a20484f0c08990b2345dfa954509aa1f8901566ab9da052b826745", + "0xb84e07da828ae668c95d6aa31d4087504c372dbf4b5f8a8e4ded1bcf279fd52b", + "0x89288bf52d8c4a9561421ad199204d794038c5d19ae9fee765ee2b5470e68e7e", + "0xf6f618be99b85ec9a80b728454a417c647842215e2160c6fe547dd5a69bd9302", + "0xdd9adc002f98c9a47c7b704fc0ce0a5c7861a5e2795b6014749cde8bcb8a034b", + "0xd119a4b2c0db41fe01119115bcc35c4b7dbfdb42ad3cf2cc3f01c83732acb561", + "0x9c66bc84d416b9193bad9349d8c665a9a06b835f82dc93ae0cccc218f808aad0", + "0xd4b50eefcd2b5df075f14716cf6f2d26dfc8ae02e3993d711f4a287313038fde", + "0xaf72bfb346c2f336b8bc100bff4ba35d006a3dad1c5952a0adb40789447f2704", + "0xc43ca166f01dc955e7b4330227635feb1b0e0076a9c5633ca5c614a620244e5b", + "0x5efca76970629521cfa053fbbbda8d3679cadc018e2e891043b0f52989cc2603", + "0x35c57de1c788947f187051ce032ad1e899d9887d865266ec6fcfda49a8578b2b", + "0x56d4be8a65b257216eab7e756ee547db5a882b4edcd12a84ed114fbd4f5be1f1", + "0x257e858f8a4c07a41e6987aabaa425747af8b56546f2a3406f60d610bcc1f269", + "0x40bd9ee36d52717ab22f1f6b0ee4fb38b594f58399e0bf680574570f1b4b8c90", + "0xcb6ac01c21fc288c12973427c5df6eb8f6aefe64b92a6420c6388acdf36bc096", + "0xa5716441312151a5f0deb52993a293884c6c8f445054ce1e395c96adeee66c6d", + "0xe15696477f90113a10e04ba8225c28ad338c3b6bdd7bdeb95c0722921115ec85", + "0x8faeaa52ca2f1d791cd6843330d16c75eaf6257e4ba236e3dda2bc1a644aee00", + "0xc847fe595713bf136637ce8b43f9de238762953fed16798878344da909cc76ae", + "0xb5740dc579594dd110078ce430b9696e6a308078022dde2d7cfe0ef7647b904e", + "0x551a06d0771fcd3c53aea15aa8bf700047138ef1aa22265bee7fb965a84c9615", + "0x9a65397a5907d604030508d41477de621ce4a0d79b772e81112d634455e7a4da", + "0x6462d4cc2262d7faf8856812248dc608ae3d197bf2ef410f00c3ae43f2040995", + "0x6782b1bd319568e30d54b324ab9ed8fdeac6515e36b609e428a60785e15fb301", + 
"0x8bcdcf82c7eb2a07e14db20d80d9d2efea8d40320e121923784c92bf38250a8e", + "0x46ed84fa17d226d5895e44685747ab82a97246e97d6237014611aaaba65ed268", + "0x147e87981673326c5a2bdb06f5e90eaaa9583857129451eed6dde0c117fb061f", + "0x4141d6fe070104c29879523ba6669552f3d457c0929bb878d2751f4ff059b895", + "0xd866ce4ef226d74841f950fc28cdf2235db21e0e3f07a0c8f807704464db2210", + "0xa804f9118bf92558f684f90c2bda832a4f51ef771ffb2765cde3ec6f48124f32", + "0xc436d4a65910124e00cded9a637178914a8fbc090400f3f031c03eac4d0295a5", + "0x643fdb9243656512316528de04dcc7344ca33783580ad0c3debf8c4a6e7c8bc4", + "0x7f4a345b41706b281b2de998e91ff62d908eb29fc333ee336221757753c96e23", + "0x6bdc086a5b11de950cabea33b72d98db886b291c4c2f02d3e997edc36785d249", + "0xfb10b5b47d374078c0a52bff7174bf1cd14d872c7d20b4a009e2afd3017a9a17", + "0x1e07e605312db5380afad8f3d7bd602998102fdd39565b618ac177b13a6527e6", + "0xc3161b5a7b93aabf05652088b0e5b4803a18be693f590744c42c24c7aaaeef48", + "0xa47e4f25112a7d276313f153d359bc11268b397933a5d5375d30151766bc689a", + "0xb24260e2eff88716b5bf5cb75ea171ac030f5641a37ea89b3ac45acb30aae519", + "0x2bcacbebc0a7f34406db2c088390b92ee34ae0f2922dedc51f9227b9afb46636", + "0xc78c304f6dbe882c99c5e1354ce6077824cd42ed876db6706654551c7472a564", + "0x6e2ee19d3ee440c78491f4e354a84fa593202e152d623ed899e700728744ac85", + "0x2a3f438c5dc012aa0997b66f661b8c10f4a0cd7aa5b6e5922b1d73020561b27f", + "0xd804f755d93173408988b95e9ea0e9feae10d404a090f73d9ff84df96f081cf7", + "0xe06fda941b6936b8b33f00ffa02c8b05fd78fbec953da61da2043f5644b30a50", + "0x45ee279b465d53148850a16cc7f6bd33e7627aef554a9418ed012ca8f9717f80", + "0x9c79348c1bcd6aa2135452491d73564413a247ea8cc38fa7dcc6c43f8a2d61d5", + "0x7c91e056f89f2a77d3e3642e595bcf4973c3bca68dd2b10f51ca0d8945e4255e", + "0x669f976ebe38cbd22c5b1f785e14b76809d673d2cb1458983dbda41f5adf966b", + "0x8bc71e99ffcc119fd8bd604af54c0663b0325a3203a214810fa2c588089ed5a7", + "0x36b3f1ffeae5d9855e0965eef33f4c5133d99685802ac5ce5e1bb288d308f889", + 
"0x0aad33df38b3f31598e04a42ec22f20bf2e2e9472d02371eb1f8a06434621180", + "0x38c5632b81f90efbc51a729dcae03626a3063aa1f0a102fd0e4326e86a08a732", + "0x6ea721753348ed799c98ffa330d801e6760c882f720125250889f107915e270a", + "0xe700dd57ce8a653ce4269e6b1593a673d04d3de8b79b813354ac7c59d1b99adc", + "0xe9294a24b560d62649ca898088dea35a644d0796906d41673e29e4ea8cd16021", + "0xf20bb60d13a498a0ec01166bf630246c2f3b7481919b92019e2cfccb331f2791", + "0xf639a667209acdd66301c8e8c2385e1189b755f00348d614dc92da14e6866b38", + "0x49041904ee65c412ce2cd66d35570464882f60ac4e3dea40a97dd52ffc7b37a2", + "0xdb36b16d3a1010ad172fc55976d45df7c03b05eab5432a77be41c2f739b361f8", + "0x71400cdd2ea78ac1bf568c25a908e989f6d7e2a3690bc869c7c14e09c255d911", + "0xf0d920b2d8a00b88f78e7894873a189c580747405beef5998912fc9266220d98", + "0x1a2baefbbd41aa9f1cc5b10e0a7325c9798ba87de6a1302cf668a5de17bc926a", + "0x449538a20e52fd61777c45d35ff6c2bcb9d9165c7eb02244d521317f07af6691", + "0x97006755b9050b24c1855a58c4f4d52f01db4633baff4b4ef3d9c44013c5c665", + "0xe441363a27b26d1fff3288222fa8ed540f8ca5d949ddcc5ff8afc634eec05336", + "0xed587aa8752a42657fea1e68bc9616c40c68dcbbd5cb8d781e8574043e29ef28", + "0x47d896133ba81299b8949fbadef1c00313d466827d6b13598685bcbb8776c1d2", + "0x7786bc2cb2d619d07585e2ea4875f15efa22110e166af87b29d22af37b6c047d", + "0x956b76194075fe3daf3ca508a6fad161deb05d0026a652929e37c2317239cbc6", + "0xec9577cb7b85554b2383cc4239d043d14c08d005f0549af0eca6994e203cb4e7", + "0x0722d0c68d38b23b83330b972254bbf9bfcf32104cc6416c2dad67224ac52887", + "0x532b19d54fb6d77d96452d3e562b79bfd65175526cd793f26054c5f6f965df39", + "0x4d62e065e57cbf60f975134a360da29cabdcea7fcfc664cf2014d23c733ab3b4", + "0x09be0ea6b363fd746b303e482cb4e15ef25f8ae57b7143e64cbd5c4a1d069ebe", + "0x69dcddc3e05147860d8d0e90d602ac454b609a82ae7bb960ee2ecd1627d77777", + "0xa5e2ae69d902971000b1855b8066a4227a5be7234ac9513b3c769af79d997df4", + "0xc287d4bc953dcff359d707caf2ccba8cc8312156eca8aafa261fb72412a0ea28", + 
"0xb27584fd151fb30ed338f9cba28cf570f7ca39ebb03eb2e23140423af940bd96", + "0x7e02928194441a5047af89a6b6555fea218f1df78bcdb5f274911b48d847f5f8", + "0x9ba611add61ea6ba0d6d494c0c4edd03df9e6c03cafe10738cee8b7f45ce9476", + "0x62647ec3109ac3db3f3d9ea78516859f0677cdde3ba2f27f00d7fda3a447dd01", + "0xfa93ff6c25bfd9e17d520addf5ed2a60f1930278ff23866216584853f1287ac1", + "0x3b391c2aa79c2a42888102cd99f1d2760b74f772c207a39a8515b6d18e66888a", + "0xcc9ae3c14cbfb40bf01a09bcde913a3ed208e13e4b4edf54549eba2c0c948517", + "0xc2b8bce78dd4e876da04c54a7053ca8b2bedc8c639cee82ee257c754c0bea2b2", + "0xdb186f42871f438dba4d43755c59b81a6788cb3b544c0e1a3e463f6c2b6f7548", + "0xb7f8ba137c7783137c0729de14855e20c2ac4416c33f5cac3b235d05acbab634", + "0x282987e1f47e254e86d62bf681b0803df61340fdc9a8cf625ef2274f67fc6b5a", + "0x04aa195b1aa736bf8875777e0aebf88147346d347613b5ab77bef8d1b502c08c", + "0x3f732c559aee2b1e1117cf1dec4216a070259e4fa573a7dcadfa6aab74aec704", + "0x72699d1351a59aa73fcede3856838953ee90c6aa5ef5f1f7e21c703fc0089083", + "0x6d9ce1b8587e16a02218d5d5bed8e8d7da4ac40e1a8b46eeb412df35755c372c", + "0x4f9c19b411c9a74b8616db1357dc0a7eaf213cb8cd2455a39eb7ae4515e7ff34", + "0x9163dafa55b2b673fa7770b419a8ede4c7122e07919381225c240d1e90d90470", + "0x268ff4507b42e623e423494d3bb0bc5c0917ee24996fb6d0ebedec9ce8cd9d5c", + "0xff6e6169d233171ddc834e572024586eeb5b1bda9cb81e5ad1866dbc53dc75fe", + "0xb379a9c8279205e8753b6a5c865fbbf70eb998f9005cd7cbde1511f81aed5256", + "0x3a6b145e35a592e037c0992c9d259ef3212e17dca81045e446db2f3686380558", + "0x60fb781d7b3137481c601871c1c3631992f4e01d415841b7f5414743dcb4cfd7", + "0x90541b20b0c2ea49bca847e2db9b7bba5ce15b74e1d29194a12780e73686f3dd", + "0xe2b0507c13ab66b4b769ad1a1a86834e385b315da2f716f7a7a8ff35a9e8f98c", + "0xeefe54bc9fa94b921b20e7590979c28a97d8191d1074c7c68a656953e2836a72", + "0x8676e7f59d6f2ebb0edda746fc1589ef55e07feab00d7008a0f2f6f129b7bb3a", + "0x78a3d93181b40152bd5a8d84d0df7f2adde5db7529325c13bc24a5b388aed3c4", + 
"0xcc0e2d0cba7aaa19c874dbf0393d847086a980628f7459e9204fda39fad375c0", + "0x6e46a52cd7745f84048998df1a966736d2ac09a95a1c553016fef6b9ec156575", + "0x204ac2831d2376d4f9c1f5c106760851da968dbfc488dc8a715d1c764c238263", + "0xbdb8cc7b7e5042a947fca6c000c10b9b584e965c3590f92f6af3fe4fb23e1358", + "0x4a55e4b8a138e8508e7b11726f617dcf4155714d4600e7d593fd965657fcbd89", + "0xdfe064bb37f28d97b16d58b575844964205e7606dce914a661f2afa89157c45b", + "0x560e374fc0edda5848eef7ff06471545fcbdd8aefb2ecddd35dfbb4cb03b7ddf", + "0x10a66c82e146da5ec6f48b614080741bc51322a60d208a87090ad7c7bf6b71c6", + "0x62534c7dc682cbf356e6081fc397c0a17221b88508eaeff798d5977f85630d4f", + "0x0138bba8de2331861275356f6302b0e7424bbc74d88d8c534479e17a3494a15b", + "0x580c7768bf151175714b4a6f2685dc5bcfeb088706ee7ed5236604888b84d3e4", + "0xd290adb1a5dfc69da431c1c0c13da3be788363238d7b46bc20185edb45ab9139", + "0x1689879db6c78eb4d3038ed81be1bc106f8cfa70a7c6245bd4be642bfa02ebd7", + "0x6064c384002c8b1594e738954ed4088a0430316738def62822d08b2285514918", + "0x01fd23493f4f1cc3c5ff4e96a9ee386b2a144b50a428a6b5db654072bddadfe7", + "0xd5d05bb7f23ab0fa2b82fb1fb14ac29c2477d81a85423d0a45a4b7d5bfd81619", + "0xd72b9a73ae7b24db03b84e01106cea734d4b9d9850b0b7e9d65d6001d859c772", + "0x156317cb64578db93fee2123749aff58c81eae82b189b0d6f466f91de02b59df", + "0x5fba299f3b2c099edbac18d785be61852225890fc004bf6be0787d62926a79b3", + "0x004154f28f685bdbf0f0d6571e7a962a4c29b6c3ebedaaaf66097dfe8ae5f756", + "0x4b45816f9834c3b289affce7a3dc80056c2b7ffd3e3c250d6dff7f923e7af695", + "0x6ca53bc37816fff82346946d83bef87860626bbee7fd6ee9a4aeb904d893a11f", + "0xf48b2f43184358d66d5b5f7dd2b14a741c7441cc7a33ba3ebcc94a7b0192d496", + "0x3cb98f4baa429250311f93b46e745174f65f901fab4eb8075d380908aaaef650", + "0x343dfc26b4473b3a20e706a8e87e5202a4e6b96b53ed448afb9180c3f766e5f8", + "0x1ace0e8a735073bcbaea001af75b681298ef3b84f1dbab46ea52cee95ab0e7f9", + "0xd239b110dd71460cdbc41ddc99494a7531186c09da2a697d6351c116e667733b", + 
"0x22d6955236bd275969b8a6a30c23932670a6067f68e236d2869b6a8b4b493b83", + "0x53c1c01f8d061ac89187e5815ef924751412e6a6aa4dc8e3abafb1807506b4e0", + "0x2f56dd20c44d7370b713e7d7a1bfb1a800cac33f8a6157f278e17a943806a1f7", + "0xc99773d8a5b3e60115896a65ac1d6c15863317d403ef58b90cb89846f4715a7f", + "0x9f4b6b77c254094621cd336da06fbc6cbb7b8b1d2afa8e537ceca1053c561ef5", + "0x87944d0b210ae0a6c201cba04e293f606c42ebaed8b4a5d1c33f56863ae7e1b5", + "0xa7d116d962d03ca31a455f9cda90f33638fb36d3e3506605aa19ead554487a37", + "0x4042e32e224889efd724899c9edb57a703e63a404129ec99858048fbc12f2ce0", + "0x36759f7a0faeea1cd4cb91e404e4bf09908de6e53739603d5f0db52b664158a3", + "0xa4d50d005fb7b9fea8f86f1c92439cc9b8446efef7333ca03a8f6a35b2d49c38", + "0x80cb7c3e20f619006542edbe71837cdadc12161890a69eea8f41be2ee14c08a3", + "0xbb3c44e1df45f2bb93fb80e7f82cee886c153ab484c0095b1c18df03523629b4", + "0x04cb749e70fac3ac60dea779fceb0730b2ec5b915b0f8cf28a6246cf6da5db29", + "0x4f5189b8f650687e65a962ef3372645432b0c1727563777433ade7fa26f8a728", + "0x322eddddf0898513697599b68987be5f88c0258841affec48eb17cf3f61248e8", + "0x6416be41cda27711d9ec22b3c0ed4364ff6975a24a774179c52ef7e6de9718d6", + "0x0622d31b8c4ac7f2e30448bdadfebd5baddc865e0759057a6bf7d2a2c8b527e2", + "0x40f096513588cc19c08a69e4a48ab6a43739df4450b86d3ec2fb3c6a743b5485", + "0x09fcf7d49290785c9ea2d54c3d63f84f6ea0a2e9acfcdbb0cc3a281ce438250e", + "0x2000a519bf3da827f580982d449b5c70fcc0d4fa232addabe47bb8b1c471e62e", + "0xf4f80008518e200c40b043f34fb87a6f61b82f8c737bd784292911af3740245e", + "0x939eaab59f3d2ad49e50a0220080882319db7633274a978ced03489870945a65", + "0xadcad043d8c753fb10689280b7670f313253f5d719039e250a673d94441ee17c", + "0x58b7b75f090166b8954c61057074707d7e38d55ce39d9b2251bbc3d72be458f8", + "0xf61031890c94c5f87229ec608f2a9aa0a3f455ba8094b78395ae312cbfa04087", + "0x356a55def50139f94945e4ea432e7a9defa5db7975462ebb6ca99601c614ea1d", + "0x65963bb743d5db080005c4db59e29c4a4e86f92ab1dd7a59f69ea7eaf8e9aa79", + ], + lamport_1: vec![ + 
"0x9c0bfb14de8d2779f88fc8d5b016f8668be9e231e745640096d35dd5f53b0ae2", + "0x756586b0f3227ab0df6f4b7362786916bd89f353d0739fffa534368d8d793816", + "0x710108dddc39e579dcf0819f9ad107b3c56d1713530dd94325db1d853a675a37", + "0x8862b5f428ce5da50c89afb50aa779bb2c4dfe60e6f6a070b3a0208a4a970fe5", + "0x54a9cd342fa3a4bf685c01d1ce84f3068b0d5b6a58ee22dda8fbac4908bb9560", + "0x0fa3800efeaddd28247e114a1cf0f86b9014ccae9c3ee5f8488168b1103c1b44", + "0xbb393428b7ebfe2eda218730f93925d2e80c020d41a29f4746dcbb9138f7233a", + "0x7b42710942ef38ef2ff8fe44848335f26189c88c22a49fda84a51512ac68cd5d", + "0x90e99786a3e8b04db95ccd44d01e75558d75f3ddd12a1e9a2c2ce76258bf4813", + "0x3f6f71e40251728aa760763d25deeae54dc3a9b53807c737deee219120a2230a", + "0xe56081a7933c6eaf4ef2c5a04e21ab8a3897785dd83a34719d1b62d82cfd00c2", + "0x76cc54fa15f53e326575a9a2ac0b8ed2869403b6b6488ce4f3934f17db0f6bee", + "0x1cd9cd1d882ea3830e95162b5de4beb5ddff34fdbf7aec64e83b82a6d11b417c", + "0xb8ca8ae36d717c448aa27405037e44d9ee28bb8c6cc538a5d22e4535c8befd84", + "0x5c4492108c25f873a23d5fd7957b3229edc22858e8894febe7428c0831601982", + "0x907bcd75e7465e9791dc34e684742a2c0dc7007736313a95070a7e6b961c9c46", + "0xe7134b1511559e6b2440672073fa303ec3915398e75086149eb004f55e893214", + "0x2ddc2415e4753bfc383d48733e8b2a3f082883595edc5515514ebb872119af09", + "0xf2ad0f76b08ffa1eee62228ba76f4982fab4fbede5d4752c282c3541900bcd5b", + "0x0a84a6b15abd1cbc2da7092bf7bac418b8002b7000236dfba7c8335f27e0f1d4", + "0x97404e02b9ff5478c928e1e211850c08cc553ebac5d4754d13efd92588b1f20d", + "0xfa6ca3bcff1f45b557cdec34cb465ab06ade397e9d9470a658901e1f0f124659", + "0x5bd972d55f5472e5b08988ee4bccc7240a8019a5ba338405528cc8a38b29bc21", + "0x52952e4f96c803bb76749800891e3bfe55f7372facd5b5a587a39ac10b161bcc", + "0xf96731ae09abcad016fd81dc4218bbb5b2cb5fe2e177a715113f381814007314", + "0xe7d79e07cf9f2b52623491519a21a0a3d045401a5e7e10dd8873a85076616326", + "0xe4892f3777a4614ee6770b22098eaa0a3f32c5c44b54ecedacd69789d676dffe", + 
"0x20c932574779e2cc57780933d1dc6ce51a5ef920ce5bf681f7647ac751106367", + "0x057252c573908e227cc07797117701623a4835f4b047dcaa9678105299e48e70", + "0x20bad780930fa2a036fe1dea4ccbf46ac5b3c489818cdb0f97ae49d6e2f11fbf", + "0xc0d7dd26ffecdb098585a1694e45a54029bb1e31c7c5209289058efebb4cc91b", + "0x9a8744beb1935c0abe4b11812fc02748ef7c8cb650db3024dde3c5463e9d8714", + "0x8ce6eea4585bbeb657b326daa4f01f6aef34954338b3ca42074aedd1110ba495", + "0x1c85b43f5488b370721290d2faea19d9918d094c99963d6863acdfeeca564363", + "0xe88a244347e448349e32d0525b40b18533ea227a9d3e9b78a9ff14ce0a586061", + "0x352ca61efc5b8ff9ee78e738e749142dd1606154801a1449bbb278fa6bcc3dbe", + "0xa066926f9209220b24ea586fb20eb8199a05a247c82d7af60b380f6237429be7", + "0x3052337ccc990bfbae26d2f9fe5d7a4eb8edfb83a03203dca406fba9f4509b6e", + "0x343ce573a93c272688a068d758df53c0161aa7f9b55dec8beced363a38b33069", + "0x0f16b5593f133b58d706fe1793113a10750e8111eadee65301df7a1e84f782d3", + "0x808ae8539357e85b648020f1e9d255bc4114bee731a6220d7c5bcb5b85224e03", + "0x3b2bd97e31909251752ac57eda6015bb05b85f2838d475095cfd146677430625", + "0xe4f857c93b2d8b250050c7381a6c7c660bd29066195806c8ef11a2e6a6640236", + "0x23d91589b5070f443ddcefa0838c596518d54928119251ecf3ec0946a8128f52", + "0xb72736dfad52503c7f5f0c59827fb6ef4ef75909ff9526268abc0f296ee37296", + "0x80a8c66436d86b8afe87dde7e53a53ef87e057a5d4995963e76d159286de61b6", + "0xbec92c09ee5e0c84d5a8ba6ca329683ff550ace34631ea607a3a21f99cd36d67", + "0x83c97c9807b9ba6d9d914ae49dabdb4c55e12e35013f9b179e6bc92d5d62222b", + "0x8d9c79f6af3920672dc4cf97a297c186e75083d099aeb5c1051207bad0c98964", + "0x2aaa5944a2bd852b0b1be3166e88f357db097b001c1a71ba92040b473b30a607", + "0x46693d27ec4b764fbb516017c037c441f4558aebfe972cdcd03da67c98404e19", + "0x903b25d9e12208438f203c9ae2615b87f41633d5ffda9cf3f124c1c3922ba08f", + "0x3ec23dc8bc1b49f5c7160d78008f3f235252086a0a0fa3a7a5a3a53ad29ec410", + "0xa1fe74ceaf3cccd992001583a0783d7d7b7a245ea374f369133585b576b9c6d8", + 
"0xb2d6b0fe4932a2e06b99531232398f39a45b0f64c3d4ebeaaebc8f8e50a80607", + "0xe19893353f9214eebf08e5d83c6d44c24bffe0eceee4dc2e840d42eab0642536", + "0x5b798e4bc099fa2e2b4b5b90335c51befc9bbab31b4dd02451b0abd09c06ee79", + "0xbab2cdec1553a408cac8e61d9e6e19fb8ccfb48efe6d02bd49467a26eeeca920", + "0x1c1a544c28c38e5c423fe701506693511b3bc5f2af9771b9b2243cd8d41bebfc", + "0x704d6549d99be8cdefeec9a58957f75a2be4af7bc3dc4655fa606e7f3e03b030", + "0x051330f43fe39b08ed7d82d68c49b36a8bfa31357b546bfb32068712df89d190", + "0xe69174c7b03896461cab2dfaab33d549e3aac15e6b0f6f6f466fb31dae709b9b", + "0xe5f668603e0ddbbcde585ac41c54c3c4a681fffb7a5deb205344de294758e6ac", + "0xca70d5e4c3a81c1f21f246a3f52c41eaef9a683f38eb7c512eac8b385f46cbcd", + "0x3173a6b882b21cd147f0fc60ef8f24bbc42104caed4f9b154f2d2eafc3a56907", + "0xc71469c192bf5cc36242f6365727f57a19f924618b8a908ef885d8f459833cc3", + "0x59c596fc388afd8508bd0f5a1e767f3dda9ed30f6646d15bc59f0b07c4de646f", + "0xb200faf29368581f551bd351d357b6fa8cbf90bdc73b37335e51cad36b4cba83", + "0x275cede69b67a9ee0fff1a762345261cb20fa8191470159cc65c7885cfb8313c", + "0x0ce4ef84916efbe1ba9a0589bed098793b1ea529758ea089fd79151cc9dc7494", + "0x0f08483bb720e766d60a3cbd902ce7c9d835d3f7fdf6dbe1f37bcf2f0d4764a2", + "0xb30a73e5db2464e6da47d10667c82926fa91fceb337d89a52db5169008bc6726", + "0x6b9c50fed1cc404bf2dd6fffbfd18e30a4caa1500bfeb080aa93f78d10331aaf", + "0xf17c84286df03ce175966f560600dd562e0f59f18f1d1276b4d8aca545d57856", + "0x11455f2ef96a6b2be69854431ee219806008eb80ea38c81e45b2e58b3f975a20", + "0x9a61e03e2157a5c403dfcde690f7b7d704dd56ea1716cf14cf7111075a8d6491", + "0x30312c910ce6b39e00dbaa669f0fb7823a51f20e83eaeb5afa63fb57668cc2f4", + "0x17c18d261d94fba82886853a4f262b9c8b915ed3263b0052ece5826fd7e7d906", + "0x2d8f6ea0f5b9d0e4bc1478161f5ed2ad3d8495938b414dcaec9548adbe572671", + "0x19954625f13d9bab758074bf6dee47484260d29ee118347c1701aaa74abd9848", + "0x842ef2ad456e6f53d75e91e8744b96398df80350cf7af90b145fea51fbbcf067", + 
"0x34a8b0a76ac20308aa5175710fb3e75c275b1ff25dba17c04e3a3e3c48ca222c", + "0x58efcbe75f32577afe5e9ff827624368b1559c32fcca0cf4fd704af8ce019c63", + "0x411b4d242ef8f14d92bd8b0b01cb4fa3ca6f29c6f9073cfdd3ce614fa717463b", + "0xf76dbda66ede5e789314a88cff87ecb4bd9ca418c75417d4d920e0d21a523257", + "0xd801821a0f87b4520c1b003fe4936b6852c410ee00b46fb0f81621c9ac6bf6b4", + "0x97ad11d6a29c8cf3c548c094c92f077014de3629d1e9053a25dbfaf7eb55f72d", + "0xa87012090cd19886d49521d564ab2ad0f18fd489599050c42213bb960c9ee8ff", + "0x8868d8a26e758d50913f2bf228da0444a206e52853bb42dd8f90f09abe9c859a", + "0xc257fb0cc9970e02830571bf062a14540556abad2a1a158f17a18f14b8bcbe95", + "0xfe611ce27238541b14dc174b652dd06719dfbcda846a027f9d1a9e8e9df2c065", + "0xc9b25ea410f420cc2d4fc6057801d180c6cab959bce56bf6120f555966e6de6d", + "0x95437f0524ec3c04d4132c83be7f1a603e6f4743a85ede25aa97a1a4e3f3f8fc", + "0x82a12910104065f35e983699c4b9187aed0ab0ec6146f91728901efecc7e2e20", + "0x6622dd11e09252004fb5aaa39e283333c0686065f228c48a5b55ee2060dbd139", + "0x89a2879f25733dab254e4fa6fddb4f04b8ddf018bf9ad5c162aea5c858e6faaa", + "0x8a71b62075a6011fd9b65d956108fa79cc9ebb8f194d64d3105a164e01cf43a6", + "0x103f4fe9ce211b6452181371f0dc4a30a557064b684645a4495136f4ebd0936a", + "0x97914adc5d7ce80147c2f44a6b29d0b495d38dedd8cc299064abcc62ed1ddabc", + "0x825c481da6c836a8696d7fda4b0563d204a9e7d9e4c47b46ded26db3e2d7d734", + "0xf8c0637ba4c0a383229f1d730db733bc11d6a4e33214216c23f69ec965dcaaad", + "0xaed3bdaf0cb12d37764d243ee0e8acdefc399be2cabbf1e51dc43454efd79cbd", + "0xe8427f56cc5cec8554e2f5f586b57adccbea97d5fc3ef7b8bbe97c2097cf848c", + "0xba4ad0abd5c14d526357fd0b6f8676ef6126aeb4a6d80cabe1f1281b9d28246c", + "0x4cff20b72e2ab5af3fafbf9222146949527c25f485ec032f22d94567ff91b22f", + "0x0d32925d89dd8fed989912afcbe830a4b5f8f7ae1a3e08ff1d3a575a77071d99", + "0xe51a1cbeae0be5d2fdbc7941aea904d3eade273f7477f60d5dd6a12807246030", + "0xfb8615046c969ef0fa5e6dc9628c8a9880e86a5dc2f6fc87aff216ea83fcf161", + 
"0x64dd705e105c88861470d112c64ca3d038f67660a02d3050ea36c34a9ebf47f9", + "0xb6ad148095c97528180f60fa7e8609bf5ce92bd562682092d79228c2e6f0750c", + "0x5bae0cd81f3bd0384ca3143a72068e6010b946462a73299e746ca639c026781c", + "0xc39a0fc7764fcfc0402b12fb0bbe78fe3633cbfb33c7f849279585a878a26d7c", + "0x2b752fda1c0c53d685cc91144f78d371db6b766725872b62cc99e1234cca8c1a", + "0x40ee6b9635d87c95a528757729212a261843ecb06d975de91352d43ca3c7f196", + "0x75e2005d3726cf8a4bb97ea5287849a361e3f8fdfadc3c1372feed1208c89f6b", + "0x0976f8ab556153964b58158678a5297da4d6ad92e284da46052a791ee667aee4", + "0xdbeef07841e41e0672771fb550a5b9233ae8e9256e23fa0d34d5ae5efe067ec8", + "0xa890f412ab6061c0c5ee661e80d4edc5c36b22fb79ac172ddd5ff26a7dbe9751", + "0xb666ae07f9276f6d0a33f9efeb3c5cfcba314fbc06e947563db92a40d7a341e8", + "0x83a082cf97ee78fbd7f31a01ae72e40c2e980a6dab756161544c27da86043528", + "0xfa726a919c6f8840c456dc77b0fec5adbed729e0efbb9317b75f77ed479c0f44", + "0xa8606800c54faeab2cbc9d85ff556c49dd7e1a0476027e0f7ce2c1dc2ba7ccbf", + "0x2796277836ab4c17a584c9f6c7778d10912cb19e541fb75453796841e1f6cd1c", + "0xf648b8b3c7be06f1f8d9cda13fd6d60f913e5048a8e0b283b110ca427eeb715f", + "0xa21d00b8fdcd77295d4064e00fbc30bed579d8255e9cf3a9016911d832390717", + "0xe741afcd98cbb3bb140737ed77bb968ac60d5c00022d722f9f04f56e97235dc9", + "0xbeecc9638fac39708ec16910e5b02c91f83f6321f6eb658cf8a96353cfb49806", + "0x912eee6cabeb0fed8d6e6ca0ba61977fd8e09ea0780ff8fbec995e2a85e08b52", + "0xc665bc0bb121a1229bc56ecc07a7e234fd24c523ea14700aa09e569b5f53ad33", + "0x39501621c2bdff2f62ab8d8e3fe47fe1701a98c665697c5b750ee1892f11846e", + "0x03d32e16c3a6c913daefb139f131e1e95a742b7be8e20ee39b785b4772a50e44", + "0x4f504eb46a82d440f1c952a06f143994bc66eb9e3ed865080cd9dfc6d652b69c", + "0xad753dc8710a46a70e19189d8fc7f4c773e4d9ccc7a70c354b574fe377328741", + "0xf7f5464a2d723b81502adb9133a0a4f0589b4134ca595a82e660987c6b011610", + "0x216b60b1c3e3bb4213ab5d43e04619d13e1ecedbdd65a1752bda326223e3ca3e", + 
"0x763664aa96d27b6e2ac7974e3ca9c9d2a702911bc5d550d246631965cf2bd4a2", + "0x292b5c8c8431b040c04d631f313d4e6b67b5fd3d4b8ac9f2edb09d13ec61f088", + "0x80db43c2b9e56eb540592f15f5900222faf3f75ce62e78189b5aa98c54568a5e", + "0x1b5fdf8969bcd4d65e86a2cefb3a673e18d587843f4f50db4e3ee77a0ba2ef1c", + "0x11e237953fff3e95e6572da50a92768467ffdfd0640d3384aa1c486357e7c24a", + "0x1fabd4faa8dba44808cc87d0bc389654a98496745578f3d17d134adc7f7b10f3", + "0x5eca4aa96f20a56197772ae6b600762154ca9d2702cab12664ea47cbff1a440c", + "0x0b4234f5bb02abcf3b5ce6c44ea85f55ec7db98fa5a7b90abef6dd0df034743c", + "0x316761e295bf350313c4c92efea591b522f1df4211ce94b22e601f30aefa51ef", + "0xe93a55ddb4d7dfe02598e8f909ff34b3de40a1c0ac8c7fba48cb604ea60631fb", + "0xe6e6c877b996857637f8a71d0cd9a6d47fdeb03752c8965766f010073332b087", + "0xa4f95c8874e611eddd2c4502e4e1196f0f1be90bfc37db35f8588e7d81d34aeb", + "0x9351710a5633714bb8b2d226e15ba4caa6f50f56c5508e5fa1239d5cc6a7e1aa", + "0x8d0aef52ec7266f37adb572913a6213b8448caaf0384008373dec525ae6cdff1", + "0x718e24c3970c85bcb14d2763201812c43abac0a7f16fc5787a7a7b2f37288586", + "0x3600ce44cebc3ee46b39734532128eaf715c0f3596b554f8478b961b0d6e389a", + "0x50dd1db7b0a5f6bd2d16252f43254d0f5d009e59f61ebc817c4bbf388519a46b", + "0x67861ed00f5fef446e1f4e671950ac2ddae1f3b564f1a6fe945e91678724ef03", + "0x0e332c26e169648bc20b4f430fbf8c26c6edf1a235f978d09d4a74c7b8754aad", + "0x6c9901015adf56e564dfb51d41a82bde43fb67273b6911c9ef7fa817555c9557", + "0x53c83391e5e0a024f68d5ade39b7a769f10664e12e4942c236398dd5dbce47a1", + "0x78619564f0b2399a9fcb229d938bf1e298d62b03b7a37fe6486034185d7f7d27", + "0x4625f15381a8723452ec80f3dd0293c213ae35de737c508f42427e1735398c3a", + "0x69542425ddb39d3d3981e76b41173eb1a09500f11164658a3536bf3e292f8b6a", + "0x82ac4f5bb40aece7d6706f1bdf4dfba5c835c09afba6446ef408d8ec6c09300f", + "0x740f9180671091b4c5b3ca59b9515bd0fc751f48e488a9f7f4b6848602490e21", + "0x9a04b08b4115986d8848e80960ad67490923154617cb82b3d88656ec1176c24c", + 
"0xf9ffe528eccffad519819d9eef70cef317af33899bcaee16f1e720caf9a98744", + "0x46da5e1a14b582b237f75556a0fd108c4ea0d55c0edd8f5d06c59a42e57410df", + "0x098f3429c8ccda60c3b5b9755e5632dd6a3f5297ee819bec8de2d8d37893968a", + "0x1a5b91af6025c11911ac072a98b8a44ed81f1f3c76ae752bd28004915db6f554", + "0x8bed50c7cae549ed4f8e05e02aa09b2a614c0af8eec719e4c6f7aee975ec3ec7", + "0xd86130f624b5dcc116f2dfbb5219b1afde4b7780780decd0b42694e15c1f8d8b", + "0x4167aa9bc0075f624d25d40eb29139dd2c452ebf17739fab859e14ac6765337a", + "0xa258ce5db20e91fb2ea30d607ac2f588bdc1924b21bbe39dc881e19889a7f5c6", + "0xe5ef8b5ab3cc8894452d16dc875b69a55fd925808ac7cafef1cd19485d0bb50a", + "0x120df2b3975d85b6dfca56bb98a82025ade5ac1d33e4319d2e0105b8de9ebf58", + "0xc964291dd2e0807a468396ebba3d59cfe385d949f6d6215976fc9a0a11de209a", + "0xf23f14cb709074b79abe166f159bc52b50de687464df6a5ebf112aa953c95ad5", + "0x622c092c9bd7e30f880043762e26d8e9c73ab7c0d0806f3c5e472a4152b35a93", + "0x8a5f090662731e7422bf651187fb89812419ab6808f2c62da213d6944fccfe9f", + "0xfbea3c0d92e061fd2399606f42647d65cc54191fa46d57b325103a75f5c22ba6", + "0x2babfbcc08d69b52c3747ddc8dcad4ea5511edabf24496f3ff96a1194d6f680e", + "0x4d3d019c28c779496b616d85aee201a3d79d9eecf35f728d00bcb12245ace703", + "0xe76fcee1f08325110436f8d4a95476251326b4827399f9b2ef7e12b7fb9c4ba1", + "0x4884d9c0bb4a9454ea37926591fc3eed2a28356e0506106a18f093035638da93", + "0x74c3f303d93d4cc4f0c1eb1b4378d34139220eb836628b82b649d1deb519b1d3", + "0xacb806670b278d3f0c84ba9c7a68c7df3b89e3451731a55d7351468c7c864c1c", + "0x8660fb8cd97e585ea7a41bccb22dd46e07eee8bbf34d90f0f0ca854b93b1ebee", + "0x2fc9c89cdca71a1c0224d469d0c364c96bbd99c1067a7ebe8ef412c645357a76", + "0x8ec6d5ab6ad7135d66091b8bf269be44c20af1d828694cd8650b5479156fd700", + "0x50ab4776e8cabe3d864fb7a1637de83f8fbb45d6e49645555ffe9526b27ebd66", + "0xbf39f5e17082983da4f409f91c7d9059acd02ccbefa69694aca475bb8d40b224", + "0x3135b3b981c850cc3fe9754ec6af117459d355ad6b0915beb61e84ea735c31bf", + 
"0xa7971dab52ce4bf45813223b0695f8e87f64b614c9c5499faac6f842e5c41be9", + "0x9e480f5617323ab104b4087ac4ef849a5da03427712fb302ac085507c77d8f37", + "0x57a6d474654d5e8d408159be39ad0e7026e6a4c6a6543e23a63d30610dc8dfc1", + "0x09eb3e01a5915a4e26d90b4c58bf0cf1e560fdc8ba53faed9d946ad3e9bc78fa", + "0x29c6d25da80a772310226b1b89d845c7916e4a4bc94d75aa330ec3eaa14b1e28", + "0x1a1ccfee11edeb989ca02e3cb89f062612a22a69ec816a625835d79370173987", + "0x1cb63dc541cf7f71c1c4e8cabd2619c3503c0ea1362dec75eccdf1e9efdbfcfc", + "0xac9dff32a69e75b396a2c250e206b36c34c63b955c9e5732e65eaf7ccca03c62", + "0x3e1b4f0c3ebd3d38cec389720147746774fc01ff6bdd065f0baf2906b16766a8", + "0x5cc8bed25574463026205e90aad828521f8e3d440970d7e810d1b46849681db5", + "0x255185d264509bd3a768bb0d50b568e66eb1fec96d573e33aaacc716d7c8fb93", + "0xe81b86ba631973918a859ff5995d7840b12511184c2865401f2693a71b9fa07e", + "0x61e67e42616598da8d36e865b282127c761380d3a56d26b8d35fbbc7641433c5", + "0x60c62ffef83fe603a34ca20b549522394e650dad5510ae68b6e074f0cd209a56", + "0x78577f2caf4a54f6065593535d76216f5f4075af7e7a98b79571d33b1822920c", + "0xfd4cb354f2869c8650200de0fe06f3d39e4dbebf19b0c1c2677da916ea84f44d", + "0x453769cef6ff9ba2d5c917982a1ad3e2f7e947d9ea228857556af0005665e0b0", + "0xe567f93f8f88bf1a6b33214f17f5d60c5dbbb531b4ab21b8c0b799b6416891e0", + "0x7e65a39a17f902a30ceb2469fe21cba8d4e0da9740fcefd5c647c81ff1ae95fa", + "0x03e4a7eea0cd6fc02b987138ef88e8795b5f839636ca07f6665bbae9e5878931", + "0xc3558e2b437cf0347cabc63c95fa2710d3f43c65d380feb998511903f9f4dcf0", + "0xe3a615f80882fb5dfbd08c1d7a8b0a4d3b651d5e8221f99b879cb01d97037a9c", + "0xb56db4a5fea85cbffaee41f05304689ea321c40d4c108b1146fa69118431d9b2", + "0xab28e1f077f18117945910c235bc9c6f9b6d2b45e9ef03009053006c637e3e26", + "0xefcabc1d5659fd6e48430dbfcc9fb4e08e8a9b895f7bf9b3d6c7661bfc44ada2", + "0xc7547496f212873e7c3631dafaca62a6e95ac39272acf25a7394bac6ea1ae357", + "0xc482013cb01bd69e0ea9f447b611b06623352e321469f4adc739e3ee189298eb", + 
"0x5942f42e91e391bb44bb2c4d40da1906164dbb6d1c184f00fa62899baa0dba2c", + "0xb4bcb46c80ad4cd603aff2c1baf8f2c896a628a46cc5786f0e58dae846694677", + "0xd0a7305b995fa8c317c330118fee4bfef9f65f70b54558c0988945b08e90ff08", + "0x687f801b7f32fdfa7d50274cc7b126efedbdae8de154d36395d33967216f3086", + "0xeb19ec10ac6c15ffa619fa46792971ee22a9328fa53bd69a10ed6e9617dd1bbf", + "0xa2bb3f0367f62abdb3a9fa6da34b20697cf214a4ff14fd42826da140ee025213", + "0x070a76511f32c882374400af59b22d88974a06fbc10d786dd07ca7527ebd8b90", + "0x8f195689537b446e946b376ec1e9eb5af5b4542ab47be550a5700fa5d81440d5", + "0x10cc09778699fc8ac109e7e6773f83391eeba2a6db5226fbe953dd8d99126ca5", + "0x8cc839cb7dc84fd3b8c0c7ca637e86a2f72a8715cc16c7afb597d12da717530b", + "0xa32504e6cc6fd0ee441440f213f082fcf76f72d36b5e2a0f3b6bdd50cdd825a2", + "0x8f45151db8878e51eec12c450b69fa92176af21a4543bb78c0d4c27286e74469", + "0x23f5c465bd35bcd4353216dc9505df68324a27990df9825a242e1288e40a13bb", + "0x35f409ce748af33c20a6ae693b8a48ba4623de9686f9834e22be4410e637d24f", + "0xb962e5845c1db624532562597a99e2acc5e434b97d8db0725bdeddd71a98e737", + "0x0f8364f99f43dd52b4cfa9e426c48f7b6ab18dc40a896e96a09eceebb3363afe", + "0xa842746868da7644fccdbb07ae5e08c71a6287ab307c4f9717eadb414c9c99f4", + "0xa59064c6b7fe7d2407792d99ed1218d2dc2f240185fbd8f767997438241b92e9", + "0xb6ea0d58e8d48e05b9ff4d75b2ebe0bd9752c0e2691882f754be66cdec7628d3", + "0xf16b78c9d14c52b2b5156690b6ce37a5e09661f49674ad22604c7d3755e564d1", + "0xbfa8ef74e8a37cd64b8b4a4260c4fc162140603f9c2494b9cf4c1e13de522ed9", + "0xf4b89f1776ebf30640dc5ec99e43de22136b6ef936a85193ef940931108e408a", + "0xefb9a4555d495a584dbcc2a50938f6b9827eb014ffae2d2d0aae356a57894de8", + "0x0627a466d42a26aca72cf531d4722e0e5fc5d491f4527786be4e1b641e693ac2", + "0x7d10d21542de3d8f074dbfd1a6e11b3df32c36272891aae54053029d39ebae10", + "0x0f21118ee9763f46cc175a21de876da233b2b3b62c6f06fa2df73f6deccf37f3", + "0x143213b96f8519c15164742e2350cc66e814c9570634e871a8c1ddae4d31b6b5", + 
"0x8d2877120abae3854e00ae8cf5c8c95b3ede10590ab79ce2be7127239507e18d", + "0xaccd0005d59472ac04192c059ed9c10aea42c4dabec9e581f6cb10b261746573", + "0x67bc8dd5422f39e741b9995e6e60686e75d6620aa0d745b84191f5dba9b5bb18", + "0x11b8e95f6a654d4373cefbbac29a90fdd8ae098043d1969b9fa7885318376b34", + "0x431a0b8a6f08760c942eeff5791e7088fd210f877825ce4dcabe365e03e4a65c", + "0x704007f11bae513f428c9b0d23593fd2809d0dbc4c331009856135dafec23ce4", + "0xc06dee39a33a05e30c522061c1d9272381bde3f9e42fa9bd7d5a5c8ef11ec6ec", + "0x66b4157baaae85db0948ad72882287a80b286df2c40080b8da4d5d3db0a61bd2", + "0xef1983b1906239b490baaaa8e4527f78a57a0a767d731f062dd09efb59ae8e3d", + "0xf26d0d5c520cce6688ca5d51dee285af26f150794f2ea9f1d73f6df213d78338", + "0x8b28838382e6892f59c42a7709d6d38396495d3af5a8d5b0a60f172a6a8940bd", + "0x261a605fa5f2a9bdc7cffac530edcf976e7ea7af4e443b625fe01ed39dad44b6", + ], + compressed_lamport_pk: + "0xdd635d27d1d52b9a49df9e5c0c622360a4dd17cba7db4e89bce3cb048fb721a5", + child_sk: + "20397789859736650942317412262472558107875392172444076792671091975210932703118", } } } diff --git a/crypto/eth2_key_derivation/tests/eip2333_vectors.rs b/crypto/eth2_key_derivation/tests/eip2333_vectors.rs index 6995bd087b4..e4406ab1f7e 100644 --- a/crypto/eth2_key_derivation/tests/eip2333_vectors.rs +++ b/crypto/eth2_key_derivation/tests/eip2333_vectors.rs @@ -65,9 +65,9 @@ fn assert_vector_passes(raw: RawTestVector) { fn eip2333_test_case_0() { assert_vector_passes(RawTestVector { seed: "0xc55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", - master_sk: "12513733877922233913083619867448865075222526338446857121953625441395088009793", + master_sk: "6083874454709270928345386274498605044986640685124978867557563392430687146096", child_index: 0, - child_sk: "7419543105316279183937430842449358701327973165530407166294956473095303972104" + child_sk: "20397789859736650942317412262472558107875392172444076792671091975210932703118", }) } 
@@ -75,9 +75,9 @@ fn eip2333_test_case_0() { fn eip2333_test_case_1() { assert_vector_passes(RawTestVector { seed: "0x3141592653589793238462643383279502884197169399375105820974944592", - master_sk: "46029459550803682895343812821003080589696405386150182061394330539196052371668", + master_sk: "29757020647961307431480504535336562678282505419141012933316116377660817309383", child_index: 3141592653, - child_sk: "43469287647733616183478983885105537266268532274998688773496918571876759327260", + child_sk: "25457201688850691947727629385191704516744796114925897962676248250929345014287", }) } @@ -85,9 +85,9 @@ fn eip2333_test_case_1() { fn eip2333_test_case_2() { assert_vector_passes(RawTestVector { seed: "0x0099FF991111002299DD7744EE3355BBDD8844115566CC55663355668888CC00", - master_sk: "45379166311535261329029945990467475187325618028073620882733843918126031931161", + master_sk: "27580842291869792442942448775674722299803720648445448686099262467207037398656", child_index: 4294967295, - child_sk: "46475244006136701976831062271444482037125148379128114617927607151318277762946", + child_sk: "29358610794459428860402234341874281240803786294062035874021252734817515685787", }) } @@ -95,8 +95,8 @@ fn eip2333_test_case_2() { fn eip2333_test_case_3() { assert_vector_passes(RawTestVector { seed: "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", - master_sk: "31740500954810567003972734830331791822878290325762596213711963944729383643688", + master_sk: "19022158461524446591288038168518313374041767046816487870552872741050760015818", child_index: 42, - child_sk: "51041472511529980987749393477251359993058329222191894694692317000136653813011", + child_sk: "31372231650479070279774297061823572166496564838472787488249775572789064611981", }) } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index e4b55fd01d9..89d7a562daf 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "0.2.11" 
+version = "0.2.12" authors = ["Paul Hauner "] edition = "2018" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 087321d3b12..fdc8de36d64 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "0.2.11" +version = "0.2.12" authors = ["Sigma Prime "] edition = "2018" @@ -9,9 +9,6 @@ edition = "2018" write_ssz_files = ["beacon_node/write_ssz_files"] # Compiles the BLS crypto code so that the binary is portable across machines. portable = ["bls/supranational-portable"] -# Compiles BLST so that it always uses ADX instructions. -# Compatible with processors from 2013 onwards. -modern = ["bls/supranational-force-adx"] # Uses the slower Milagro BLS library, which is written in native Rust. milagro = ["bls/milagro"] diff --git a/lighthouse/environment/tests/environment_builder.rs b/lighthouse/environment/tests/environment_builder.rs index 505f1e632e3..8594825a5f5 100644 --- a/lighthouse/environment/tests/environment_builder.rs +++ b/lighthouse/environment/tests/environment_builder.rs @@ -3,7 +3,7 @@ use environment::EnvironmentBuilder; use eth2_testnet_config::Eth2TestnetConfig; use std::path::PathBuf; -use types::{Epoch, MainnetEthSpec, YamlConfig}; +use types::{MainnetEthSpec, YamlConfig}; fn builder() -> EnvironmentBuilder { EnvironmentBuilder::mainnet() @@ -36,8 +36,8 @@ mod setup_eth2_config { .expect("should build environment"); assert_eq!( - environment.eth2_config.spec.far_future_epoch, - Epoch::new(999) // see testnet_dir/config.yaml + environment.eth2_config.spec.max_committees_per_slot, + 128 // see testnet_dir/config.yaml ); } } diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 7404c99ff3a..493827f2987 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -1,56 +1,156 @@ -FAR_FUTURE_EPOCH: 999 # for testing -BASE_REWARDS_PER_EPOCH: 4 
-DEPOSIT_CONTRACT_TREE_DEPTH: 32 -MAX_COMMITTEES_PER_SLOT: 64 +# Mainnet preset +# Note: the intention of this file (for now) is to illustrate what a mainnet configuration could look like. +# Some of these constants may still change before the launch of Phase 0. + +CONFIG_NAME: "mainnet" + +# Misc +# --------------------------------------------------------------- +MAX_COMMITTEES_PER_SLOT: 128 # MODIFIED FOR TESTING +# 2**7 (= 128) TARGET_COMMITTEE_SIZE: 128 +# 2**11 (= 2,048) +MAX_VALIDATORS_PER_COMMITTEE: 2048 +# 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 +# See issue 563 SHUFFLE_ROUND_COUNT: 90 -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 4096 -MIN_GENESIS_TIME: 0 -GENESIS_DELAY: 3600 -MIN_DEPOSIT_AMOUNT: 10000000 -MAX_EFFECTIVE_BALANCE: 3200000000 -EJECTION_BALANCE: 1600000000 -EFFECTIVE_BALANCE_INCREMENT: 100000000 -GENESIS_SLOT: 0 -GENESIS_FORK_VERSION: 0x01030307 +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 +# Jan 3, 2020 +MIN_GENESIS_TIME: 1578009600 +# 4 +HYSTERESIS_QUOTIENT: 4 +# 1 (minus 0.25) +HYSTERESIS_DOWNWARD_MULTIPLIER: 1 +# 5 (plus 1.25) +HYSTERESIS_UPWARD_MULTIPLIER: 5 +# 3 +PROPORTIONAL_SLASHING_MULTIPLIER: 3 + + +# Fork Choice +# --------------------------------------------------------------- +# 2**3 (= 8) +SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 + + +# Validator +# --------------------------------------------------------------- +# 2**10 (= 1,024) +ETH1_FOLLOW_DISTANCE: 1024 +# 2**4 (= 16) +TARGET_AGGREGATORS_PER_COMMITTEE: 16 +# 2**0 (= 1) +RANDOM_SUBNETS_PER_VALIDATOR: 1 +# 2**8 (= 256) +EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 + + +# Deposit contract +# --------------------------------------------------------------- +# Ethereum PoW Mainnet +DEPOSIT_CHAIN_ID: 1 +DEPOSIT_NETWORK_ID: 1 +# **TBD** +DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 + + +# Gwei values +# 
--------------------------------------------------------------- +# 2**0 * 10**9 (= 1,000,000,000) Gwei +MIN_DEPOSIT_AMOUNT: 1000000000 +# 2**5 * 10**9 (= 32,000,000,000) Gwei +MAX_EFFECTIVE_BALANCE: 32000000000 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**0 * 10**9 (= 1,000,000,000) Gwei +EFFECTIVE_BALANCE_INCREMENT: 1000000000 + + +# Initial values +# --------------------------------------------------------------- +# Mainnet initial fork version, recommend altering for testnets +GENESIS_FORK_VERSION: 0x00000000 BLS_WITHDRAWAL_PREFIX: 0x00 + + +# Time parameters +# --------------------------------------------------------------- +# 172800 seconds (2 days) +GENESIS_DELAY: 172800 +# 12 seconds SECONDS_PER_SLOT: 12 +# 2**0 (= 1) slots 12 seconds MIN_ATTESTATION_INCLUSION_DELAY: 1 +# 2**5 (= 32) slots 6.4 minutes +SLOTS_PER_EPOCH: 32 +# 2**0 (= 1) epochs 6.4 minutes MIN_SEED_LOOKAHEAD: 1 +# 2**2 (= 4) epochs 25.6 minutes MAX_SEED_LOOKAHEAD: 4 -MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -PERSISTENT_COMMITTEE_PERIOD: 2048 -BASE_REWARD_FACTOR: 64 -WHISTLEBLOWER_REWARD_QUOTIENT: 512 -PROPOSER_REWARD_QUOTIENT: 8 -INACTIVITY_PENALTY_QUOTIENT: 33554432 -MIN_SLASHING_PENALTY_QUOTIENT: 32 -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 -DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_BEACON_ATTESTER: 0x01000000 -DOMAIN_RANDAO: 0x02000000 -DOMAIN_DEPOSIT: 0x03000000 -DOMAIN_VOLUNTARY_EXIT: 0x04000000 -JUSTIFICATION_BITS_LENGTH: 0x04000000 -MAX_VALIDATORS_PER_COMMITTEE: 2048 -GENESIS_EPOCH: 0 -SLOTS_PER_EPOCH: 32 -SLOTS_PER_ETH1_VOTING_PERIOD: 1024 +# 2**5 (= 32) epochs ~3.4 hours +EPOCHS_PER_ETH1_VOTING_PERIOD: 32 +# 2**13 (= 8,192) slots ~13 hours SLOTS_PER_HISTORICAL_ROOT: 8192 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**2 (= 4) epochs 25.6 minutes +MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 + + +# State vector lengths +# 
--------------------------------------------------------------- +# 2**16 (= 65,536) epochs ~0.8 years EPOCHS_PER_HISTORICAL_VECTOR: 65536 +# 2**13 (= 8,192) epochs ~36 days EPOCHS_PER_SLASHINGS_VECTOR: 8192 +# 2**24 (= 16,777,216) historical roots, ~26,131 years HISTORICAL_ROOTS_LIMIT: 16777216 +# 2**40 (= 1,099,511,627,776) validator spots VALIDATOR_REGISTRY_LIMIT: 1099511627776 + + +# Reward and penalty quotients +# --------------------------------------------------------------- +# 2**6 (= 64) +BASE_REWARD_FACTOR: 64 +# 2**9 (= 512) +WHISTLEBLOWER_REWARD_QUOTIENT: 512 +# 2**3 (= 8) +PROPOSER_REWARD_QUOTIENT: 8 +# 2**24 (= 16,777,216) +INACTIVITY_PENALTY_QUOTIENT: 16777216 +# 2**5 (= 32) +MIN_SLASHING_PENALTY_QUOTIENT: 32 + + +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) MAX_PROPOSER_SLASHINGS: 16 +# 2**1 (= 2) MAX_ATTESTER_SLASHINGS: 2 +# 2**7 (= 128) MAX_ATTESTATIONS: 128 +# 2**4 (= 16) MAX_DEPOSITS: 16 +# 2**4 (= 16) MAX_VOLUNTARY_EXITS: 16 -ETH1_FOLLOW_DISTANCE: 16 -TARGET_AGGREGATORS_PER_COMMITTEE: 0 -RANDOM_SUBNETS_PER_VALIDATOR: 0 -EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 0 -SECONDS_PER_ETH1_BLOCK: 14 + + +# Signature domains +# --------------------------------------------------------------- +DOMAIN_BEACON_PROPOSER: 0x00000000 +DOMAIN_BEACON_ATTESTER: 0x01000000 +DOMAIN_RANDAO: 0x02000000 +DOMAIN_DEPOSIT: 0x03000000 +DOMAIN_VOLUNTARY_EXIT: 0x04000000 +DOMAIN_SELECTION_PROOF: 0x05000000 +DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 1ec5b98c6ae..78cb1d848ae 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -17,8 +17,6 @@ pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; fn bls_library_name() -> &'static str { if cfg!(feature = "portable") { "blst-portable" - } else if cfg!(feature = "modern") { - "blst-modern" } else if cfg!(feature = "milagro") { "milagro" } else { @@ -118,7 +116,7 @@ fn main() { .long("testnet") 
.value_name("testnet") .help("Name of network lighthouse will connect to") - .possible_values(&["medalla", "altona"]) + .possible_values(&["medalla", "altona", "spadina"]) .conflicts_with("testnet-dir") .takes_value(true) .global(true) @@ -187,13 +185,6 @@ fn run( )); } - #[cfg(all(feature = "modern", target_arch = "x86_64"))] - if !std::is_x86_feature_detected!("adx") { - return Err(format!( - "CPU incompatible with optimized binary, please try Lighthouse portable build" - )); - } - let debug_level = matches .value_of("debug-level") .ok_or_else(|| "Expected --debug-level flag".to_string())?; @@ -299,61 +290,63 @@ fn run( "name" => testnet_name ); - let beacon_node = if let Some(sub_matches) = matches.subcommand_matches("beacon_node") { - let runtime_context = environment.core_context(); - - let beacon = environment - .runtime() - .block_on(ProductionBeaconNode::new_from_cli( - runtime_context, - sub_matches, - )) - .map_err(|e| format!("Failed to start beacon node: {}", e))?; - - Some(beacon) - } else { - None - }; - - let validator_client = if let Some(sub_matches) = matches.subcommand_matches("validator_client") - { - let runtime_context = environment.core_context(); - - let mut validator = environment - .runtime() - .block_on(ProductionValidatorClient::new_from_cli( - runtime_context, - sub_matches, - )) - .map_err(|e| format!("Failed to init validator client: {}", e))?; - - environment - .core_context() - .executor - .runtime_handle() - .enter(|| { - validator - .start_service() - .map_err(|e| format!("Failed to start validator client service: {}", e)) - })?; - - Some(validator) - } else { - None + match matches.subcommand() { + ("beacon_node", Some(matches)) => { + let context = environment.core_context(); + let log = context.log().clone(); + let executor = context.executor.clone(); + let config = beacon_node::get_config::( + matches, + &context.eth2_config.spec_constants, + &context.eth2_config().spec, + context.log().clone(), + )?; + 
environment.runtime().spawn(async move { + if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { + crit!(log, "Failed to start beacon node"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. + let _ = executor + .shutdown_sender() + .try_send("Failed to start beacon node"); + } + }) + } + ("validator_client", Some(matches)) => { + let context = environment.core_context(); + let log = context.log().clone(); + let executor = context.executor.clone(); + let config = validator_client::Config::from_cli(&matches) + .map_err(|e| format!("Unable to initialize validator config: {}", e))?; + environment.runtime().spawn(async move { + let run = async { + ProductionValidatorClient::new(context, config) + .await? + .start_service()?; + + Ok::<(), String>(()) + }; + if let Err(e) = run.await { + crit!(log, "Failed to start validator client"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. + let _ = executor + .shutdown_sender() + .try_send("Failed to start validator client"); + } + }) + } + _ => { + crit!(log, "No subcommand supplied. See --help ."); + return Err("No subcommand supplied.".into()); + } }; - if beacon_node.is_none() && validator_client.is_none() { - crit!(log, "No subcommand supplied. See --help ."); - return Err("No subcommand supplied.".into()); - } - // Block this thread until we get a ctrl-c or a task sends a shutdown signal. environment.block_until_shutdown_requested()?; info!(log, "Shutting down.."); environment.fire_signal(); - drop(beacon_node); - drop(validator_client); // Shutdown the environment once all tasks have completed. 
environment.shutdown_on_idle(); diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 563186792c7..30f885b4e8b 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -404,7 +404,7 @@ fn validator_import_launchpad() { .arg(format!("--{}", VALIDATOR_DIR_FLAG)) .arg(dst_dir.path().as_os_str()) .arg(IMPORT_CMD) - .arg(format!("--{}", import::STDIN_PASSWORD_FLAG)) // Using tty does not work well with tests. + .arg(format!("--{}", STDIN_INPUTS_FLAG)) // Using tty does not work well with tests. .arg(format!("--{}", import::DIR_FLAG)) .arg(src_dir.path().as_os_str()) .stderr(Stdio::piped()) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index a8e6c73c09c..e9d7275ad3e 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v0.12.2 +TESTS_TAG := v0.12.3 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index ff6dad6dd03..9f39a46c684 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -140,13 +140,20 @@ impl> LoadCase for Operations { // Check BLS setting here before SSZ deserialization, as most types require signatures // to be valid. - let operation = if metadata.bls_setting.unwrap_or_default().check().is_ok() { - Some(ssz_decode_file(&path.join(O::filename()))?) 
+ let (operation, bls_error) = if metadata.bls_setting.unwrap_or_default().check().is_ok() { + match ssz_decode_file(&path.join(O::filename())) { + Ok(op) => (Some(op), None), + Err(Error::InvalidBLSInput(error)) => (None, Some(error)), + Err(e) => return Err(e), + } } else { - None + (None, None) }; let post_filename = path.join("post.ssz"); let post = if post_filename.is_file() { + if let Some(bls_error) = bls_error { + panic!("input is unexpectedly invalid: {}", bls_error); + } Some(ssz_decode_file(&post_filename)?) } else { None diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs index c1ea6fb3b98..8d6486bb846 100644 --- a/testing/ef_tests/src/decode.rs +++ b/testing/ef_tests/src/decode.rs @@ -21,11 +21,19 @@ pub fn ssz_decode_file(path: &Path) -> Result { }) .and_then(|s| { T::from_ssz_bytes(&s).map_err(|e| { - Error::FailedToParseTest(format!( - "Unable to parse SSZ at {}: {:?}", - path.display(), - e - )) + match e { + // NOTE: this is a bit hacky, but seemingly better than the alternatives + ssz::DecodeError::BytesInvalid(message) + if message.contains("Blst") || message.contains("Milagro") => + { + Error::InvalidBLSInput(message) + } + e => Error::FailedToParseTest(format!( + "Unable to parse SSZ at {}: {:?}", + path.display(), + e + )), + } }) }) } diff --git a/testing/ef_tests/src/error.rs b/testing/ef_tests/src/error.rs index 98ac9e6dd12..2adec6dc108 100644 --- a/testing/ef_tests/src/error.rs +++ b/testing/ef_tests/src/error.rs @@ -6,6 +6,8 @@ pub enum Error { DidntFail(String), /// Failed to parse the test (internal error). FailedToParseTest(String), + /// Test case contained invalid BLS data. + InvalidBLSInput(String), /// Skipped the test because the BLS setting was mismatched. SkippedBls, /// Skipped the test because it's known to fail. 
@@ -18,6 +20,7 @@ impl Error { Error::NotEqual(_) => "NotEqual", Error::DidntFail(_) => "DidntFail", Error::FailedToParseTest(_) => "FailedToParseTest", + Error::InvalidBLSInput(_) => "InvalidBLSInput", Error::SkippedBls => "SkippedBls", Error::SkippedKnownFailure => "SkippedKnownFailure", } @@ -28,6 +31,7 @@ impl Error { Error::NotEqual(m) => m.as_str(), Error::DidntFail(m) => m.as_str(), Error::FailedToParseTest(m) => m.as_str(), + Error::InvalidBLSInput(m) => m.as_str(), _ => self.name(), } } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 571db3ca290..f15118e849a 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "validator_client" -version = "0.2.11" +version = "0.2.12" authors = ["Paul Hauner ", "Age Manning ", "Luke Anderson "] edition = "2018" diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index ebf56a8ffd3..59cf79807cf 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -51,11 +51,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { nodes using the same key. Automatically enabled unless `--strict` is specified", )) .arg( - Arg::with_name("strict-lockfiles") - .long("strict-lockfiles") + Arg::with_name("delete-lockfiles") + .long("delete-lockfiles") .help( - "If present, do not load validators that are guarded by a lockfile. Note: for \ - Eth2 mainnet, this flag will likely be removed and its behaviour will become default." + "If present, ignore and delete any keystore lockfiles encountered during start up. \ + This is useful if the validator client did not exit gracefully on the last run. \ + WARNING: lockfiles help prevent users from accidentally running the same validator \ + using two different validator clients, an action that likely leads to slashing. \ + Ensure you are certain that there are no other validator client instances running \ + that might also be using the same keystores." 
) ) .arg( diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 1e55ef1804c..991b5516220 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -26,8 +26,8 @@ pub struct Config { /// If true, the validator client will still poll for duties and produce blocks even if the /// beacon node is not synced at startup. pub allow_unsynced_beacon_node: bool, - /// If true, refuse to unlock a keypair that is guarded by a lockfile. - pub strict_lockfiles: bool, + /// If true, delete any validator keystore lockfiles that would prevent starting. + pub delete_lockfiles: bool, /// If true, don't scan the validators dir for new keystores. pub disable_auto_discover: bool, /// If true, don't re-register existing validators in definitions.yml for slashing protection. @@ -52,7 +52,7 @@ impl Default for Config { secrets_dir, http_server: DEFAULT_HTTP_SERVER.to_string(), allow_unsynced_beacon_node: false, - strict_lockfiles: false, + delete_lockfiles: false, disable_auto_discover: false, strict_slashing_protection: false, graffiti: None, @@ -107,7 +107,7 @@ impl Config { } config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced"); - config.strict_lockfiles = cli_args.is_present("strict-lockfiles"); + config.delete_lockfiles = cli_args.is_present("delete-lockfiles"); config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); config.strict_slashing_protection = cli_args.is_present("strict-slashing-protection"); diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 436dcb4bae3..400768f5cb4 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -54,6 +54,10 @@ pub enum Error { PasswordUnknown(PathBuf), /// There was an error reading from stdin. UnableToReadPasswordFromUser(String), + /// There was an error running a tokio async task. 
+ TokioJoin(tokio::task::JoinError), + /// There was a filesystem error when deleting a lockfile. + UnableToDeleteLockfile(io::Error), } /// A method used by a validator to sign messages. @@ -86,7 +90,7 @@ impl InitializedValidator { /// If the validator is unable to be initialized for whatever reason. pub fn from_definition( def: ValidatorDefinition, - strict_lockfiles: bool, + delete_lockfiles: bool, log: &Logger, ) -> Result { if !def.enabled { @@ -150,16 +154,17 @@ impl InitializedValidator { })?; if voting_keystore_lockfile_path.exists() { - if strict_lockfiles { - return Err(Error::LockfileExists(voting_keystore_lockfile_path)); - } else { - // If **not** respecting lockfiles, just raise a warning if the voting - // keypair cannot be unlocked. + if delete_lockfiles { warn!( log, - "Ignoring validator lockfile"; + "Deleting validator lockfile"; "file" => format!("{:?}", voting_keystore_lockfile_path) ); + + fs::remove_file(&voting_keystore_lockfile_path) + .map_err(Error::UnableToDeleteLockfile)?; + } else { + return Err(Error::LockfileExists(voting_keystore_lockfile_path)); } } else { // Create a new lockfile. @@ -279,7 +284,7 @@ pub struct InitializedValidators { impl InitializedValidators { /// Instantiates `Self`, initializing all validators in `definitions`. - pub fn from_definitions( + pub async fn from_definitions( definitions: ValidatorDefinitions, validators_dir: PathBuf, strict_lockfiles: bool, @@ -292,7 +297,7 @@ impl InitializedValidators { validators: HashMap::default(), log, }; - this.update_validators()?; + this.update_validators().await?; Ok(this) } @@ -328,7 +333,7 @@ impl InitializedValidators { /// validator will be removed from `self.validators`. /// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. 
- pub fn set_validator_status( + pub async fn set_validator_status( &mut self, voting_public_key: &PublicKey, enabled: bool, @@ -342,7 +347,7 @@ def.enabled = enabled; } - self.update_validators()?; + self.update_validators().await?; self.definitions .save(&self.validators_dir) @@ -362,7 +367,7 @@ /// A validator is considered "already known" and skipped if the public key is already known. /// I.e., if there are two different definitions with the same public key then the second will /// be ignored. - fn update_validators(&mut self) -> Result<(), Error> { + async fn update_validators(&mut self) -> Result<(), Error> { for def in self.definitions.as_slice() { if def.enabled { match &def.signing_definition { @@ -371,11 +376,23 @@ continue; } - match InitializedValidator::from_definition( - def.clone(), - self.strict_lockfiles, - &self.log, - ) { + // Decoding a local keystore can take several seconds, therefore it's best + // to keep it off the core executor. This also has the fortunate effect of + // interrupting the potentially long-running task during shutdown.
+ let inner_def = def.clone(); + let strict_lockfiles = self.strict_lockfiles; + let inner_log = self.log.clone(); + let result = tokio::task::spawn_blocking(move || { + InitializedValidator::from_definition( + inner_def, + strict_lockfiles, + &inner_log, + ) + }) + .await + .map_err(Error::TokioJoin)?; + + match result { Ok(init) => { self.validators .insert(init.voting_public_key().clone(), init); diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 7b5c969d368..13e7b5a88e4 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -18,6 +18,7 @@ use block_service::{BlockService, BlockServiceBuilder}; use clap::ArgMatches; use duties_service::{DutiesService, DutiesServiceBuilder}; use environment::RuntimeContext; +use eth2_config::Eth2Config; use fork_service::{ForkService, ForkServiceBuilder}; use futures::channel::mpsc; use initialized_validators::InitializedValidators; @@ -28,7 +29,7 @@ use slot_clock::SlotClock; use slot_clock::SystemTimeSlotClock; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{delay_for, Duration}; -use types::EthSpec; +use types::{EthSpec, Hash256}; use validator_store::ValidatorStore; /// The interval between attempts to contact the beacon node during startup. 
@@ -90,9 +91,10 @@ impl ProductionValidatorClient { let validators = InitializedValidators::from_definitions( validator_defs, config.validator_dir.clone(), - config.strict_lockfiles, + config.delete_lockfiles, log.clone(), ) + .await .map_err(|e| format!("Unable to initialize validators: {:?}", e))?; info!( @@ -106,56 +108,11 @@ impl ProductionValidatorClient { RemoteBeaconNode::new_with_timeout(config.http_server.clone(), HTTP_TIMEOUT) .map_err(|e| format!("Unable to init beacon node http client: {}", e))?; - // TODO: check if all logs in wait_for_node are produed while awaiting - let beacon_node = wait_for_node(beacon_node, &log).await?; - let eth2_config = beacon_node - .http - .spec() - .get_eth2_config() - .await - .map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e))?; - let genesis_time = beacon_node - .http - .beacon() - .get_genesis_time() - .await - .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e))?; - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to read system time: {:?}", e))?; - let genesis = Duration::from_secs(genesis_time); - - // If the time now is less than (prior to) genesis, then delay until the - // genesis instant. - // - // If the validator client starts before genesis, it will get errors from - // the slot clock. - if now < genesis { - info!( - log, - "Starting node prior to genesis"; - "seconds_to_wait" => (genesis - now).as_secs() - ); - - delay_for(genesis - now).await - } else { - info!( - log, - "Genesis has already occurred"; - "seconds_ago" => (now - genesis).as_secs() - ); - } - let genesis_validators_root = beacon_node - .http - .beacon() - .get_genesis_validators_root() - .await - .map_err(|e| { - format!( - "Unable to read genesis validators root from beacon node: {:?}", - e - ) - })?; + // Perform some potentially long-running initialization tasks. + let (eth2_config, genesis_time, genesis_validators_root) = tokio::select! 
{ + tuple = init_from_beacon_node(&beacon_node, &context) => tuple?, + () = context.executor.exit() => return Err("Shutting down".to_string()) + }; // Do not permit a connection to a beacon node using different spec constants. if context.eth2_config.spec_constants != eth2_config.spec_constants { @@ -270,12 +227,71 @@ impl ProductionValidatorClient { } } +async fn init_from_beacon_node( + beacon_node: &RemoteBeaconNode, + context: &RuntimeContext, +) -> Result<(Eth2Config, u64, Hash256), String> { + // Wait for the beacon node to come online. + wait_for_node(beacon_node, context.log()).await?; + + let eth2_config = beacon_node + .http + .spec() + .get_eth2_config() + .await + .map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e))?; + let genesis_time = beacon_node + .http + .beacon() + .get_genesis_time() + .await + .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e))?; + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {:?}", e))?; + let genesis = Duration::from_secs(genesis_time); + + // If the time now is less than (prior to) genesis, then delay until the + // genesis instant. + // + // If the validator client starts before genesis, it will get errors from + // the slot clock. + if now < genesis { + info!( + context.log(), + "Starting node prior to genesis"; + "seconds_to_wait" => (genesis - now).as_secs() + ); + + delay_for(genesis - now).await; + } else { + info!( + context.log(), + "Genesis has already occurred"; + "seconds_ago" => (now - genesis).as_secs() + ); + } + let genesis_validators_root = beacon_node + .http + .beacon() + .get_genesis_validators_root() + .await + .map_err(|e| { + format!( + "Unable to read genesis validators root from beacon node: {:?}", + e + ) + })?; + + Ok((eth2_config, genesis_time, genesis_validators_root)) +} + /// Request the version from the node, looping back and trying again on failure. 
Exit once the node /// has been contacted. async fn wait_for_node( - beacon_node: RemoteBeaconNode, + beacon_node: &RemoteBeaconNode, log: &Logger, -) -> Result, String> { +) -> Result<(), String> { // Try to get the version string from the node, looping until success is returned. loop { let log = log.clone(); @@ -295,7 +311,7 @@ async fn wait_for_node( "version" => version, ); - return Ok(beacon_node); + return Ok(()); } Err(e) => { error!(